diff --git a/buf.yaml b/buf.yaml index 5171d27ba7..cab90ddb25 100755 --- a/buf.yaml +++ b/buf.yaml @@ -9,31 +9,52 @@ lint: - FIELD_LOWER_SNAKE_CASE ignore_only: ENUM_VALUE_PREFIX: + - flyteidl2/common/configuration.proto - flyteidl2/common/list.proto - flyteidl2/common/runtime_version.proto - flyteidl2/core/artifact_id.proto - flyteidl2/core/catalog.proto + - flyteidl2/core/errors.proto + - flyteidl2/event/event.proto - flyteidl2/core/execution.proto - flyteidl2/core/identifier.proto - flyteidl2/core/security.proto - flyteidl2/core/tasks.proto - flyteidl2/core/types.proto + - flyteidl2/datacatalog/datacatalog.proto - flyteidl2/logs/dataplane/payload.proto - flyteidl2/secret/definition.proto - flyteidl2/plugins/spark.proto - flyteidl2/task/common.proto + - flyteidl2/plugins/kubeflow/common.proto ENUM_ZERO_VALUE_SUFFIX: - flyteidl2/common/authorization.proto + - flyteidl2/plugins/common.proto - flyteidl2/common/list.proto - flyteidl2/common/role.proto - flyteidl2/common/runtime_version.proto - flyteidl2/core/artifact_id.proto - flyteidl2/core/catalog.proto + - flyteidl2/core/errors.proto + - flyteidl2/event/event.proto - flyteidl2/core/execution.proto - flyteidl2/core/identifier.proto - flyteidl2/core/security.proto - flyteidl2/core/tasks.proto - flyteidl2/core/types.proto + - flyteidl2/datacatalog/datacatalog.proto - flyteidl2/logs/dataplane/payload.proto - flyteidl2/secret/definition.proto - - flyteidl2/plugins/spark.proto \ No newline at end of file + - flyteidl2/plugins/spark.proto + - flyteidl2/plugins/kubeflow/common.proto + RPC_REQUEST_RESPONSE_UNIQUE: + - flyteidl2/cacheservice/cacheservice.proto + - flyteidl2/cacheservice/v2/cacheservice.proto + RPC_REQUEST_STANDARD_NAME: + - flyteidl2/cacheservice/cacheservice.proto + - flyteidl2/cacheservice/v2/cacheservice.proto + RPC_RESPONSE_STANDARD_NAME: + - flyteidl2/cacheservice/cacheservice.proto + - flyteidl2/cacheservice/v2/cacheservice.proto + SERVICE_SUFFIX: + - flyteidl2/datacatalog/datacatalog.proto diff 
--git a/flyteidl2/cacheservice/cacheservice.proto b/flyteidl2/cacheservice/cacheservice.proto new file mode 100644 index 0000000000..efbfef105b --- /dev/null +++ b/flyteidl2/cacheservice/cacheservice.proto @@ -0,0 +1,147 @@ +syntax = "proto3"; + +package flyteidl2.cacheservice; + +import "flyteidl2/core/identifier.proto"; +import "flyteidl2/core/literals.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; + +option go_package = "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/cacheservice"; + +/* + * CacheService defines operations for cache management including retrieval, storage, and deletion of cached task/workflow outputs. + */ +service CacheService { + // Retrieves cached data by key. + rpc Get(GetCacheRequest) returns (GetCacheResponse); + + // Stores or updates cached data by key. + rpc Put(PutCacheRequest) returns (PutCacheResponse); + + // Deletes cached data by key. + rpc Delete(DeleteCacheRequest) returns (DeleteCacheResponse); + + // Get or extend a reservation for a cache key + rpc GetOrExtendReservation(GetOrExtendReservationRequest) returns (GetOrExtendReservationResponse); + + // Release the reservation for a cache key + rpc ReleaseReservation(ReleaseReservationRequest) returns (ReleaseReservationResponse); +} + +/* + * Additional metadata as key-value pairs + */ +message KeyMapMetadata { + map values = 1; // Additional metadata as key-value pairs +} + +/* + * Metadata for cached outputs, including the source identifier and timestamps. + */ +message Metadata { + core.Identifier source_identifier = 1; // Source task or workflow identifier + KeyMapMetadata key_map = 2; // Additional metadata as key-value pairs + google.protobuf.Timestamp created_at = 3; // Creation timestamp + google.protobuf.Timestamp last_updated_at = 4; // Last update timestamp +} + +/* + * Represents cached output, either as literals or an URI, with associated metadata. 
+ */ +message CachedOutput { + oneof output { + flyteidl2.core.LiteralMap output_literals = 1; // Output literals + string output_uri = 2; // URI to output data + } + Metadata metadata = 3; // Associated metadata +} + +/* + * Request to retrieve cached data by key. + */ +message GetCacheRequest { + string key = 1; // Cache key +} + +/* + * Response with cached data for a given key. + */ +message GetCacheResponse { + CachedOutput output = 1; // Cached output +} + +message OverwriteOutput { + bool overwrite = 1; // Overwrite flag + bool delete_blob = 2; // Delete existing blob + google.protobuf.Duration max_age = 3; // Maximum age of the cached output since last update +} + +/* + * Request to store/update cached data by key. + */ +message PutCacheRequest { + string key = 1; // Cache key + CachedOutput output = 2; // Output to cache + OverwriteOutput overwrite = 3; // Overwrite flag if exists +} + +/* + * Response message of cache store/update operation. + */ +message PutCacheResponse { + // Empty, success indicated by no errors +} + +/* + * Request to delete cached data by key. + */ +message DeleteCacheRequest { + string key = 1; // Cache key +} + +/* + * Response message of cache deletion operation. + */ +message DeleteCacheResponse { + // Empty, success indicated by no errors +} + +// A reservation including owner, heartbeat interval, expiration timestamp, and various metadata. 
+message Reservation { + string key = 1; // The unique ID for the reservation - same as the cache key + string owner_id = 2; // The unique ID of the owner for the reservation + google.protobuf.Duration heartbeat_interval = 3; // Requested reservation extension heartbeat interval + google.protobuf.Timestamp expires_at = 4; // Expiration timestamp of this reservation +} + +/* + * Request to get or extend a reservation for a cache key + */ +message GetOrExtendReservationRequest { + string key = 1; // The unique ID for the reservation - same as the cache key + string owner_id = 2; // The unique ID of the owner for the reservation + google.protobuf.Duration heartbeat_interval = 3; // Requested reservation extension heartbeat interval +} + +/* + * Response message of get or extend reservation operation, containing the created or extended reservation. + */ +message GetOrExtendReservationResponse { + Reservation reservation = 1; // The reservation that was created or extended +} + +/* + * Request to release the reservation for a cache key + */ +message ReleaseReservationRequest { + string key = 1; // The unique ID for the reservation - same as the cache key + string owner_id = 2; // The unique ID of the owner for the reservation +} + +/* + * Response message of release reservation operation. + */ +message ReleaseReservationResponse { + // Empty, success indicated by no errors +} diff --git a/flyteidl2/cacheservice/v2/cacheservice.proto b/flyteidl2/cacheservice/v2/cacheservice.proto new file mode 100644 index 0000000000..bf98b3e1ee --- /dev/null +++ b/flyteidl2/cacheservice/v2/cacheservice.proto @@ -0,0 +1,78 @@ +syntax = "proto3"; + +package flyteidl2.cacheservice.v2; + +import "buf/validate/validate.proto"; +import "flyteidl2/cacheservice/cacheservice.proto"; + +option go_package = "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/cacheservice/v2"; + +/* + * CacheService defines operations for cache management including retrieval, storage, and deletion of cached task/workflow outputs. 
+ */ +service CacheService { + // Retrieves cached data by key. + rpc Get(GetCacheRequest) returns (flyteidl2.cacheservice.GetCacheResponse); + + // Stores or updates cached data by key. + rpc Put(PutCacheRequest) returns (flyteidl2.cacheservice.PutCacheResponse); + + // Deletes cached data by key. + rpc Delete(DeleteCacheRequest) returns (flyteidl2.cacheservice.DeleteCacheResponse); + + // Get or extend a reservation for a cache key + rpc GetOrExtendReservation(GetOrExtendReservationRequest) returns (flyteidl2.cacheservice.GetOrExtendReservationResponse); + + // Release the reservation for a cache key + rpc ReleaseReservation(ReleaseReservationRequest) returns (flyteidl2.cacheservice.ReleaseReservationResponse); +} + +/* + * Identifier for cache operations, including org, project, and domain. + * This is used to scope cache operations to specific organizational contexts. + */ +message Identifier { + string org = 1 [(buf.validate.field).string.min_len = 1]; // Organization identifier + string project = 2 [(buf.validate.field).string.min_len = 1]; // Project identifier + string domain = 3 [(buf.validate.field).string.min_len = 1]; // Domain identifier +} + +/* + * Request to retrieve cached data by key. + */ +message GetCacheRequest { + flyteidl2.cacheservice.GetCacheRequest base_request = 1; + Identifier identifier = 2 [(buf.validate.field).required = true]; // Identifier for the cache operation +} + +/* + * Request to store/update cached data by key. + */ +message PutCacheRequest { + flyteidl2.cacheservice.PutCacheRequest base_request = 1; + Identifier identifier = 2 [(buf.validate.field).required = true]; // Identifier for the cache operation +} + +/* + * Request to delete cached data by key. 
+ */ +message DeleteCacheRequest { + flyteidl2.cacheservice.DeleteCacheRequest base_request = 1; + Identifier identifier = 2 [(buf.validate.field).required = true]; // Identifier for the cache operation +} + +/* + * Request to get or extend a reservation for a cache key + */ +message GetOrExtendReservationRequest { + flyteidl2.cacheservice.GetOrExtendReservationRequest base_request = 1; + Identifier identifier = 2 [(buf.validate.field).required = true]; // Identifier for the cache operation +} + +/* + * Request to release the reservation for a cache key + */ +message ReleaseReservationRequest { + flyteidl2.cacheservice.ReleaseReservationRequest base_request = 1; + Identifier identifier = 2 [(buf.validate.field).required = true]; // Identifier for the cache operation +} diff --git a/flyteidl2/clients/go/coreutils/extract_literal.go b/flyteidl2/clients/go/coreutils/extract_literal.go new file mode 100644 index 0000000000..aa8fde2b91 --- /dev/null +++ b/flyteidl2/clients/go/coreutils/extract_literal.go @@ -0,0 +1,107 @@ +// extract_literal.go +// Utility methods to extract a native golang value from a given Literal. +// Usage: +// 1] string literal extraction +// lit, _ := MakeLiteral("test_string") +// val, _ := ExtractFromLiteral(lit) +// 2] integer literal extraction. integer would be extracted in type int64. +// lit, _ := MakeLiteral([]interface{}{1, 2, 3}) +// val, _ := ExtractFromLiteral(lit) +// 3] float literal extraction. float would be extracted in type float64. +// lit, _ := MakeLiteral([]interface{}{1.0, 2.0, 3.0}) +// val, _ := ExtractFromLiteral(lit) +// 4] map of boolean literal extraction. 
+// mapInstance := map[string]interface{}{ +// "key1": []interface{}{1, 2, 3}, +// "key2": []interface{}{5}, +// } +// lit, _ := MakeLiteral(mapInstance) +// val, _ := ExtractFromLiteral(lit) +// For further examples check the test TestFetchLiteral in extract_literal_test.go + +package coreutils + +import ( + "fmt" + + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +func ExtractFromLiteral(literal *core.Literal) (interface{}, error) { + switch literalValue := literal.Value.(type) { + case *core.Literal_Scalar: + switch scalarValue := literalValue.Scalar.Value.(type) { + case *core.Scalar_Primitive: + switch scalarPrimitive := scalarValue.Primitive.Value.(type) { + case *core.Primitive_Integer: + scalarPrimitiveInt := scalarPrimitive.Integer + return scalarPrimitiveInt, nil + case *core.Primitive_FloatValue: + scalarPrimitiveFloat := scalarPrimitive.FloatValue + return scalarPrimitiveFloat, nil + case *core.Primitive_StringValue: + scalarPrimitiveString := scalarPrimitive.StringValue + return scalarPrimitiveString, nil + case *core.Primitive_Boolean: + scalarPrimitiveBoolean := scalarPrimitive.Boolean + return scalarPrimitiveBoolean, nil + case *core.Primitive_Datetime: + scalarPrimitiveDateTime := scalarPrimitive.Datetime.AsTime() + return scalarPrimitiveDateTime, nil + case *core.Primitive_Duration: + scalarPrimitiveDuration := scalarPrimitive.Duration.AsDuration() + return scalarPrimitiveDuration, nil + default: + return nil, fmt.Errorf("unsupported literal scalar primitive type %T", scalarValue) + } + case *core.Scalar_Binary: + return scalarValue.Binary, nil + case *core.Scalar_Blob: + return scalarValue.Blob.Uri, nil + case *core.Scalar_Schema: + return scalarValue.Schema.Uri, nil + case *core.Scalar_Generic: + return scalarValue.Generic, nil + case *core.Scalar_StructuredDataset: + return scalarValue.StructuredDataset.Uri, nil + case *core.Scalar_Union: + // extract the value of the union but not the actual union object + extractedVal, err := 
ExtractFromLiteral(scalarValue.Union.Value) + if err != nil { + return nil, err + } + return extractedVal, nil + case *core.Scalar_NoneType: + return nil, nil + default: + return nil, fmt.Errorf("unsupported literal scalar type %T", scalarValue) + } + case *core.Literal_Collection: + collectionValue := literalValue.Collection.Literals + collection := make([]interface{}, len(collectionValue)) + for index, val := range collectionValue { + if collectionElem, err := ExtractFromLiteral(val); err == nil { + collection[index] = collectionElem + } else { + return nil, err + } + } + return collection, nil + case *core.Literal_Map: + mapLiteralValue := literalValue.Map.Literals + mapResult := make(map[string]interface{}, len(mapLiteralValue)) + for key, val := range mapLiteralValue { + if val, err := ExtractFromLiteral(val); err == nil { + mapResult[key] = val + } else { + return nil, err + } + } + return mapResult, nil + case *core.Literal_OffloadedMetadata: + // Return the URI of the offloaded metadata to be used when displaying in flytectl + return literalValue.OffloadedMetadata.Uri, nil + + } + return nil, fmt.Errorf("unsupported literal type %T", literal) +} diff --git a/flyteidl2/clients/go/coreutils/extract_literal_test.go b/flyteidl2/clients/go/coreutils/extract_literal_test.go new file mode 100644 index 0000000000..8781c6b3a5 --- /dev/null +++ b/flyteidl2/clients/go/coreutils/extract_literal_test.go @@ -0,0 +1,267 @@ +// extract_literal_test.go +// Test class for the utility methods which extract a native golang value from a flyte Literal. 
+ +package coreutils + +import ( + "os" + "testing" + "time" + + structpb "github.com/golang/protobuf/ptypes/struct" + "github.com/stretchr/testify/assert" + + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +func TestFetchLiteral(t *testing.T) { + t.Run("Primitive", func(t *testing.T) { + lit, err := MakeLiteral("test_string") + assert.NoError(t, err) + val, err := ExtractFromLiteral(lit) + assert.NoError(t, err) + assert.Equal(t, "test_string", val) + }) + + t.Run("Timestamp", func(t *testing.T) { + now := time.Now().UTC() + lit, err := MakeLiteral(now) + assert.NoError(t, err) + val, err := ExtractFromLiteral(lit) + assert.NoError(t, err) + assert.Equal(t, now, val) + }) + + t.Run("Duration", func(t *testing.T) { + duration := time.Second * 10 + lit, err := MakeLiteral(duration) + assert.NoError(t, err) + val, err := ExtractFromLiteral(lit) + assert.NoError(t, err) + assert.Equal(t, duration, val) + }) + + t.Run("Array", func(t *testing.T) { + lit, err := MakeLiteral([]interface{}{1, 2, 3}) + assert.NoError(t, err) + val, err := ExtractFromLiteral(lit) + assert.NoError(t, err) + arr := []interface{}{int64(1), int64(2), int64(3)} + assert.Equal(t, arr, val) + }) + + t.Run("Map", func(t *testing.T) { + mapInstance := map[string]interface{}{ + "key1": []interface{}{1, 2, 3}, + "key2": []interface{}{5}, + } + lit, err := MakeLiteral(mapInstance) + assert.NoError(t, err) + val, err := ExtractFromLiteral(lit) + assert.NoError(t, err) + expectedMapInstance := map[string]interface{}{ + "key1": []interface{}{int64(1), int64(2), int64(3)}, + "key2": []interface{}{int64(5)}, + } + assert.Equal(t, expectedMapInstance, val) + }) + + t.Run("Map_Booleans", func(t *testing.T) { + mapInstance := map[string]interface{}{ + "key1": []interface{}{true, false, true}, + "key2": []interface{}{false}, + } + lit, err := MakeLiteral(mapInstance) + assert.NoError(t, err) + val, err := ExtractFromLiteral(lit) + assert.NoError(t, err) + assert.Equal(t, mapInstance, val) + }) + + 
t.Run("Map_Floats", func(t *testing.T) { + mapInstance := map[string]interface{}{ + "key1": []interface{}{1.0, 2.0, 3.0}, + "key2": []interface{}{1.0}, + } + lit, err := MakeLiteral(mapInstance) + assert.NoError(t, err) + val, err := ExtractFromLiteral(lit) + assert.NoError(t, err) + expectedMapInstance := map[string]interface{}{ + "key1": []interface{}{float64(1.0), float64(2.0), float64(3.0)}, + "key2": []interface{}{float64(1.0)}, + } + assert.Equal(t, expectedMapInstance, val) + }) + + t.Run("NestedMap", func(t *testing.T) { + mapInstance := map[string]interface{}{ + "key1": map[string]interface{}{"key11": 1.0, "key12": 2.0, "key13": 3.0}, + "key2": map[string]interface{}{"key21": 1.0}, + } + lit, err := MakeLiteral(mapInstance) + assert.NoError(t, err) + val, err := ExtractFromLiteral(lit) + assert.NoError(t, err) + expectedMapInstance := map[string]interface{}{ + "key1": map[string]interface{}{"key11": float64(1.0), "key12": float64(2.0), "key13": float64(3.0)}, + "key2": map[string]interface{}{"key21": float64(1.0)}, + } + assert.Equal(t, expectedMapInstance, val) + }) + + t.Run("Binary", func(t *testing.T) { + s := MakeBinaryLiteral([]byte{'h'}) + assert.Equal(t, []byte{'h'}, s.GetScalar().GetBinary().GetValue()) + _, err := ExtractFromLiteral(s) + assert.Nil(t, err) + }) + + t.Run("NoneType", func(t *testing.T) { + p, err := MakeLiteral(nil) + assert.NoError(t, err) + assert.NotNil(t, p.GetScalar()) + _, err = ExtractFromLiteral(p) + assert.Nil(t, err) + }) + + t.Run("Generic", func(t *testing.T) { + os.Setenv(FlyteUseOldDcFormat, "true") + literalVal := map[string]interface{}{ + "x": 1, + "y": "ystringvalue", + } + var literalType = &core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_STRUCT}} + lit, err := MakeLiteralForType(literalType, literalVal) + assert.NoError(t, err) + extractedLiteralVal, err := ExtractFromLiteral(lit) + assert.NoError(t, err) + fieldsMap := map[string]*structpb.Value{ + "x": { + Kind: 
&structpb.Value_NumberValue{NumberValue: 1}, + }, + "y": { + Kind: &structpb.Value_StringValue{StringValue: "ystringvalue"}, + }, + } + expectedStructVal := &structpb.Struct{ + Fields: fieldsMap, + } + extractedStructValue := extractedLiteralVal.(*structpb.Struct) + assert.Equal(t, len(expectedStructVal.Fields), len(extractedStructValue.Fields)) + for key, val := range expectedStructVal.Fields { + assert.Equal(t, val.Kind, extractedStructValue.Fields[key].Kind) + } + os.Unsetenv(FlyteUseOldDcFormat) + }) + + t.Run("Generic Passed As String", func(t *testing.T) { + literalVal := "{\"x\": 1,\"y\": \"ystringvalue\"}" + var literalType = &core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_STRUCT}} + lit, err := MakeLiteralForType(literalType, literalVal) + assert.NoError(t, err) + extractedLiteralVal, err := ExtractFromLiteral(lit) + assert.NoError(t, err) + fieldsMap := map[string]*structpb.Value{ + "x": { + Kind: &structpb.Value_NumberValue{NumberValue: 1}, + }, + "y": { + Kind: &structpb.Value_StringValue{StringValue: "ystringvalue"}, + }, + } + expectedStructVal := &structpb.Struct{ + Fields: fieldsMap, + } + extractedStructValue := extractedLiteralVal.(*structpb.Struct) + assert.Equal(t, len(expectedStructVal.Fields), len(extractedStructValue.Fields)) + for key, val := range expectedStructVal.Fields { + assert.Equal(t, val.Kind, extractedStructValue.Fields[key].Kind) + } + }) + + t.Run("Structured dataset", func(t *testing.T) { + literalVal := "s3://blah/blah/blah" + var dataSetColumns []*core.StructuredDatasetType_DatasetColumn + dataSetColumns = append(dataSetColumns, &core.StructuredDatasetType_DatasetColumn{ + Name: "Price", + LiteralType: &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_FLOAT, + }, + }, + }) + var literalType = &core.LiteralType{Type: &core.LiteralType_StructuredDatasetType{StructuredDatasetType: &core.StructuredDatasetType{ + Columns: dataSetColumns, + Format: "testFormat", + }}} + + lit, 
err := MakeLiteralForType(literalType, literalVal) + assert.NoError(t, err) + extractedLiteralVal, err := ExtractFromLiteral(lit) + assert.NoError(t, err) + assert.Equal(t, literalVal, extractedLiteralVal) + }) + + t.Run("Offloaded metadata", func(t *testing.T) { + literalVal := "s3://blah/blah/blah" + var storedLiteralType = &core.LiteralType{ + Type: &core.LiteralType_CollectionType{ + CollectionType: &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_INTEGER, + }, + }, + }, + } + offloadedLiteral := &core.Literal{ + Value: &core.Literal_OffloadedMetadata{ + OffloadedMetadata: &core.LiteralOffloadedMetadata{ + Uri: literalVal, + InferredType: storedLiteralType, + }, + }, + } + extractedLiteralVal, err := ExtractFromLiteral(offloadedLiteral) + assert.NoError(t, err) + assert.Equal(t, literalVal, extractedLiteralVal) + }) + + t.Run("Union", func(t *testing.T) { + literalVal := int64(1) + var literalType = &core.LiteralType{ + Type: &core.LiteralType_UnionType{ + UnionType: &core.UnionType{ + Variants: []*core.LiteralType{ + {Type: &core.LiteralType_Simple{Simple: core.SimpleType_INTEGER}}, + {Type: &core.LiteralType_Simple{Simple: core.SimpleType_FLOAT}}, + }, + }, + }, + } + lit, err := MakeLiteralForType(literalType, literalVal) + assert.NoError(t, err) + extractedLiteralVal, err := ExtractFromLiteral(lit) + assert.NoError(t, err) + assert.Equal(t, literalVal, extractedLiteralVal) + }) + + t.Run("Union with None", func(t *testing.T) { + var literalType = &core.LiteralType{ + Type: &core.LiteralType_UnionType{ + UnionType: &core.UnionType{ + Variants: []*core.LiteralType{ + {Type: &core.LiteralType_Simple{Simple: core.SimpleType_INTEGER}}, + {Type: &core.LiteralType_Simple{Simple: core.SimpleType_NONE}}, + }, + }, + }, + } + lit, err := MakeLiteralForType(literalType, nil) + + assert.NoError(t, err) + extractedLiteralVal, err := ExtractFromLiteral(lit) + assert.NoError(t, err) + assert.Nil(t, extractedLiteralVal) + }) +} diff --git 
a/flyteidl2/clients/go/coreutils/literals.go b/flyteidl2/clients/go/coreutils/literals.go new file mode 100644 index 0000000000..8a1f882a64 --- /dev/null +++ b/flyteidl2/clients/go/coreutils/literals.go @@ -0,0 +1,670 @@ +// Contains convenience methods for constructing core types. +package coreutils + +import ( + "encoding/json" + "fmt" + "math" + "os" + "reflect" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/jsonpb" + "github.com/golang/protobuf/ptypes" + structpb "github.com/golang/protobuf/ptypes/struct" + "github.com/pkg/errors" + "github.com/shamaton/msgpack/v2" + + "github.com/flyteorg/flyte/v2/flytestdlib/storage" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +const MESSAGEPACK = "msgpack" +const FlyteUseOldDcFormat = "FLYTE_USE_OLD_DC_FORMAT" + +func MakePrimitive(v interface{}) (*core.Primitive, error) { + switch p := v.(type) { + case int: + return &core.Primitive{ + Value: &core.Primitive_Integer{ + Integer: int64(p), + }, + }, nil + case int64: + return &core.Primitive{ + Value: &core.Primitive_Integer{ + Integer: p, + }, + }, nil + case float64: + return &core.Primitive{ + Value: &core.Primitive_FloatValue{ + FloatValue: p, + }, + }, nil + case time.Time: + t, err := ptypes.TimestampProto(p) + if err != nil { + return nil, err + } + return &core.Primitive{ + Value: &core.Primitive_Datetime{ + Datetime: t, + }, + }, nil + case time.Duration: + d := ptypes.DurationProto(p) + return &core.Primitive{ + Value: &core.Primitive_Duration{ + Duration: d, + }, + }, nil + case string: + return &core.Primitive{ + Value: &core.Primitive_StringValue{ + StringValue: p, + }, + }, nil + case bool: + return &core.Primitive{ + Value: &core.Primitive_Boolean{ + Boolean: p, + }, + }, nil + } + return nil, fmt.Errorf("failed to convert to a known primitive type. 
Input Type [%v] not supported", reflect.TypeOf(v).String()) +} + +func MustMakePrimitive(v interface{}) *core.Primitive { + f, err := MakePrimitive(v) + if err != nil { + panic(err) + } + return f +} + +func MakePrimitiveLiteral(v interface{}) (*core.Literal, error) { + p, err := MakePrimitive(v) + if err != nil { + return nil, err + } + return &core.Literal{ + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_Primitive{ + Primitive: p, + }, + }, + }, + }, nil +} + +func MustMakePrimitiveLiteral(v interface{}) *core.Literal { + p, err := MakePrimitiveLiteral(v) + if err != nil { + panic(err) + } + return p +} + +func MakeLiteralForMap(v map[string]interface{}) (*core.Literal, error) { + m, err := MakeLiteralMap(v) + if err != nil { + return nil, err + } + + return &core.Literal{ + Value: &core.Literal_Map{ + Map: m, + }, + }, nil +} + +func MakeLiteralForCollection(v []interface{}) (*core.Literal, error) { + literals := make([]*core.Literal, 0, len(v)) + for _, val := range v { + l, err := MakeLiteral(val) + if err != nil { + return nil, err + } + + literals = append(literals, l) + } + + return &core.Literal{ + Value: &core.Literal_Collection{ + Collection: &core.LiteralCollection{ + Literals: literals, + }, + }, + }, nil +} + +func MakeBinaryLiteral(v []byte) *core.Literal { + return &core.Literal{ + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_Binary{ + Binary: &core.Binary{ + Value: v, + Tag: MESSAGEPACK, + }, + }, + }, + }, + } +} + +func MakeGenericLiteral(v *structpb.Struct) *core.Literal { + return &core.Literal{ + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_Generic{ + Generic: v, + }, + }, + }} +} + +func MakeLiteral(v interface{}) (*core.Literal, error) { + if v == nil { + return &core.Literal{ + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_NoneType{ + NoneType: &core.Void{}, + }, + }, + }, + }, nil + } + switch o := v.(type) { + case 
*core.Literal: + return o, nil + case []interface{}: + return MakeLiteralForCollection(o) + case map[string]interface{}: + return MakeLiteralForMap(o) + case []byte: + return MakeBinaryLiteral(v.([]byte)), nil + case *structpb.Struct: + return MakeGenericLiteral(v.(*structpb.Struct)), nil + case *core.Error: + return &core.Literal{ + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_Error{ + Error: v.(*core.Error), + }, + }, + }, + }, nil + default: + return MakePrimitiveLiteral(o) + } +} + +func MustMakeDefaultLiteralForType(typ *core.LiteralType) *core.Literal { + if res, err := MakeDefaultLiteralForType(typ); err != nil { + panic(err) + } else { + return res + } +} + +func MakeDefaultLiteralForType(typ *core.LiteralType) (*core.Literal, error) { + switch t := typ.GetType().(type) { + case *core.LiteralType_Simple: + switch t.Simple { + case core.SimpleType_NONE: + return MakeLiteral(nil) + case core.SimpleType_INTEGER: + return MakeLiteral(int(0)) + case core.SimpleType_FLOAT: + return MakeLiteral(float64(0)) + case core.SimpleType_STRING: + return MakeLiteral("") + case core.SimpleType_BOOLEAN: + return MakeLiteral(false) + case core.SimpleType_DATETIME: + return MakeLiteral(time.Now()) + case core.SimpleType_DURATION: + return MakeLiteral(time.Second) + case core.SimpleType_BINARY: + return MakeLiteral([]byte{}) + case core.SimpleType_ERROR: + return &core.Literal{ + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_Error{ + Error: &core.Error{ + Message: "Default Error message", + }, + }, + }, + }, + }, nil + case core.SimpleType_STRUCT: + return &core.Literal{ + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_Generic{ + Generic: &structpb.Struct{}, + }, + }, + }, + }, nil + } + return nil, errors.Errorf("Not yet implemented. 
Default creation is not yet implemented for [%s] ", t.Simple.String()) + case *core.LiteralType_Blob: + return &core.Literal{ + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_Blob{ + Blob: &core.Blob{ + Metadata: &core.BlobMetadata{ + Type: t.Blob, + }, + Uri: "/tmp/somepath", + }, + }, + }, + }, + }, nil + case *core.LiteralType_CollectionType: + single, err := MakeDefaultLiteralForType(t.CollectionType) + if err != nil { + return nil, err + } + + return &core.Literal{ + Value: &core.Literal_Collection{ + Collection: &core.LiteralCollection{ + Literals: []*core.Literal{single}, + }, + }, + }, nil + case *core.LiteralType_MapValueType: + single, err := MakeDefaultLiteralForType(t.MapValueType) + if err != nil { + return nil, err + } + + return &core.Literal{ + Value: &core.Literal_Map{ + Map: &core.LiteralMap{ + Literals: map[string]*core.Literal{ + "itemKey": single, + }, + }, + }, + }, nil + case *core.LiteralType_EnumType: + return MakeLiteralForType(typ, nil) + case *core.LiteralType_Schema: + return MakeLiteralForType(typ, nil) + case *core.LiteralType_UnionType: + if len(t.UnionType.Variants) == 0 { + return nil, errors.Errorf("Union type must have at least one variant") + } + // For union types, we just return the default for the first variant + val, err := MakeDefaultLiteralForType(t.UnionType.Variants[0]) + if err != nil { + return nil, errors.Errorf("Failed to create default literal for first union type variant [%v]", t.UnionType.Variants[0]) + } + res := &core.Literal{ + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_Union{ + Union: &core.Union{ + Type: t.UnionType.Variants[0], + Value: val, + }, + }, + }, + }, + } + return res, nil + } + + return nil, fmt.Errorf("failed to convert to a known Literal. 
Input Type [%v] not supported", typ.String()) +} + +func MakePrimitiveForType(t core.SimpleType, s string) (*core.Primitive, error) { + p := &core.Primitive{} + switch t { + case core.SimpleType_INTEGER: + v, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return nil, errors.Wrap(err, "failed to parse integer value") + } + p.Value = &core.Primitive_Integer{Integer: v} + case core.SimpleType_FLOAT: + v, err := strconv.ParseFloat(s, 64) + if err != nil { + return nil, errors.Wrap(err, "failed to parse Float value") + } + p.Value = &core.Primitive_FloatValue{FloatValue: v} + case core.SimpleType_BOOLEAN: + v, err := strconv.ParseBool(s) + if err != nil { + return nil, errors.Wrap(err, "failed to parse Bool value") + } + p.Value = &core.Primitive_Boolean{Boolean: v} + case core.SimpleType_STRING: + p.Value = &core.Primitive_StringValue{StringValue: s} + case core.SimpleType_DURATION: + v, err := time.ParseDuration(s) + if err != nil { + return nil, errors.Wrap(err, "failed to parse Duration, valid formats: e.g. 
300ms, -1.5h, 2h45m") + } + p.Value = &core.Primitive_Duration{Duration: ptypes.DurationProto(v)} + case core.SimpleType_DATETIME: + v, err := time.Parse(time.RFC3339, s) + if err != nil { + return nil, errors.Wrap(err, "failed to parse Datetime in RFC3339 format") + } + ts, err := ptypes.TimestampProto(v) + if err != nil { + return nil, errors.Wrap(err, "failed to convert datetime to proto") + } + p.Value = &core.Primitive_Datetime{Datetime: ts} + default: + return nil, fmt.Errorf("unsupported type %s", t.String()) + } + return p, nil +} + +func MakeLiteralForSimpleType(t core.SimpleType, s string) (*core.Literal, error) { + s = strings.Trim(s, " \n\t") + scalar := &core.Scalar{} + switch t { + case core.SimpleType_STRUCT: + st := &structpb.Struct{} + unmarshaler := jsonpb.Unmarshaler{AllowUnknownFields: true} + err := unmarshaler.Unmarshal(strings.NewReader(s), st) + if err != nil { + return nil, errors.Wrapf(err, "failed to load generic type as json.") + } + scalar.Value = &core.Scalar_Generic{ + Generic: st, + } + case core.SimpleType_BINARY: + scalar.Value = &core.Scalar_Binary{ + Binary: &core.Binary{ + Value: []byte(s), + Tag: MESSAGEPACK, + }, + } + case core.SimpleType_ERROR: + scalar.Value = &core.Scalar_Error{ + Error: &core.Error{ + Message: s, + }, + } + case core.SimpleType_NONE: + scalar.Value = &core.Scalar_NoneType{ + NoneType: &core.Void{}, + } + default: + p, err := MakePrimitiveForType(t, s) + if err != nil { + return nil, err + } + scalar.Value = &core.Scalar_Primitive{Primitive: p} + } + return &core.Literal{ + Value: &core.Literal_Scalar{ + Scalar: scalar, + }, + }, nil +} + +func MustMakeLiteral(v interface{}) *core.Literal { + p, err := MakeLiteral(v) + if err != nil { + panic(err) + } + + return p +} + +func MakeLiteralMap(v map[string]interface{}) (*core.LiteralMap, error) { + + literals := make(map[string]*core.Literal, len(v)) + for key, val := range v { + l, err := MakeLiteral(val) + if err != nil { + return nil, err + } + + 
literals[key] = l + } + + return &core.LiteralMap{ + Literals: literals, + }, nil +} + +func MakeLiteralForSchema(path storage.DataReference, columns []*core.SchemaType_SchemaColumn) *core.Literal { + return &core.Literal{ + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_Schema{ + Schema: &core.Schema{ + Uri: path.String(), + Type: &core.SchemaType{ + Columns: columns, + }, + }, + }, + }, + }, + } +} + +func MakeLiteralForStructuredDataSet(path storage.DataReference, columns []*core.StructuredDatasetType_DatasetColumn, format string) *core.Literal { + return &core.Literal{ + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_StructuredDataset{ + StructuredDataset: &core.StructuredDataset{ + Uri: path.String(), + Metadata: &core.StructuredDatasetMetadata{ + StructuredDatasetType: &core.StructuredDatasetType{ + Columns: columns, + Format: format, + }, + }, + }, + }, + }, + }, + } +} + +func MakeLiteralForBlob(path storage.DataReference, isDir bool, format string) *core.Literal { + dim := core.BlobType_SINGLE + if isDir { + dim = core.BlobType_MULTIPART + } + return &core.Literal{ + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_Blob{ + Blob: &core.Blob{ + Uri: path.String(), + Metadata: &core.BlobMetadata{ + Type: &core.BlobType{ + Dimensionality: dim, + Format: format, + }, + }, + }, + }, + }, + }, + } +} + +func MakeLiteralForType(t *core.LiteralType, v interface{}) (*core.Literal, error) { + l := &core.Literal{} + switch newT := t.Type.(type) { + case *core.LiteralType_MapValueType: + newV, ok := v.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("map value types can only be of type map[string]interface{}, but found %v", reflect.TypeOf(v)) + } + + literals := make(map[string]*core.Literal, len(newV)) + for key, val := range newV { + lv, err := MakeLiteralForType(newT.MapValueType, val) + if err != nil { + return nil, err + } + literals[key] = lv + } + l.Value = 
&core.Literal_Map{ + Map: &core.LiteralMap{ + Literals: literals, + }, + } + + case *core.LiteralType_CollectionType: + newV, ok := v.([]interface{}) + if !ok { + return nil, fmt.Errorf("collection type expected but found %v", reflect.TypeOf(v)) + } + + literals := make([]*core.Literal, 0, len(newV)) + for _, val := range newV { + lv, err := MakeLiteralForType(newT.CollectionType, val) + if err != nil { + return nil, err + } + literals = append(literals, lv) + } + l.Value = &core.Literal_Collection{ + Collection: &core.LiteralCollection{ + Literals: literals, + }, + } + + case *core.LiteralType_Simple: + strValue := fmt.Sprintf("%v", v) + if v == nil { + strValue = "" + } + // Note this is to support large integers which by default when passed from an unmarshalled json will be + // converted to float64 and printed as exponential format by Sprintf. + // eg : 8888888 get converted to 8.888888e+06 and which causes strconv.ParseInt to fail + // Inorder to avoid this we explicitly add this check. 
+ if f, ok := v.(float64); ok && math.Trunc(f) == f { + strValue = fmt.Sprintf("%.0f", math.Trunc(f)) + } + if newT.Simple == core.SimpleType_STRUCT { + useOldFormat := strings.ToLower(os.Getenv(FlyteUseOldDcFormat)) + if _, isValueStringType := v.(string); !isValueStringType { + if useOldFormat == "1" || useOldFormat == "t" || useOldFormat == "true" { + byteValue, err := json.Marshal(v) + if err != nil { + return nil, fmt.Errorf("unable to marshal to json string for struct value %v", v) + } + strValue = string(byteValue) + } else { + byteValue, err := msgpack.Marshal(v) + if err != nil { + return nil, fmt.Errorf("unable to marshal to msgpack bytes for struct value %v", v) + } + return &core.Literal{ + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_Binary{ + Binary: &core.Binary{ + Value: byteValue, + Tag: MESSAGEPACK, + }, + }, + }, + }, + }, nil + } + } + } + lv, err := MakeLiteralForSimpleType(newT.Simple, strValue) + if err != nil { + return nil, err + } + return lv, nil + + case *core.LiteralType_Blob: + isDir := newT.Blob.Dimensionality == core.BlobType_MULTIPART + lv := MakeLiteralForBlob(storage.DataReference(fmt.Sprintf("%v", v)), isDir, newT.Blob.Format) + return lv, nil + + case *core.LiteralType_Schema: + lv := MakeLiteralForSchema(storage.DataReference(fmt.Sprintf("%v", v)), newT.Schema.Columns) + return lv, nil + case *core.LiteralType_StructuredDatasetType: + lv := MakeLiteralForStructuredDataSet(storage.DataReference(fmt.Sprintf("%v", v)), newT.StructuredDatasetType.Columns, newT.StructuredDatasetType.Format) + return lv, nil + + case *core.LiteralType_EnumType: + var newV string + if v == nil { + if len(t.GetEnumType().Values) == 0 { + return nil, fmt.Errorf("enum types need at least one value") + } + newV = t.GetEnumType().Values[0] + } else { + var ok bool + newV, ok = v.(string) + if !ok { + return nil, fmt.Errorf("cannot convert [%v] to enum representations, only string values are supported in enum literals", 
reflect.TypeOf(v)) + } + found := false + for _, val := range t.GetEnumType().GetValues() { + if val == newV { + found = true + break + } + } + if !found { + return nil, fmt.Errorf("incorrect enum value [%s], supported values %+v", newV, t.GetEnumType().GetValues()) + } + } + return MakePrimitiveLiteral(newV) + + case *core.LiteralType_UnionType: + // Try different types in the variants, return the first one matched + found := false + for _, subType := range newT.UnionType.Variants { + lv, err := MakeLiteralForType(subType, v) + if err == nil { + l = &core.Literal{ + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_Union{ + Union: &core.Union{ + Value: lv, + Type: subType, + }, + }, + }, + }, + } + found = true + break + } + } + if !found { + return nil, fmt.Errorf("incorrect union value [%s], supported values %+v", v, newT.UnionType.Variants) + } + default: + return nil, fmt.Errorf("unsupported type %s", t.String()) + } + + return l, nil +} diff --git a/flyteidl2/clients/go/coreutils/literals_test.go b/flyteidl2/clients/go/coreutils/literals_test.go new file mode 100644 index 0000000000..fecb19840c --- /dev/null +++ b/flyteidl2/clients/go/coreutils/literals_test.go @@ -0,0 +1,868 @@ +// extract_literal_test.go +// Test class for the utility methods which construct flyte literals. 
+ +package coreutils + +import ( + "fmt" + "os" + "reflect" + "strconv" + "testing" + "time" + + "github.com/flyteorg/flyte/v2/flytestdlib/storage" + "github.com/go-test/deep" + "github.com/golang/protobuf/ptypes" + structpb "github.com/golang/protobuf/ptypes/struct" + "github.com/pkg/errors" + "github.com/shamaton/msgpack/v2" + "github.com/stretchr/testify/assert" + + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +func TestMakePrimitive(t *testing.T) { + { + v := 1 + p, err := MakePrimitive(v) + assert.NoError(t, err) + assert.Equal(t, "*core.Primitive_Integer", reflect.TypeOf(p.Value).String()) + assert.Equal(t, int64(v), p.GetInteger()) + } + { + v := int64(1) + p, err := MakePrimitive(v) + assert.NoError(t, err) + assert.Equal(t, "*core.Primitive_Integer", reflect.TypeOf(p.Value).String()) + assert.Equal(t, v, p.GetInteger()) + } + { + v := 1.0 + p, err := MakePrimitive(v) + assert.NoError(t, err) + assert.Equal(t, "*core.Primitive_FloatValue", reflect.TypeOf(p.Value).String()) + assert.Equal(t, v, p.GetFloatValue()) + } + { + v := "blah" + p, err := MakePrimitive(v) + assert.NoError(t, err) + assert.Equal(t, "*core.Primitive_StringValue", reflect.TypeOf(p.Value).String()) + assert.Equal(t, v, p.GetStringValue()) + } + { + v := true + p, err := MakePrimitive(v) + assert.NoError(t, err) + assert.Equal(t, "*core.Primitive_Boolean", reflect.TypeOf(p.Value).String()) + assert.Equal(t, v, p.GetBoolean()) + } + { + v := time.Now() + p, err := MakePrimitive(v) + assert.NoError(t, err) + assert.Equal(t, "*core.Primitive_Datetime", reflect.TypeOf(p.Value).String()) + j, err := ptypes.TimestampProto(v) + assert.NoError(t, err) + assert.Equal(t, j, p.GetDatetime()) + _, err = MakePrimitive(time.Date(0, 0, 0, 0, 0, 0, 0, time.UTC)) + assert.Error(t, err) + } + { + v := time.Second * 10 + p, err := MakePrimitive(v) + assert.NoError(t, err) + assert.Equal(t, "*core.Primitive_Duration", reflect.TypeOf(p.Value).String()) + assert.Equal(t, ptypes.DurationProto(v), 
p.GetDuration()) + } + { + v := struct { + }{} + _, err := MakePrimitive(v) + assert.Error(t, err) + } +} + +func TestMustMakePrimitive(t *testing.T) { + { + v := struct { + }{} + assert.Panics(t, func() { + MustMakePrimitive(v) + }) + } + { + v := time.Second * 10 + p := MustMakePrimitive(v) + assert.Equal(t, "*core.Primitive_Duration", reflect.TypeOf(p.Value).String()) + assert.Equal(t, ptypes.DurationProto(v), p.GetDuration()) + } +} + +func TestMakePrimitiveLiteral(t *testing.T) { + { + v := 1.0 + p, err := MakePrimitiveLiteral(v) + assert.NoError(t, err) + assert.NotNil(t, p.GetScalar()) + assert.Equal(t, "*core.Primitive_FloatValue", reflect.TypeOf(p.GetScalar().GetPrimitive().Value).String()) + assert.Equal(t, v, p.GetScalar().GetPrimitive().GetFloatValue()) + } + { + v := struct { + }{} + _, err := MakePrimitiveLiteral(v) + assert.Error(t, err) + } +} + +func TestMustMakePrimitiveLiteral(t *testing.T) { + t.Run("Panic", func(t *testing.T) { + v := struct { + }{} + assert.Panics(t, func() { + MustMakePrimitiveLiteral(v) + }) + }) + t.Run("FloatValue", func(t *testing.T) { + v := 1.0 + p := MustMakePrimitiveLiteral(v) + assert.NotNil(t, p.GetScalar()) + assert.Equal(t, "*core.Primitive_FloatValue", reflect.TypeOf(p.GetScalar().GetPrimitive().Value).String()) + assert.Equal(t, v, p.GetScalar().GetPrimitive().GetFloatValue()) + }) +} + +func TestMakeLiteral(t *testing.T) { + t.Run("Primitive", func(t *testing.T) { + lit, err := MakeLiteral("test_string") + assert.NoError(t, err) + assert.Equal(t, "*core.Primitive_StringValue", reflect.TypeOf(lit.GetScalar().GetPrimitive().Value).String()) + }) + + t.Run("Array", func(t *testing.T) { + lit, err := MakeLiteral([]interface{}{1, 2, 3}) + assert.NoError(t, err) + assert.Equal(t, "*core.Literal_Collection", reflect.TypeOf(lit.GetValue()).String()) + assert.Equal(t, "*core.Primitive_Integer", reflect.TypeOf(lit.GetCollection().Literals[0].GetScalar().GetPrimitive().Value).String()) + }) + + t.Run("Map", func(t 
*testing.T) { + lit, err := MakeLiteral(map[string]interface{}{ + "key1": []interface{}{1, 2, 3}, + "key2": []interface{}{5}, + }) + assert.NoError(t, err) + assert.Equal(t, "*core.Literal_Map", reflect.TypeOf(lit.GetValue()).String()) + assert.Equal(t, "*core.Literal_Collection", reflect.TypeOf(lit.GetMap().Literals["key1"].GetValue()).String()) + }) + + t.Run("Binary", func(t *testing.T) { + s := MakeBinaryLiteral([]byte{'h'}) + assert.Equal(t, []byte{'h'}, s.GetScalar().GetBinary().GetValue()) + }) + + t.Run("NoneType", func(t *testing.T) { + p, err := MakeLiteral(nil) + assert.NoError(t, err) + assert.NotNil(t, p.GetScalar()) + assert.Equal(t, "*core.Scalar_NoneType", reflect.TypeOf(p.GetScalar().Value).String()) + }) +} + +func TestMustMakeLiteral(t *testing.T) { + v := "hello" + l := MustMakeLiteral(v) + assert.NotNil(t, l.GetScalar()) + assert.Equal(t, v, l.GetScalar().GetPrimitive().GetStringValue()) +} + +func TestMakeBinaryLiteral(t *testing.T) { + s := MakeBinaryLiteral([]byte{'h'}) + assert.Equal(t, []byte{'h'}, s.GetScalar().GetBinary().GetValue()) +} + +func TestMakeDefaultLiteralForType(t *testing.T) { + type args struct { + name string + ty core.SimpleType + tyName string + isPrimitive bool + } + tests := []args{ + {"None", core.SimpleType_NONE, "*core.Scalar_NoneType", false}, + {"Binary", core.SimpleType_BINARY, "*core.Scalar_Binary", false}, + {"Integer", core.SimpleType_INTEGER, "*core.Primitive_Integer", true}, + {"Float", core.SimpleType_FLOAT, "*core.Primitive_FloatValue", true}, + {"String", core.SimpleType_STRING, "*core.Primitive_StringValue", true}, + {"Boolean", core.SimpleType_BOOLEAN, "*core.Primitive_Boolean", true}, + {"Duration", core.SimpleType_DURATION, "*core.Primitive_Duration", true}, + {"Datetime", core.SimpleType_DATETIME, "*core.Primitive_Datetime", true}, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + l, err := MakeDefaultLiteralForType(&core.LiteralType{Type: &core.LiteralType_Simple{Simple: 
test.ty}}) + assert.NoError(t, err) + if test.isPrimitive { + assert.Equal(t, test.tyName, reflect.TypeOf(l.GetScalar().GetPrimitive().Value).String()) + } else { + assert.Equal(t, test.tyName, reflect.TypeOf(l.GetScalar().Value).String()) + } + }) + } + + t.Run("Binary", func(t *testing.T) { + s, err := MakeLiteral([]byte{'h'}) + assert.NoError(t, err) + assert.Equal(t, []byte{'h'}, s.GetScalar().GetBinary().GetValue()) + }) + + t.Run("Blob", func(t *testing.T) { + l, err := MakeDefaultLiteralForType(&core.LiteralType{Type: &core.LiteralType_Blob{}}) + assert.NoError(t, err) + assert.Equal(t, "*core.Scalar_Blob", reflect.TypeOf(l.GetScalar().Value).String()) + }) + + t.Run("Collection", func(t *testing.T) { + l, err := MakeDefaultLiteralForType(&core.LiteralType{Type: &core.LiteralType_CollectionType{CollectionType: &core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_INTEGER}}}}) + assert.NoError(t, err) + assert.Equal(t, "*core.LiteralCollection", reflect.TypeOf(l.GetCollection()).String()) + }) + + t.Run("Map", func(t *testing.T) { + l, err := MakeDefaultLiteralForType(&core.LiteralType{Type: &core.LiteralType_MapValueType{MapValueType: &core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_INTEGER}}}}) + assert.NoError(t, err) + assert.Equal(t, "*core.LiteralMap", reflect.TypeOf(l.GetMap()).String()) + }) + + t.Run("error", func(t *testing.T) { + l, err := MakeDefaultLiteralForType(&core.LiteralType{Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_ERROR, + }}) + assert.NoError(t, err) + assert.NotNil(t, l.GetScalar().GetError()) + }) + + t.Run("binary", func(t *testing.T) { + l, err := MakeDefaultLiteralForType(&core.LiteralType{Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_BINARY, + }}) + assert.NoError(t, err) + assert.NotNil(t, l.GetScalar().GetBinary()) + assert.NotNil(t, l.GetScalar().GetBinary().GetValue()) + assert.NotNil(t, l.GetScalar().GetBinary().GetTag()) + }) + + t.Run("struct", func(t 
*testing.T) { + l, err := MakeDefaultLiteralForType(&core.LiteralType{Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_STRUCT, + }}) + assert.NoError(t, err) + assert.NotNil(t, l.GetScalar().GetGeneric()) + }) + + t.Run("enum", func(t *testing.T) { + l, err := MakeDefaultLiteralForType(&core.LiteralType{Type: &core.LiteralType_EnumType{ + EnumType: &core.EnumType{Values: []string{"x", "y", "z"}}, + }}) + assert.NoError(t, err) + assert.NotNil(t, l.GetScalar().GetPrimitive().GetStringValue()) + expected := &core.Literal{Value: &core.Literal_Scalar{Scalar: &core.Scalar{ + Value: &core.Scalar_Primitive{Primitive: &core.Primitive{Value: &core.Primitive_StringValue{StringValue: "x"}}}}}} + assert.Equal(t, expected, l) + }) + + t.Run("union", func(t *testing.T) { + l, err := MakeDefaultLiteralForType( + &core.LiteralType{ + Type: &core.LiteralType_UnionType{ + UnionType: &core.UnionType{ + Variants: []*core.LiteralType{ + {Type: &core.LiteralType_Simple{Simple: core.SimpleType_INTEGER}}, + {Type: &core.LiteralType_Simple{Simple: core.SimpleType_FLOAT}}, + }, + }, + }, + }, + ) + assert.NoError(t, err) + assert.Equal(t, "*core.Union", reflect.TypeOf(l.GetScalar().GetUnion()).String()) + }) +} + +func TestMustMakeDefaultLiteralForType(t *testing.T) { + t.Run("error", func(t *testing.T) { + assert.Panics(t, func() { + MustMakeDefaultLiteralForType(nil) + }) + }) + + t.Run("Blob", func(t *testing.T) { + l := MustMakeDefaultLiteralForType(&core.LiteralType{Type: &core.LiteralType_Blob{}}) + assert.Equal(t, "*core.Scalar_Blob", reflect.TypeOf(l.GetScalar().Value).String()) + }) +} + +func TestMakePrimitiveForType(t *testing.T) { + n := time.Now() + type args struct { + t core.SimpleType + s string + } + tests := []struct { + name string + args args + want *core.Primitive + wantErr bool + }{ + {"error-type", args{core.SimpleType_NONE, "x"}, nil, true}, + + {"error-int", args{core.SimpleType_INTEGER, "x"}, nil, true}, + {"int", args{core.SimpleType_INTEGER, "1"}, 
MustMakePrimitive(1), false}, + + {"error-bool", args{core.SimpleType_BOOLEAN, "x"}, nil, true}, + {"bool", args{core.SimpleType_BOOLEAN, "true"}, MustMakePrimitive(true), false}, + + {"error-float", args{core.SimpleType_FLOAT, "x"}, nil, true}, + {"float", args{core.SimpleType_FLOAT, "3.1416"}, MustMakePrimitive(3.1416), false}, + + {"string", args{core.SimpleType_STRING, "string"}, MustMakePrimitive("string"), false}, + + {"error-dt", args{core.SimpleType_DATETIME, "x"}, nil, true}, + {"dt", args{core.SimpleType_DATETIME, n.Format(time.RFC3339Nano)}, MustMakePrimitive(n), false}, + + {"error-dur", args{core.SimpleType_DURATION, "x"}, nil, true}, + {"dur", args{core.SimpleType_DURATION, time.Hour.String()}, MustMakePrimitive(time.Hour), false}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := MakePrimitiveForType(tt.args.t, tt.args.s) + if (err != nil) != tt.wantErr { + t.Errorf("MakePrimitiveForType() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("MakePrimitiveForType() got = %v, want %v", got, tt.want) + } + }) + } +} + +func TestMakeLiteralForSimpleType(t *testing.T) { + type args struct { + t core.SimpleType + s string + } + tests := []struct { + name string + args args + want *core.Literal + wantErr bool + }{ + {"error-int", args{core.SimpleType_INTEGER, "x"}, nil, true}, + {"int", args{core.SimpleType_INTEGER, "1"}, MustMakeLiteral(1), false}, + + {"error-struct", args{core.SimpleType_STRUCT, "x"}, nil, true}, + {"struct", args{core.SimpleType_STRUCT, `{"x": 1}`}, MustMakeLiteral(&structpb.Struct{Fields: map[string]*structpb.Value{"x": {Kind: &structpb.Value_NumberValue{NumberValue: 1}}}}), false}, + + {"bin", args{core.SimpleType_BINARY, "x"}, MustMakeLiteral([]byte("x")), false}, + + {"error", args{core.SimpleType_ERROR, "err"}, MustMakeLiteral(&core.Error{Message: "err"}), false}, + + {"none", args{core.SimpleType_NONE, "null"}, MustMakeLiteral(nil), false}, + 
} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := MakeLiteralForSimpleType(tt.args.t, tt.args.s) + if (err != nil) != tt.wantErr { + t.Errorf("MakeLiteralForSimpleType() error = %v, wantErr %v", err, tt.wantErr) + return + } + if diff := deep.Equal(tt.want, got); diff != nil { + t.Errorf("MakeLiteralForSimpleType() got = %v, want %v", got, tt.want) + } + }) + } +} + +func TestMakeLiteralForBlob(t *testing.T) { + type args struct { + path storage.DataReference + isDir bool + format string + } + tests := []struct { + name string + args args + want *core.Blob + }{ + {"simple-key", args{path: "/key", isDir: false, format: "xyz"}, &core.Blob{Uri: "/key", Metadata: &core.BlobMetadata{Type: &core.BlobType{Format: "xyz", Dimensionality: core.BlobType_SINGLE}}}}, + {"simple-dir", args{path: "/key", isDir: true, format: "xyz"}, &core.Blob{Uri: "/key", Metadata: &core.BlobMetadata{Type: &core.BlobType{Format: "xyz", Dimensionality: core.BlobType_MULTIPART}}}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := MakeLiteralForBlob(tt.args.path, tt.args.isDir, tt.args.format); !reflect.DeepEqual(got.GetScalar().GetBlob(), tt.want) { + t.Errorf("MakeLiteralForBlob() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestMakeLiteralForType(t *testing.T) { + t.Run("SimpleInteger", func(t *testing.T) { + var literalType = &core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_INTEGER}} + val, err := MakeLiteralForType(literalType, 1) + assert.NoError(t, err) + literalVal := &core.Literal{Value: &core.Literal_Scalar{Scalar: &core.Scalar{ + Value: &core.Scalar_Primitive{Primitive: &core.Primitive{Value: &core.Primitive_Integer{Integer: 1}}}}}} + expectedVal, _ := ExtractFromLiteral(literalVal) + actualVal, _ := ExtractFromLiteral(val) + assert.Equal(t, expectedVal, actualVal) + }) + + t.Run("IntegerComingInAsFloatOverFlow", func(t *testing.T) { + var literalType = &core.LiteralType{Type: 
&core.LiteralType_Simple{Simple: core.SimpleType_INTEGER}} + _, err := MakeLiteralForType(literalType, 8.888888e+19) + assert.NotNil(t, err) + numError := &strconv.NumError{ + Func: "ParseInt", + Num: "88888880000000000000", + Err: fmt.Errorf("value out of range"), + } + parseIntError := errors.WithMessage(numError, "failed to parse integer value") + assert.Equal(t, errors.WithStack(parseIntError).Error(), err.Error()) + }) + + t.Run("IntegerComingInAsFloat", func(t *testing.T) { + var literalType = &core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_INTEGER}} + val, err := MakeLiteralForType(literalType, 8.888888e+18) + assert.NoError(t, err) + literalVal := &core.Literal{Value: &core.Literal_Scalar{Scalar: &core.Scalar{ + Value: &core.Scalar_Primitive{Primitive: &core.Primitive{Value: &core.Primitive_Integer{Integer: 8.888888e+18}}}}}} + expectedVal, _ := ExtractFromLiteral(literalVal) + actualVal, _ := ExtractFromLiteral(val) + assert.Equal(t, expectedVal, actualVal) + }) + + t.Run("SimpleFloat", func(t *testing.T) { + var literalType = &core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_FLOAT}} + val, err := MakeLiteralForType(literalType, 1) + assert.NoError(t, err) + literalVal := &core.Literal{Value: &core.Literal_Scalar{Scalar: &core.Scalar{ + Value: &core.Scalar_Primitive{Primitive: &core.Primitive{Value: &core.Primitive_FloatValue{FloatValue: 1.0}}}}}} + expectedVal, _ := ExtractFromLiteral(literalVal) + actualVal, _ := ExtractFromLiteral(val) + assert.Equal(t, expectedVal, actualVal) + }) + + t.Run("Generic", func(t *testing.T) { + os.Setenv(FlyteUseOldDcFormat, "true") + literalVal := map[string]interface{}{ + "x": 1, + "y": "ystringvalue", + } + var literalType = &core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_STRUCT}} + lit, err := MakeLiteralForType(literalType, literalVal) + assert.NoError(t, err) + extractedLiteralVal, err := ExtractFromLiteral(lit) + assert.NoError(t, err) + 
fieldsMap := map[string]*structpb.Value{ + "x": { + Kind: &structpb.Value_NumberValue{NumberValue: 1}, + }, + "y": { + Kind: &structpb.Value_StringValue{StringValue: "ystringvalue"}, + }, + } + expectedStructVal := &structpb.Struct{ + Fields: fieldsMap, + } + extractedStructValue := extractedLiteralVal.(*structpb.Struct) + assert.Equal(t, len(expectedStructVal.Fields), len(extractedStructValue.Fields)) + for key, val := range expectedStructVal.Fields { + assert.Equal(t, val.Kind, extractedStructValue.Fields[key].Kind) + } + os.Unsetenv(FlyteUseOldDcFormat) + }) + + t.Run("SimpleBinary", func(t *testing.T) { + // We compare the deserialized values instead of the raw msgpack bytes because Go does not guarantee the order + // of map keys during serialization. This means that while the serialized bytes may differ, the deserialized + // values should be logically equivalent. + + var literalType = &core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_STRUCT}} + v := map[string]interface{}{ + "a": int64(1), + "b": 3.14, + "c": "example_string", + "d": map[string]interface{}{ + "1": int64(100), + "2": int64(200), + }, + "e": map[string]interface{}{ + "a": int64(1), + "b": 3.14, + }, + "f": []string{"a", "b", "c"}, + } + + val, err := MakeLiteralForType(literalType, v) + assert.NoError(t, err) + + msgpackBytes, err := msgpack.Marshal(v) + assert.NoError(t, err) + + literalVal := &core.Literal{ + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_Binary{ + Binary: &core.Binary{ + Value: msgpackBytes, + Tag: MESSAGEPACK, + }, + }, + }, + }, + } + + expectedLiteralVal, err := ExtractFromLiteral(literalVal) + assert.NoError(t, err) + actualLiteralVal, err := ExtractFromLiteral(val) + assert.NoError(t, err) + + // Check if the extracted value is of type *core.Binary (not []byte) + expectedBinary, ok := expectedLiteralVal.(*core.Binary) + assert.True(t, ok, "expectedLiteralVal is not of type *core.Binary") + actualBinary, ok := 
actualLiteralVal.(*core.Binary) + assert.True(t, ok, "actualLiteralVal is not of type *core.Binary") + + // Now check if the Binary values match + var expectedVal, actualVal map[string]interface{} + err = msgpack.Unmarshal(expectedBinary.Value, &expectedVal) + assert.NoError(t, err) + err = msgpack.Unmarshal(actualBinary.Value, &actualVal) + assert.NoError(t, err) + + // Finally, assert that the deserialized values are equal + assert.Equal(t, expectedVal, actualVal) + }) + + t.Run("ArrayStrings", func(t *testing.T) { + var literalType = &core.LiteralType{Type: &core.LiteralType_CollectionType{ + CollectionType: &core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_STRING}}}} + strArray := []interface{}{"hello", "world"} + val, err := MakeLiteralForType(literalType, strArray) + assert.NoError(t, err) + literalVal1 := &core.Literal{Value: &core.Literal_Scalar{Scalar: &core.Scalar{ + Value: &core.Scalar_Primitive{Primitive: &core.Primitive{Value: &core.Primitive_StringValue{StringValue: "hello"}}}}}} + literalVal2 := &core.Literal{Value: &core.Literal_Scalar{Scalar: &core.Scalar{ + Value: &core.Scalar_Primitive{Primitive: &core.Primitive{Value: &core.Primitive_StringValue{StringValue: "world"}}}}}} + literalCollection := []*core.Literal{literalVal1, literalVal2} + literalVal := &core.Literal{Value: &core.Literal_Collection{Collection: &core.LiteralCollection{Literals: literalCollection}}} + expectedVal, _ := ExtractFromLiteral(literalVal) + actualVal, _ := ExtractFromLiteral(val) + assert.Equal(t, expectedVal, actualVal) + }) + + t.Run("ArrayOfArrayStringsNotSupported", func(t *testing.T) { + var literalType = &core.LiteralType{Type: &core.LiteralType_CollectionType{ + CollectionType: &core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_STRING}}}} + strArrayOfArray := [][]interface{}{{"hello1", "world1"}, {"hello2", "world2"}} + _, err := MakeLiteralForType(literalType, strArrayOfArray) + expectedErrorf := fmt.Errorf("collection 
type expected but found [][]interface {}") + assert.Equal(t, expectedErrorf, err) + }) + + t.Run("ArrayOfArrayStringsTypeErasure", func(t *testing.T) { + var collectionType = &core.LiteralType{Type: &core.LiteralType_CollectionType{ + CollectionType: &core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_STRING}}}} + var literalType = &core.LiteralType{Type: &core.LiteralType_CollectionType{ + CollectionType: collectionType}} + + createList1 := func() interface{} { + return []interface{}{"hello1", "world1"} + } + createList2 := func() interface{} { + return []interface{}{"hello2", "world2"} + } + createNestedList := func() interface{} { + return []interface{}{createList1(), createList2()} + } + var strArrayOfArray = createNestedList() + val, err := MakeLiteralForType(literalType, strArrayOfArray) + assert.NoError(t, err) + literalVal11 := &core.Literal{Value: &core.Literal_Scalar{Scalar: &core.Scalar{ + Value: &core.Scalar_Primitive{Primitive: &core.Primitive{Value: &core.Primitive_StringValue{StringValue: "hello1"}}}}}} + literalVal12 := &core.Literal{Value: &core.Literal_Scalar{Scalar: &core.Scalar{ + Value: &core.Scalar_Primitive{Primitive: &core.Primitive{Value: &core.Primitive_StringValue{StringValue: "world1"}}}}}} + literalCollection1Val := []*core.Literal{literalVal11, literalVal12} + + literalCollection1 := &core.Literal{Value: &core.Literal_Collection{Collection: &core.LiteralCollection{Literals: literalCollection1Val}}} + + literalVal21 := &core.Literal{Value: &core.Literal_Scalar{Scalar: &core.Scalar{ + Value: &core.Scalar_Primitive{Primitive: &core.Primitive{Value: &core.Primitive_StringValue{StringValue: "hello2"}}}}}} + literalVal22 := &core.Literal{Value: &core.Literal_Scalar{Scalar: &core.Scalar{ + Value: &core.Scalar_Primitive{Primitive: &core.Primitive{Value: &core.Primitive_StringValue{StringValue: "world2"}}}}}} + literalCollection2Val := []*core.Literal{literalVal21, literalVal22} + literalCollection2 := &core.Literal{Value: 
&core.Literal_Collection{Collection: &core.LiteralCollection{Literals: literalCollection2Val}}} + literalCollection := []*core.Literal{literalCollection1, literalCollection2} + + literalVal := &core.Literal{Value: &core.Literal_Collection{Collection: &core.LiteralCollection{Literals: literalCollection}}} + expectedVal, _ := ExtractFromLiteral(literalVal) + actualVal, _ := ExtractFromLiteral(val) + assert.Equal(t, expectedVal, actualVal) + }) + + t.Run("MapStrings", func(t *testing.T) { + var literalType = &core.LiteralType{Type: &core.LiteralType_MapValueType{ + MapValueType: &core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_STRING}}}} + mapVal := map[string]interface{}{"hello1": "world1", "hello2": "world2"} + val, err := MakeLiteralForType(literalType, mapVal) + assert.NoError(t, err) + literalVal1 := &core.Literal{Value: &core.Literal_Scalar{Scalar: &core.Scalar{ + Value: &core.Scalar_Primitive{Primitive: &core.Primitive{Value: &core.Primitive_StringValue{StringValue: "world1"}}}}}} + literalVal2 := &core.Literal{Value: &core.Literal_Scalar{Scalar: &core.Scalar{ + Value: &core.Scalar_Primitive{Primitive: &core.Primitive{Value: &core.Primitive_StringValue{StringValue: "world2"}}}}}} + literalMapVal := map[string]*core.Literal{"hello1": literalVal1, "hello2": literalVal2} + literalVal := &core.Literal{Value: &core.Literal_Map{Map: &core.LiteralMap{Literals: literalMapVal}}} + expectedVal, _ := ExtractFromLiteral(literalVal) + actualVal, _ := ExtractFromLiteral(val) + assert.Equal(t, expectedVal, actualVal) + }) + + t.Run("MapArrayOfStringsFail", func(t *testing.T) { + var literalType = &core.LiteralType{Type: &core.LiteralType_MapValueType{ + MapValueType: &core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_STRING}}}} + strArray := map[string][]interface{}{"hello1": {"world11", "world12"}, "hello2": {"world21", "world22"}} + _, err := MakeLiteralForType(literalType, strArray) + expectedErrorf := fmt.Errorf("map value 
types can only be of type map[string]interface{}, but found map[string][]interface {}") + assert.Equal(t, expectedErrorf, err) + }) + + t.Run("MapArrayOfStringsTypeErasure", func(t *testing.T) { + var collectionType = &core.LiteralType{Type: &core.LiteralType_CollectionType{ + CollectionType: &core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_STRING}}}} + var literalType = &core.LiteralType{Type: &core.LiteralType_MapValueType{ + MapValueType: collectionType}} + createList1 := func() interface{} { + return []interface{}{"world11", "world12"} + } + createList2 := func() interface{} { + return []interface{}{"world21", "world22"} + } + strArray := map[string]interface{}{"hello1": createList1(), "hello2": createList2()} + val, err := MakeLiteralForType(literalType, strArray) + assert.NoError(t, err) + literalVal11 := &core.Literal{Value: &core.Literal_Scalar{Scalar: &core.Scalar{ + Value: &core.Scalar_Primitive{Primitive: &core.Primitive{Value: &core.Primitive_StringValue{StringValue: "world11"}}}}}} + literalVal12 := &core.Literal{Value: &core.Literal_Scalar{Scalar: &core.Scalar{ + Value: &core.Scalar_Primitive{Primitive: &core.Primitive{Value: &core.Primitive_StringValue{StringValue: "world12"}}}}}} + literalCollection1 := []*core.Literal{literalVal11, literalVal12} + literalVal1 := &core.Literal{Value: &core.Literal_Collection{Collection: &core.LiteralCollection{Literals: literalCollection1}}} + literalVal21 := &core.Literal{Value: &core.Literal_Scalar{Scalar: &core.Scalar{ + Value: &core.Scalar_Primitive{Primitive: &core.Primitive{Value: &core.Primitive_StringValue{StringValue: "world21"}}}}}} + literalVal22 := &core.Literal{Value: &core.Literal_Scalar{Scalar: &core.Scalar{ + Value: &core.Scalar_Primitive{Primitive: &core.Primitive{Value: &core.Primitive_StringValue{StringValue: "world22"}}}}}} + literalCollection2 := []*core.Literal{literalVal21, literalVal22} + literalVal2 := &core.Literal{Value: &core.Literal_Collection{Collection: 
&core.LiteralCollection{Literals: literalCollection2}}} + literalMapVal := map[string]*core.Literal{"hello1": literalVal1, "hello2": literalVal2} + literalVal := &core.Literal{Value: &core.Literal_Map{Map: &core.LiteralMap{Literals: literalMapVal}}} + expectedVal, _ := ExtractFromLiteral(literalVal) + actualVal, _ := ExtractFromLiteral(val) + assert.Equal(t, expectedVal, actualVal) + }) + + t.Run("Schema", func(t *testing.T) { + var schemaColumns []*core.SchemaType_SchemaColumn + schemaColumns = append(schemaColumns, &core.SchemaType_SchemaColumn{ + Name: "Price", + Type: core.SchemaType_SchemaColumn_FLOAT, + }) + var literalType = &core.LiteralType{Type: &core.LiteralType_Schema{Schema: &core.SchemaType{ + Columns: schemaColumns, + }}} + + expectedLV := &core.Literal{Value: &core.Literal_Scalar{Scalar: &core.Scalar{ + Value: &core.Scalar_Schema{ + Schema: &core.Schema{ + Uri: "s3://blah/blah/blah", + Type: &core.SchemaType{ + Columns: schemaColumns, + }, + }, + }, + }}} + lv, err := MakeLiteralForType(literalType, "s3://blah/blah/blah") + assert.NoError(t, err) + + assert.Equal(t, expectedLV, lv) + + expectedVal, err := ExtractFromLiteral(expectedLV) + assert.NoError(t, err) + actualVal, err := ExtractFromLiteral(lv) + assert.NoError(t, err) + assert.Equal(t, expectedVal, actualVal) + }) + + t.Run("Blob", func(t *testing.T) { + var literalType = &core.LiteralType{Type: &core.LiteralType_Blob{Blob: &core.BlobType{ + Dimensionality: core.BlobType_SINGLE, + }}} + expectedLV := &core.Literal{Value: &core.Literal_Scalar{Scalar: &core.Scalar{ + Value: &core.Scalar_Blob{ + Blob: &core.Blob{ + Uri: "s3://blah/blah/blah", + Metadata: &core.BlobMetadata{ + Type: &core.BlobType{ + Dimensionality: core.BlobType_SINGLE, + }, + }, + }, + }, + }}} + lv, err := MakeLiteralForType(literalType, "s3://blah/blah/blah") + assert.NoError(t, err) + + assert.Equal(t, expectedLV, lv) + + expectedVal, err := ExtractFromLiteral(expectedLV) + assert.NoError(t, err) + actualVal, err := 
ExtractFromLiteral(lv) + assert.NoError(t, err) + assert.Equal(t, expectedVal, actualVal) + }) + + t.Run("MultipartBlob", func(t *testing.T) { + var literalType = &core.LiteralType{Type: &core.LiteralType_Blob{Blob: &core.BlobType{ + Dimensionality: core.BlobType_MULTIPART, + }}} + expectedLV := &core.Literal{Value: &core.Literal_Scalar{Scalar: &core.Scalar{ + Value: &core.Scalar_Blob{ + Blob: &core.Blob{ + Uri: "s3://blah/blah/blah", + Metadata: &core.BlobMetadata{ + Type: &core.BlobType{ + Dimensionality: core.BlobType_MULTIPART, + }, + }, + }, + }, + }}} + lv, err := MakeLiteralForType(literalType, "s3://blah/blah/blah") + assert.NoError(t, err) + + assert.Equal(t, expectedLV, lv) + + expectedVal, err := ExtractFromLiteral(expectedLV) + assert.NoError(t, err) + actualVal, err := ExtractFromLiteral(lv) + assert.NoError(t, err) + assert.Equal(t, expectedVal, actualVal) + }) + + t.Run("enumtype-nil", func(t *testing.T) { + var literalType = &core.LiteralType{Type: &core.LiteralType_EnumType{EnumType: &core.EnumType{}}} + _, err := MakeLiteralForType(literalType, nil) + assert.Error(t, err) + _, err = MakeLiteralForType(literalType, "") + assert.Error(t, err) + }) + + t.Run("enumtype-happy", func(t *testing.T) { + var literalType = &core.LiteralType{Type: &core.LiteralType_EnumType{EnumType: &core.EnumType{Values: []string{"x", "y", "z"}}}} + expected := &core.Literal{Value: &core.Literal_Scalar{Scalar: &core.Scalar{ + Value: &core.Scalar_Primitive{Primitive: &core.Primitive{Value: &core.Primitive_StringValue{StringValue: "x"}}}}}} + v, err := MakeLiteralForType(literalType, "x") + assert.NoError(t, err) + assert.Equal(t, expected, v) + _, err = MakeLiteralForType(literalType, "") + assert.Error(t, err) + }) + + t.Run("enumtype-illegal-val", func(t *testing.T) { + var literalType = &core.LiteralType{Type: &core.LiteralType_EnumType{EnumType: &core.EnumType{Values: []string{"x", "y", "z"}}}} + _, err := MakeLiteralForType(literalType, "m") + assert.Error(t, err) + }) 
+ + t.Run("Nil string", func(t *testing.T) { + var literalType = &core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_STRING}} + l, err := MakeLiteralForType(literalType, nil) + assert.NoError(t, err) + assert.Equal(t, "", l.GetScalar().GetPrimitive().GetStringValue()) + l, err = MakeLiteralForType(literalType, "") + assert.NoError(t, err) + assert.Equal(t, "", l.GetScalar().GetPrimitive().GetStringValue()) + }) + + t.Run("Structured Data Set", func(t *testing.T) { + var dataSetColumns []*core.StructuredDatasetType_DatasetColumn + dataSetColumns = append(dataSetColumns, &core.StructuredDatasetType_DatasetColumn{ + Name: "Price", + LiteralType: &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_FLOAT, + }, + }, + }) + var literalType = &core.LiteralType{Type: &core.LiteralType_StructuredDatasetType{StructuredDatasetType: &core.StructuredDatasetType{ + Columns: dataSetColumns, + Format: "testFormat", + }}} + + expectedLV := &core.Literal{Value: &core.Literal_Scalar{Scalar: &core.Scalar{ + Value: &core.Scalar_StructuredDataset{ + StructuredDataset: &core.StructuredDataset{ + Uri: "s3://blah/blah/blah", + Metadata: &core.StructuredDatasetMetadata{ + StructuredDatasetType: &core.StructuredDatasetType{ + Columns: dataSetColumns, + Format: "testFormat", + }, + }, + }, + }, + }}} + lv, err := MakeLiteralForType(literalType, "s3://blah/blah/blah") + assert.NoError(t, err) + + assert.Equal(t, expectedLV, lv) + + expectedVal, err := ExtractFromLiteral(expectedLV) + assert.NoError(t, err) + actualVal, err := ExtractFromLiteral(lv) + assert.NoError(t, err) + assert.Equal(t, expectedVal, actualVal) + }) + + t.Run("Union", func(t *testing.T) { + var literalType = &core.LiteralType{ + Type: &core.LiteralType_UnionType{ + UnionType: &core.UnionType{ + Variants: []*core.LiteralType{ + {Type: &core.LiteralType_Simple{Simple: core.SimpleType_INTEGER}}, + {Type: &core.LiteralType_Simple{Simple: core.SimpleType_FLOAT}}, + }, + }, + }, + } 
+ expectedLV := &core.Literal{Value: &core.Literal_Scalar{Scalar: &core.Scalar{ + Value: &core.Scalar_Union{ + Union: &core.Union{ + Type: &core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_FLOAT}}, + Value: &core.Literal{Value: &core.Literal_Scalar{Scalar: &core.Scalar{ + Value: &core.Scalar_Primitive{Primitive: &core.Primitive{Value: &core.Primitive_FloatValue{FloatValue: 0.1}}}}}}, + }, + }, + }}} + lv, err := MakeLiteralForType(literalType, float64(0.1)) + assert.NoError(t, err) + assert.Equal(t, expectedLV, lv) + expectedVal, err := ExtractFromLiteral(expectedLV) + assert.NoError(t, err) + actualVal, err := ExtractFromLiteral(lv) + assert.NoError(t, err) + assert.Equal(t, expectedVal, actualVal) + }) +} diff --git a/flyteidl2/common/configuration.proto b/flyteidl2/common/configuration.proto new file mode 100644 index 0000000000..1d47395336 --- /dev/null +++ b/flyteidl2/common/configuration.proto @@ -0,0 +1,26 @@ +syntax = "proto3"; + +package flyteidl2.common; + +option go_package = "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/common"; + +// The source of an attribute. We may have other sources in the future. +enum AttributesSource { + // The source is unspecified. + SOURCE_UNSPECIFIED = 0; + + // The configuration is a global configuration. + GLOBAL = 1; + + // The configuration is a domain configuration. + DOMAIN = 2; + + // The configuration is a project configuration. + PROJECT = 3; + + // The configuration is a project-domain configuration. + PROJECT_DOMAIN = 4; + + // The configuration is a org configuration. 
+ ORG = 5; +} diff --git a/flyteidl2/core/errors.proto b/flyteidl2/core/errors.proto new file mode 100644 index 0000000000..417f1f6e32 --- /dev/null +++ b/flyteidl2/core/errors.proto @@ -0,0 +1,35 @@ +syntax = "proto3"; + +package flyteidl2.core; + +import "flyteidl2/core/execution.proto"; + +option go_package = "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core"; + +// Error message to propagate detailed errors from container executions to the execution +// engine. +message ContainerError { + // A simplified code for errors, so that we can provide a glossary of all possible errors. + string code = 1; + // A detailed error message. + string message = 2; + + // Defines a generic error type that dictates the behavior of the retry strategy. + enum Kind { + NON_RECOVERABLE = 0; + RECOVERABLE = 1; + } + + // An abstract error kind for this error. Defaults to Non_Recoverable if not specified. + Kind kind = 3; + + // Defines the origin of the error (system, user, unknown). + ExecutionError.ErrorKind origin = 4; +} + +// Defines the errors.pb file format the container can produce to communicate +// failure reasons to the execution engine. +message ErrorDocument { + // The error raised during execution. + ContainerError error = 1; +} diff --git a/flyteidl2/datacatalog/datacatalog.proto b/flyteidl2/datacatalog/datacatalog.proto new file mode 100644 index 0000000000..865b3a9ac3 --- /dev/null +++ b/flyteidl2/datacatalog/datacatalog.proto @@ -0,0 +1,413 @@ +syntax = "proto3"; + +package flyteidl2.datacatalog; + +import "flyteidl2/core/literals.proto"; +import "google/protobuf/duration.proto"; +import "google/protobuf/timestamp.proto"; + +option go_package = "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/datacatalog"; + +// TODO @pvditt clean this up + +/* + * Data Catalog service definition + * Data Catalog is a service for indexing parameterized, strongly-typed data artifacts across revisions. + * Artifacts are associated with a Dataset, and can be tagged for retrieval. 
+ */ +service DataCatalog { + // Create a new Dataset. Datasets are unique based on the DatasetID. Datasets are logical groupings of artifacts. + // Each dataset can have one or more artifacts + rpc CreateDataset(CreateDatasetRequest) returns (CreateDatasetResponse); + + // Get a Dataset by the DatasetID. This returns the Dataset with the associated metadata. + rpc GetDataset(GetDatasetRequest) returns (GetDatasetResponse); + + // Create an artifact and the artifact data associated with it. An artifact can be a hive partition or arbitrary + // files or data values + rpc CreateArtifact(CreateArtifactRequest) returns (CreateArtifactResponse); + + // Retrieve an artifact by an identifying handle. This returns an artifact along with the artifact data. + rpc GetArtifact(GetArtifactRequest) returns (GetArtifactResponse); + + // Associate a tag with an artifact. Tags are unique within a Dataset. + rpc AddTag(AddTagRequest) returns (AddTagResponse); + + // Return a paginated list of artifacts + rpc ListArtifacts(ListArtifactsRequest) returns (ListArtifactsResponse); + + // Return a paginated list of datasets + rpc ListDatasets(ListDatasetsRequest) returns (ListDatasetsResponse); + + // Updates an existing artifact, overwriting the stored artifact data in the underlying blob storage. + rpc UpdateArtifact(UpdateArtifactRequest) returns (UpdateArtifactResponse); + + // Attempts to get or extend a reservation for the corresponding artifact. If one already exists + // (ie. another entity owns the reservation) then that reservation is retrieved. + // Once you acquire a reservation, you need to periodically extend the reservation with an + // identical call. If the reservation is not extended before the defined expiration, it may be + // acquired by another task. + // Note: We may have multiple concurrent tasks with the same signature and the same input that + // try to populate the same artifact at the same time. 
Thus with reservation, only one task can + // run at a time, until the reservation expires. + // Note: If task A does not extend the reservation in time and the reservation expires, another + // task B may take over the reservation, resulting in two tasks A and B running in parallel. So + // a third task C may get the Artifact from A or B, whichever writes last. + rpc GetOrExtendReservation(GetOrExtendReservationRequest) returns (GetOrExtendReservationResponse); + + // Release the reservation when the task holding the spot fails so that the other tasks + // can grab the spot. + rpc ReleaseReservation(ReleaseReservationRequest) returns (ReleaseReservationResponse); +} + +/* + * Request message for creating a Dataset. + */ +message CreateDatasetRequest { + Dataset dataset = 1; +} + +/* + * Response message for creating a Dataset + */ +message CreateDatasetResponse {} + +/* + * Request message for retrieving a Dataset. The Dataset is retrieved by it's unique identifier + * which is a combination of several fields. + */ +message GetDatasetRequest { + DatasetID dataset = 1; +} + +/* + * Response message for retrieving a Dataset. The response will include the metadata for the + * Dataset. + */ +message GetDatasetResponse { + Dataset dataset = 1; +} + +/* + * Request message for retrieving an Artifact. Retrieve an artifact based on a query handle that + * can be one of artifact_id or tag. The result returned will include the artifact data and metadata + * associated with the artifact. + */ +message GetArtifactRequest { + DatasetID dataset = 1; + + oneof query_handle { + string artifact_id = 2; + string tag_name = 3; + } +} + +/* + * Response message for retrieving an Artifact. The result returned will include the artifact data + * and metadata associated with the artifact. + */ +message GetArtifactResponse { + Artifact artifact = 1; +} + +/* + * Request message for creating an Artifact and its associated artifact Data. 
+ */ +message CreateArtifactRequest { + Artifact artifact = 1; +} + +/* + * Response message for creating an Artifact. + */ +message CreateArtifactResponse {} + +/* + * Request message for tagging an Artifact. + */ +message AddTagRequest { + Tag tag = 1; +} + +/* + * Response message for tagging an Artifact. + */ +message AddTagResponse {} + +// List the artifacts that belong to the Dataset, optionally filtered using filtered expression. +message ListArtifactsRequest { + // Use a datasetID for which you want to retrieve the artifacts + DatasetID dataset = 1; + + // Apply the filter expression to this query + FilterExpression filter = 2; + // Pagination options to get a page of artifacts + PaginationOptions pagination = 3; +} + +// Response to list artifacts +message ListArtifactsResponse { + // The list of artifacts + repeated Artifact artifacts = 1; + // Token to use to request the next page, pass this into the next requests PaginationOptions + string next_token = 2; +} + +// List the datasets for the given query +message ListDatasetsRequest { + // Apply the filter expression to this query + FilterExpression filter = 1; + // Pagination options to get a page of datasets + PaginationOptions pagination = 2; +} + +// List the datasets response with token for next pagination +message ListDatasetsResponse { + // The list of datasets + repeated Dataset datasets = 1; + // Token to use to request the next page, pass this into the next requests PaginationOptions + string next_token = 2; +} + +/* + * Request message for updating an Artifact and overwriting its associated ArtifactData. + */ +message UpdateArtifactRequest { + // ID of dataset the artifact is associated with + DatasetID dataset = 1; + + // Either ID of artifact or name of tag to retrieve existing artifact from + oneof query_handle { + string artifact_id = 2; + string tag_name = 3; + } + + // List of data to overwrite stored artifact data with. 
Must contain ALL data for updated Artifact as any missing + // ArtifactData entries will be removed from the underlying blob storage and database. + repeated ArtifactData data = 4; + + // Update execution metadata(including execution domain, name, node, project data) when overwriting cache + Metadata metadata = 5; +} + +/* + * Response message for updating an Artifact. + */ +message UpdateArtifactResponse { + // The unique ID of the artifact updated + string artifact_id = 1; +} + +/* + * ReservationID message that is composed of several string fields. + */ +message ReservationID { + // The unique ID for the reserved dataset + DatasetID dataset_id = 1; + + // The specific artifact tag for the reservation + string tag_name = 2; +} + +// Try to acquire or extend an artifact reservation. If an active reservation exists, retrieve that instance. +message GetOrExtendReservationRequest { + // The unique ID for the reservation + ReservationID reservation_id = 1; + + // The unique ID of the owner for the reservation + string owner_id = 2; + + // Requested reservation extension heartbeat interval + google.protobuf.Duration heartbeat_interval = 3; +} + +// A reservation including owner, heartbeat interval, expiration timestamp, and various metadata. 
+message Reservation { + // The unique ID for the reservation + ReservationID reservation_id = 1; + + // The unique ID of the owner for the reservation + string owner_id = 2; + + // Recommended heartbeat interval to extend reservation + google.protobuf.Duration heartbeat_interval = 3; + + // Expiration timestamp of this reservation + google.protobuf.Timestamp expires_at = 4; + + // Free-form metadata associated with the artifact + Metadata metadata = 6; +} + +// Response including either a newly minted reservation or the existing reservation +message GetOrExtendReservationResponse { + // The reservation to be acquired or extended + Reservation reservation = 1; +} + +// Request to release reservation +message ReleaseReservationRequest { + // The unique ID for the reservation + ReservationID reservation_id = 1; + + // The unique ID of the owner for the reservation + string owner_id = 2; +} + +// Response to release reservation +message ReleaseReservationResponse {} + +/* + * Dataset message. It is uniquely identified by DatasetID. + */ +message Dataset { + DatasetID id = 1; + Metadata metadata = 2; + repeated string partitionKeys = 3; +} + +/* + * An artifact could have multiple partitions and each partition can have an arbitrary string key/value pair + */ +message Partition { + string key = 1; + string value = 2; +} + +/* + * DatasetID message that is composed of several string fields. + */ +message DatasetID { + string project = 1; // The name of the project + string name = 2; // The name of the dataset + string domain = 3; // The domain (eg. environment) + string version = 4; // Version of the data schema + string UUID = 5; // UUID for the dataset (if set the above fields are optional) + + // Optional, org key applied to the resource. + string org = 6; +} + +/* + * Artifact message. It is composed of several string fields. 
+ */ +message Artifact { + string id = 1; // The unique ID of the artifact + DatasetID dataset = 2; // The Dataset that the artifact belongs to + repeated ArtifactData data = 3; // A list of data that is associated with the artifact + Metadata metadata = 4; // Free-form metadata associated with the artifact + repeated Partition partitions = 5; + repeated Tag tags = 6; + google.protobuf.Timestamp created_at = 7; // creation timestamp of artifact, autogenerated by service +} + +/* + * ArtifactData that belongs to an artifact + */ +message ArtifactData { + string name = 1; + flyteidl2.core.Literal value = 2; +} + +/* + * Tag message that is unique to a Dataset. It is associated to a single artifact and + * can be retrieved by name later. + */ +message Tag { + string name = 1; // Name of tag + string artifact_id = 2; // The tagged artifact + DatasetID dataset = 3; // The Dataset that this tag belongs to +} + +/* + * Metadata representation for artifacts and datasets + */ +message Metadata { + map key_map = 1; // key map is a dictionary of key/val strings that represent metadata +} + +// Filter expression that is composed of a combination of single filters +message FilterExpression { + repeated SinglePropertyFilter filters = 1; +} + +// A single property to filter on. +message SinglePropertyFilter { + oneof property_filter { + TagPropertyFilter tag_filter = 1; + PartitionPropertyFilter partition_filter = 2; + ArtifactPropertyFilter artifact_filter = 3; + DatasetPropertyFilter dataset_filter = 4; + } + + // as use-cases come up we can add more operators, ex: gte, like, not eq etc. 
+ enum ComparisonOperator { + EQUALS = 0; + } + + ComparisonOperator operator = 10; // field 10 in case we add more entities to query + // Next field number: 11 +} + +// Artifact properties we can filter by +message ArtifactPropertyFilter { + // oneof because we can add more properties in the future + oneof property { + string artifact_id = 1; + } +} + +// Tag properties we can filter by +message TagPropertyFilter { + oneof property { + string tag_name = 1; + } +} + +// Partition properties we can filter by +message PartitionPropertyFilter { + oneof property { + KeyValuePair key_val = 1; + } +} + +message KeyValuePair { + string key = 1; + string value = 2; +} + +// Dataset properties we can filter by +message DatasetPropertyFilter { + oneof property { + string project = 1; + string name = 2; + string domain = 3; + string version = 4; + // Optional, org key applied to the dataset. + string org = 5; + } +} + +// Pagination options for making list requests +message PaginationOptions { + // the max number of results to return + uint32 limit = 1; + + // the token to pass to fetch the next page + string token = 2; + + // the property that we want to sort the results by + SortKey sortKey = 3; + + // the sort order of the results + SortOrder sortOrder = 4; + + enum SortOrder { + DESCENDING = 0; + ASCENDING = 1; + } + + enum SortKey { + CREATION_TIME = 0; + } +} diff --git a/flyteidl2/event/cloudevents.proto b/flyteidl2/event/cloudevents.proto new file mode 100644 index 0000000000..3d1144384a --- /dev/null +++ b/flyteidl2/event/cloudevents.proto @@ -0,0 +1,79 @@ +syntax = "proto3"; + +package flyteidl2.event; + +import "flyteidl2/core/artifact_id.proto"; +import "flyteidl2/core/identifier.proto"; +import "flyteidl2/core/interface.proto"; +import "flyteidl2/event/event.proto"; + +option go_package = "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/event"; + +// This is the cloud event parallel to the raw WorkflowExecutionEvent message. 
It's filled in with additional +// information that downstream consumers may find useful. +message CloudEventWorkflowExecution { + event.WorkflowExecutionEvent raw_event = 1; + + core.TypedInterface output_interface = 2; + + // The following are ExecutionMetadata fields + // We can't have the ExecutionMetadata object directly because of import cycle + repeated core.ArtifactID artifact_ids = 3; + core.WorkflowExecutionIdentifier reference_execution = 4; + string principal = 5; + + // The ID of the LP that generated the execution that generated the Artifact. + // Here for provenance information. + // Launch plan IDs are easier to get than workflow IDs so we'll use these for now. + core.Identifier launch_plan_id = 6; + + // We can't have the ExecutionMetadata object directly because of import cycle + map labels = 7; +} + +message CloudEventNodeExecution { + event.NodeExecutionEvent raw_event = 1; + + // The relevant task execution if applicable + core.TaskExecutionIdentifier task_exec_id = 2; + + // The typed interface for the task that produced the event. + core.TypedInterface output_interface = 3; + + // The following are ExecutionMetadata fields + // We can't have the ExecutionMetadata object directly because of import cycle + repeated core.ArtifactID artifact_ids = 4; + string principal = 5; + + // The ID of the LP that generated the execution that generated the Artifact. + // Here for provenance information. + // Launch plan IDs are easier to get than workflow IDs so we'll use these for now. + core.Identifier launch_plan_id = 6; + + // We can't have the ExecutionMetadata object directly because of import cycle + map labels = 7; +} + +message CloudEventTaskExecution { + event.TaskExecutionEvent raw_event = 1; + // We can't have the ExecutionMetadata object directly because of import cycle + map labels = 2; +} + +// This event is to be sent by Admin after it creates an execution. +message CloudEventExecutionStart { + // The execution created. 
+ core.WorkflowExecutionIdentifier execution_id = 1; + // The launch plan used. + core.Identifier launch_plan_id = 2; + + core.Identifier workflow_id = 3; + + // Artifact inputs to the workflow execution for which we have the full Artifact ID. These are likely the result of artifact queries that are run. + repeated core.ArtifactID artifact_ids = 4; + + // Artifact inputs to the workflow execution for which we only have the tracking bit that's installed into the Literal's metadata by the Artifact service. + repeated string artifact_trackers = 5; + + string principal = 6; +} diff --git a/flyteidl2/event/event.proto b/flyteidl2/event/event.proto new file mode 100644 index 0000000000..526efde052 --- /dev/null +++ b/flyteidl2/event/event.proto @@ -0,0 +1,326 @@ +syntax = "proto3"; + +package flyteidl2.event; + +import "flyteidl2/core/catalog.proto"; +import "flyteidl2/core/execution.proto"; +import "flyteidl2/core/identifier.proto"; +import "flyteidl2/core/literals.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/timestamp.proto"; + +option go_package = "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/event"; + +message WorkflowExecutionEvent { + // Workflow execution id + core.WorkflowExecutionIdentifier execution_id = 1; + + // the id of the originator (Propeller) of the event + string producer_id = 2; + + core.WorkflowExecution.Phase phase = 3; + + // This timestamp represents when the original event occurred, it is generated + // by the executor of the workflow. + google.protobuf.Timestamp occurred_at = 4; + + oneof output_result { + // URL to the output of the execution, it encodes all the information + // including Cloud source provider. ie., s3://... + string output_uri = 5; + + // Error information for the execution + core.ExecutionError error = 6; + + // Raw output data produced by this workflow execution. 
+ core.LiteralMap output_data = 7; + } +} + +message NodeExecutionEvent { + // Unique identifier for this node execution + core.NodeExecutionIdentifier id = 1; + + // the id of the originator (Propeller) of the event + string producer_id = 2; + + core.NodeExecution.Phase phase = 3; + + // This timestamp represents when the original event occurred, it is generated + // by the executor of the node. + google.protobuf.Timestamp occurred_at = 4; + + oneof input_value { + string input_uri = 5; + + // Raw input data consumed by this node execution. + core.LiteralMap input_data = 20; + } + + oneof output_result { + // URL to the output of the execution, it encodes all the information + // including Cloud source provider. ie., s3://... + string output_uri = 6; + + // Error information for the execution + core.ExecutionError error = 7; + + // Raw output data produced by this node execution. + core.LiteralMap output_data = 15; + } + + // Additional metadata to do with this event's node target based + // on the node type + oneof target_metadata { + WorkflowNodeMetadata workflow_node_metadata = 8; + TaskNodeMetadata task_node_metadata = 14; + } + + // [To be deprecated] Specifies which task (if any) launched this node. + ParentTaskExecutionMetadata parent_task_metadata = 9; + + // Specifies the parent node of the current node execution. Node executions at level zero will not have a parent node. + ParentNodeExecutionMetadata parent_node_metadata = 10; + + // Retry group to indicate grouping of nodes by retries + string retry_group = 11; + + // Identifier of the node in the original workflow/graph + // This maps to value of WorkflowTemplate.nodes[X].id + string spec_node_id = 12; + + // Friendly readable name for the node + string node_name = 13; + + int32 event_version = 16; + + // Whether this node launched a subworkflow. + bool is_parent = 17; + + // Whether this node yielded a dynamic workflow. 
+ bool is_dynamic = 18; + + // String location uniquely identifying where the deck HTML file is + // NativeUrl specifies the url in the format of the configured storage provider (e.g. s3://my-bucket/randomstring/suffix.tar) + string deck_uri = 19; + + // This timestamp represents the instant when the event was reported by the executing framework. For example, + // when first processing a node the `occurred_at` timestamp should be the instant propeller makes progress, so when + // literal inputs are initially copied. The event however will not be sent until after the copy completes. + // Extracting both of these timestamps facilitates a more accurate portrayal of the evaluation time-series. + google.protobuf.Timestamp reported_at = 21; + + // Indicates if this node is an ArrayNode. + bool is_array = 22; + + // So that Admin doesn't have to rebuild the node execution graph to find the target entity, propeller will fill this + // in optionally - currently this is only filled in for subworkflows. This is the ID of the subworkflow corresponding + // to this node execution. It is difficult to find because Admin only sees one node at a time. A subworkflow could be + // nested multiple layers deep, and you'd need to access the correct workflow template to know the target subworkflow. + core.Identifier target_entity = 23; + + // Tasks and subworkflows (but not launch plans) that are run within a dynamic task are effectively independent of + // the tasks that are registered in Admin's db. Confusingly, they are often identical, but sometimes they are not + // even registered at all. Similar to the target_entity field, at the time Admin receives this event, it has no idea + // if the relevant execution entity was registered, or dynamic. This field indicates that the target_entity ID, + // as well as task IDs in any corresponding Task Executions, should not be used to look up the task in Admin's db. 
+ bool is_in_dynamic_chain = 24; + + // Whether this node launched an eager task. + bool is_eager = 25; +} + +// For Workflow Nodes we need to send information about the workflow that's launched +message WorkflowNodeMetadata { + core.WorkflowExecutionIdentifier execution_id = 1; +} + +message TaskNodeMetadata { + // Captures the status of caching for this execution. + core.CatalogCacheStatus cache_status = 1; + // This structure carries the catalog artifact information + core.CatalogMetadata catalog_key = 2; + // Captures the status of cache reservations for this execution. + core.CatalogReservation.Status reservation_status = 3; + // The latest checkpoint location + string checkpoint_uri = 4; +} + +message ParentTaskExecutionMetadata { + core.TaskExecutionIdentifier id = 1; +} + +message ParentNodeExecutionMetadata { + // Unique identifier of the parent node id within the execution + // This is value of core.NodeExecutionIdentifier.node_id of the parent node + string node_id = 1; +} + +message EventReason { + // An explanation for this event + string reason = 1; + + // The time this reason occurred + google.protobuf.Timestamp occurred_at = 2; +} + +// Plugin specific execution event information. For tasks like Python, Hive, Spark, DynamicJob. +message TaskExecutionEvent { + // ID of the task. In combination with the retryAttempt this will indicate + // the task execution uniquely for a given parent node execution. 
+ core.Identifier task_id = 1; + + // A task execution is always kicked off by a node execution, the event consumer + // will use the parent_id to relate the task to its parent node execution + core.NodeExecutionIdentifier parent_node_execution_id = 2; + + // retry attempt number for this task, ie., 2 for the second attempt + uint32 retry_attempt = 3; + + // Phase associated with the event + core.TaskExecution.Phase phase = 4; + + // id of the process that sent this event, mainly for trace debugging + string producer_id = 5; + + // log information for the task execution + repeated core.TaskLog logs = 6; + + // This timestamp represents when the original event occurred, it is generated + // by the executor of the task. + google.protobuf.Timestamp occurred_at = 7; + + oneof input_value { + // URI of the input file, it encodes all the information + // including Cloud source provider. ie., s3://... + string input_uri = 8; + + // Raw input data consumed by this task execution. + core.LiteralMap input_data = 19; + } + + oneof output_result { + // URI to the output of the execution, it will be in a format that encodes all the information + // including Cloud source provider. ie., s3://... + string output_uri = 9; + + // Error information for the execution + core.ExecutionError error = 10; + + // Raw output data produced by this task execution. + core.LiteralMap output_data = 17; + } + + // Custom data that the task plugin sends back. This is extensible to allow various plugins in the system. + google.protobuf.Struct custom_info = 11; + + // Some phases, like RUNNING, can send multiple events with changed metadata (new logs, additional custom_info, etc) + // that should be recorded regardless of the lack of phase change. + // The version field should be incremented when metadata changes across the duration of an individual phase. + uint32 phase_version = 12; + + // An optional explanation for the phase transition. + // Deprecated: Use reasons instead. 
+ string reason = 13 [deprecated = true]; + + // An optional list of explanations for the phase transition. + repeated EventReason reasons = 21; + + // A predefined yet extensible Task type identifier. If the task definition is already registered in flyte admin + // this type will be identical, but not all task executions necessarily use pre-registered definitions and this + // type is useful to render the task in the UI, filter task executions, etc. + string task_type = 14; + + // Metadata around how a task was executed. + TaskExecutionMetadata metadata = 16; + + // The event version is used to indicate versioned changes in how data is reported using this + // proto message. For example, event_version > 0 means that map tasks report logs using the + // TaskExecutionMetadata ExternalResourceInfo fields for each subtask rather than the TaskLog + // in this message. + int32 event_version = 18; + + // This timestamp represents the instant when the event was reported by the executing framework. For example, a k8s + // pod task may be marked completed at (ie. `occurred_at`) the instant the container running user code completes, + // but this event will not be reported until the pod is marked as completed. Extracting both of these timestamps + // facilitates a more accurate portrayal of the evaluation time-series. + google.protobuf.Timestamp reported_at = 20; + + // Contains metadata required to identify logs related to this task execution + core.LogContext log_context = 22; +} + +// This message contains metadata about external resources produced or used by a specific task execution. +message ExternalResourceInfo { + // Identifier for an external resource created by this task execution, for example Qubole query ID or presto query ids. + string external_id = 1; + + // A unique index for the external resource with respect to all external resources for this task. 
Although the + // identifier may change between task reporting events or retries, this will remain the same to enable aggregating + // information from multiple reports. + uint32 index = 2; + + // Retry attempt number for this external resource, ie., 2 for the second attempt + uint32 retry_attempt = 3; + + // Phase associated with the external resource + core.TaskExecution.Phase phase = 4; + + // Captures the status of caching for this external resource execution. + core.CatalogCacheStatus cache_status = 5; + + // log information for the external resource execution + repeated core.TaskLog logs = 6; + + // Additional metadata to do with this event's node target based on the node type. We are + // explicitly not including the task_node_metadata here because it is not clear if it is needed. + // If we decide to include in the future, we should deprecate the cache_status field. + oneof target_metadata { + WorkflowNodeMetadata workflow_node_metadata = 7; + } + + // Extensible field for custom, plugin-specific info + google.protobuf.Struct custom_info = 8; + + // Contains metadata required to identify logs related to this task execution + core.LogContext log_context = 9; +} + +// This message holds task execution metadata specific to resource allocation used to manage concurrent +// executions for a project namespace. +message ResourcePoolInfo { + // Unique resource ID used to identify this execution when allocating a token. + string allocation_token = 1; + + // Namespace under which this task execution requested an allocation token. + string namespace = 2; +} + +// Holds metadata around how a task was executed. +// As a task transitions across event phases during execution some attributes, such its generated name, generated external resources, +// and more may grow in size but not change necessarily based on the phase transition that sparked the event update. +// Metadata is a container for these attributes across the task execution lifecycle. 
+message TaskExecutionMetadata { + // Unique, generated name for this task execution used by the backend. + string generated_name = 1; + + // Additional data on external resources on other back-ends or platforms (e.g. Hive, Qubole, etc) launched by this task execution. + repeated ExternalResourceInfo external_resources = 2; + + // Includes additional data on concurrent resource management used during execution.. + // This is a repeated field because a plugin can request multiple resource allocations during execution. + repeated ResourcePoolInfo resource_pool_info = 3; + + // The identifier of the plugin used to execute this task. + string plugin_identifier = 4; + + // Includes the broad category of machine used for this specific task execution. + enum InstanceClass { + // The default instance class configured for the flyte application platform. + DEFAULT = 0; + + // The instance class configured for interruptible tasks. + INTERRUPTIBLE = 1; + } + InstanceClass instance_class = 16; +} diff --git a/flyteidl2/plugins/common.proto b/flyteidl2/plugins/common.proto new file mode 100644 index 0000000000..bd00e0a7e6 --- /dev/null +++ b/flyteidl2/plugins/common.proto @@ -0,0 +1,27 @@ +syntax = "proto3"; + +package flyteidl2.plugins; + +import "flyteidl2/core/tasks.proto"; + +option go_package = "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins"; + +enum RestartPolicy { + RESTART_POLICY_NEVER = 0; + RESTART_POLICY_ON_FAILURE = 1; + RESTART_POLICY_ALWAYS = 2; +} + +message CommonReplicaSpec { + // Number of replicas + int32 replicas = 1; + + // Image used for the replica group + string image = 2; + + // Resources required for the replica group + core.Resources resources = 3; + + // RestartPolicy determines whether pods will be restarted when they exit + RestartPolicy restart_policy = 4; +} diff --git a/flyteidl2/plugins/kubeflow/common.proto b/flyteidl2/plugins/kubeflow/common.proto new file mode 100644 index 0000000000..00ebf4775d --- /dev/null +++ 
b/flyteidl2/plugins/kubeflow/common.proto @@ -0,0 +1,26 @@ +syntax = "proto3"; + +package flyteidl2.plugins.kubeflow; + +option go_package = "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins/kubeflow"; + +enum CleanPodPolicy { + CLEANPOD_POLICY_NONE = 0; + CLEANPOD_POLICY_RUNNING = 1; + CLEANPOD_POLICY_ALL = 2; +} + +message RunPolicy { + // Defines the policy to kill pods after the job completes. Default to None. + CleanPodPolicy clean_pod_policy = 1; + + // TTL to clean up jobs. Default to infinite. + int32 ttl_seconds_after_finished = 2; + + // Specifies the duration in seconds relative to the startTime that the job may be active + // before the system tries to terminate it; value must be positive integer. + int32 active_deadline_seconds = 3; + + // Number of retries before marking this job failed. + int32 backoff_limit = 4; +} diff --git a/flyteidl2/plugins/kubeflow/mpi.proto b/flyteidl2/plugins/kubeflow/mpi.proto new file mode 100644 index 0000000000..601f3b0ed0 --- /dev/null +++ b/flyteidl2/plugins/kubeflow/mpi.proto @@ -0,0 +1,48 @@ +syntax = "proto3"; + +package flyteidl2.plugins.kubeflow; + +import "flyteidl2/core/tasks.proto"; +import "flyteidl2/plugins/common.proto"; +import "flyteidl2/plugins/kubeflow/common.proto"; + +option go_package = "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins/kubeflow"; + +// Proto for plugin that enables distributed training using https://github.com/kubeflow/mpi-operator +message DistributedMPITrainingTask { + // Worker replicas spec + DistributedMPITrainingReplicaSpec worker_replicas = 1; + + // Master replicas spec + DistributedMPITrainingReplicaSpec launcher_replicas = 2; + + // RunPolicy encapsulates various runtime policies of the distributed training + // job, for example how to clean up resources and how long the job can stay + // active. 
+ RunPolicy run_policy = 3; + + // Number of slots per worker + int32 slots = 4; +} + +// Replica specification for distributed MPI training +message DistributedMPITrainingReplicaSpec { + // 1~4 deprecated. Use common instead. + // Number of replicas + int32 replicas = 1 [deprecated = true]; + + // Image used for the replica group + string image = 2 [deprecated = true]; + + // Resources required for the replica group + core.Resources resources = 3 [deprecated = true]; + + // Restart policy determines whether pods will be restarted when they exit + RestartPolicy restart_policy = 4 [deprecated = true]; + + // MPI sometimes requires different command set for different replica groups + repeated string command = 5; + + // The common replica spec + CommonReplicaSpec common = 6; +} diff --git a/flyteidl2/plugins/kubeflow/pytorch.proto b/flyteidl2/plugins/kubeflow/pytorch.proto new file mode 100644 index 0000000000..76172b6aa9 --- /dev/null +++ b/flyteidl2/plugins/kubeflow/pytorch.proto @@ -0,0 +1,54 @@ +syntax = "proto3"; + +package flyteidl2.plugins.kubeflow; + +import "flyteidl2/core/tasks.proto"; +import "flyteidl2/plugins/common.proto"; +import "flyteidl2/plugins/kubeflow/common.proto"; + +option go_package = "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins/kubeflow"; + +// Custom proto for torch elastic config for distributed training using +// https://github.com/kubeflow/training-operator/blob/master/pkg/apis/kubeflow.org/v1/pytorch_types.go +message ElasticConfig { + string rdzv_backend = 1; + int32 min_replicas = 2; + int32 max_replicas = 3; + int32 nproc_per_node = 4; + int32 max_restarts = 5; +} + +// Proto for plugin that enables distributed training using https://github.com/kubeflow/pytorch-operator +message DistributedPyTorchTrainingTask { + // Worker replicas spec + DistributedPyTorchTrainingReplicaSpec worker_replicas = 1; + + // Master replicas spec, master replicas can only have 1 replica + DistributedPyTorchTrainingReplicaSpec master_replicas = 2; 
+ + // RunPolicy encapsulates various runtime policies of the distributed training + // job, for example how to clean up resources and how long the job can stay + // active. + RunPolicy run_policy = 3; + + // config for an elastic pytorch job + ElasticConfig elastic_config = 4; +} + +message DistributedPyTorchTrainingReplicaSpec { + // 1~4 deprecated. Use common instead. + // Number of replicas + int32 replicas = 1 [deprecated = true]; + + // Image used for the replica group + string image = 2 [deprecated = true]; + + // Resources required for the replica group + core.Resources resources = 3 [deprecated = true]; + + // Restart policy determines whether pods will be restarted when they exit + RestartPolicy restart_policy = 4 [deprecated = true]; + + // The common replica spec + CommonReplicaSpec common = 5; +} diff --git a/flyteidl2/plugins/kubeflow/tensorflow.proto b/flyteidl2/plugins/kubeflow/tensorflow.proto new file mode 100644 index 0000000000..8573cf71dd --- /dev/null +++ b/flyteidl2/plugins/kubeflow/tensorflow.proto @@ -0,0 +1,47 @@ +syntax = "proto3"; + +package flyteidl2.plugins.kubeflow; + +import "flyteidl2/core/tasks.proto"; +import "flyteidl2/plugins/common.proto"; +import "flyteidl2/plugins/kubeflow/common.proto"; + +option go_package = "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins/kubeflow"; + +// Proto for plugin that enables distributed training using https://github.com/kubeflow/tf-operator +message DistributedTensorflowTrainingTask { + // Worker replicas spec + DistributedTensorflowTrainingReplicaSpec worker_replicas = 1; + + // Parameter server replicas spec + DistributedTensorflowTrainingReplicaSpec ps_replicas = 2; + + // Chief replicas spec + DistributedTensorflowTrainingReplicaSpec chief_replicas = 3; + + // RunPolicy encapsulates various runtime policies of the distributed training + // job, for example how to clean up resources and how long the job can stay + // active. 
+ RunPolicy run_policy = 4; + + // Evaluator replicas spec + DistributedTensorflowTrainingReplicaSpec evaluator_replicas = 5; +} + +message DistributedTensorflowTrainingReplicaSpec { + // 1~4 deprecated. Use common instead. + // Number of replicas + int32 replicas = 1 [deprecated = true]; + + // Image used for the replica group + string image = 2 [deprecated = true]; + + // Resources required for the replica group + core.Resources resources = 3 [deprecated = true]; + + // Restart policy determines whether pods will be restarted when they exit + RestartPolicy restart_policy = 4 [deprecated = true]; + + // The common replica spec + CommonReplicaSpec common = 5; +} diff --git a/flyteidl2/plugins/mpi.proto b/flyteidl2/plugins/mpi.proto new file mode 100644 index 0000000000..70136607fc --- /dev/null +++ b/flyteidl2/plugins/mpi.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +package flyteidl2.plugins; + +option go_package = "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins"; + +// MPI operator proposal https://github.com/kubeflow/community/blob/master/proposals/mpi-operator-proposal.md +// Custom proto for plugin that enables distributed training using https://github.com/kubeflow/mpi-operator +message DistributedMPITrainingTask { + // number of worker spawned in the cluster for this job + int32 num_workers = 1; + + // number of launcher replicas spawned in the cluster for this job + // The launcher pod invokes mpirun and communicates with worker pods through MPI. + int32 num_launcher_replicas = 2; + + // number of slots per worker used in hostfile. + // The available slots (GPUs) in each pod. 
+ int32 slots = 3; +} diff --git a/flyteidl2/plugins/presto.proto b/flyteidl2/plugins/presto.proto new file mode 100644 index 0000000000..4eb6628bec --- /dev/null +++ b/flyteidl2/plugins/presto.proto @@ -0,0 +1,14 @@ +syntax = "proto3"; + +package flyteidl2.plugins; + +option go_package = "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins"; + +// This message works with the 'presto' task type in the SDK and is the object that will be in the 'custom' field +// of a Presto task's TaskTemplate +message PrestoQuery { + string routing_group = 1; + string catalog = 2; + string schema = 3; + string statement = 4; +} diff --git a/flyteidl2/plugins/qubole.proto b/flyteidl2/plugins/qubole.proto new file mode 100644 index 0000000000..10c7686975 --- /dev/null +++ b/flyteidl2/plugins/qubole.proto @@ -0,0 +1,26 @@ +syntax = "proto3"; + +package flyteidl2.plugins; + +option go_package = "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins"; + +// Defines a query to execute on a hive cluster. +message HiveQuery { + string query = 1; + uint32 timeout_sec = 2; + uint32 retryCount = 3; +} + +// Defines a collection of hive queries. 
+message HiveQueryCollection { + repeated HiveQuery queries = 2; +} + +// This message works with the 'hive' task type in the SDK and is the object that will be in the 'custom' field +// of a hive task's TaskTemplate +message QuboleHiveJob { + string cluster_label = 1; + HiveQueryCollection query_collection = 2 [deprecated = true]; + repeated string tags = 3; + HiveQuery query = 4; +} diff --git a/flyteidl2/plugins/tensorflow.proto b/flyteidl2/plugins/tensorflow.proto new file mode 100644 index 0000000000..0a8367b9bd --- /dev/null +++ b/flyteidl2/plugins/tensorflow.proto @@ -0,0 +1,18 @@ +syntax = "proto3"; + +package flyteidl2.plugins; + +option go_package = "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins"; + +// Custom proto for plugin that enables distributed training using https://github.com/kubeflow/tf-operator +message DistributedTensorflowTrainingTask { + // number of worker replicas spawned in the cluster for this job + int32 workers = 1; + // PS -> Parameter server + // number of ps replicas spawned in the cluster for this job + int32 ps_replicas = 2; + // number of chief replicas spawned in the cluster for this job + int32 chief_replicas = 3; + // number of evaluator replicas spawned in the cluster for this job + int32 evaluator_replicas = 4; +} diff --git a/flyteplugins/.golangci.yml b/flyteplugins/.golangci.yml new file mode 100644 index 0000000000..9767445d04 --- /dev/null +++ b/flyteplugins/.golangci.yml @@ -0,0 +1,38 @@ +# WARNING: THIS FILE IS MANAGED IN THE 'BOILERPLATE' REPO AND COPIED TO OTHER REPOSITORIES. 
+# ONLY EDIT THIS FILE FROM WITHIN THE 'FLYTEORG/BOILERPLATE' REPOSITORY: +# +# TO OPT OUT OF UPDATES, SEE https://github.com/flyteorg/boilerplate/blob/master/Readme.rst + +issues: + exclude: + - copylocks + exclude-dirs: + - pkg/client + +linters: + disable-all: true + enable: + - errcheck + - gci + - goconst + - goimports + - gosec + - gosimple + - govet + - ineffassign + - misspell + - nakedret + - staticcheck + - typecheck + - unconvert + - unparam + - unused + +linters-settings: + gci: + custom-order: true + sections: + - standard + - default + - prefix(github.com/flyteorg) + skip-generated: true diff --git a/flyteplugins/.goreleaser.yml b/flyteplugins/.goreleaser.yml new file mode 100644 index 0000000000..8bdbc957bf --- /dev/null +++ b/flyteplugins/.goreleaser.yml @@ -0,0 +1,9 @@ +project_name: flyteplugins +builds: + - skip: true +changelog: + sort: asc + filters: + exclude: + - '^docs:' + - '^test:' diff --git a/flyteplugins/CODE_OF_CONDUCT.md b/flyteplugins/CODE_OF_CONDUCT.md new file mode 100755 index 0000000000..e12139d691 --- /dev/null +++ b/flyteplugins/CODE_OF_CONDUCT.md @@ -0,0 +1,2 @@ +This project is governed by LF AI Foundation's [code of conduct](https://lfprojects.org/policies/code-of-conduct/). +All contributors and participants agree to abide by its terms. diff --git a/flyteplugins/LICENSE b/flyteplugins/LICENSE new file mode 100755 index 0000000000..bed437514f --- /dev/null +++ b/flyteplugins/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2019 Lyft, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/flyteplugins/Makefile b/flyteplugins/Makefile new file mode 100755 index 0000000000..9861909e96 --- /dev/null +++ b/flyteplugins/Makefile @@ -0,0 +1,9 @@ +export REPOSITORY=flyteplugins +export REPO_ROOT=.. +include ../boilerplate/flyte/docker_build/Makefile +include ../boilerplate/flyte/golang_test_targets/Makefile + +.PHONY: update_boilerplate +update_boilerplate: + @curl https://raw.githubusercontent.com/flyteorg/boilerplate/master/boilerplate/update.sh -o boilerplate/update.sh + @boilerplate/update.sh diff --git a/flyteplugins/NOTICE b/flyteplugins/NOTICE new file mode 100755 index 0000000000..c3aef1fa32 --- /dev/null +++ b/flyteplugins/NOTICE @@ -0,0 +1,4 @@ +flyteplugins +Copyright 2019 Lyft Inc. + +This product includes software developed at Lyft Inc. 
diff --git a/flyteplugins/README.md b/flyteplugins/README.md new file mode 100755 index 0000000000..d2002cc54d --- /dev/null +++ b/flyteplugins/README.md @@ -0,0 +1,2 @@ +# flyteplugins +Plugins contributed by flyte community. diff --git a/flyteplugins/go/tasks/aws/client.go b/flyteplugins/go/tasks/aws/client.go new file mode 100644 index 0000000000..4bb781281d --- /dev/null +++ b/flyteplugins/go/tasks/aws/client.go @@ -0,0 +1,137 @@ +/* + * Copyright (c) 2018 Lyft. All rights reserved. + */ + +// Package aws contains AWS-specific logic to handle execution and monitoring of batch jobs. +package aws + +import ( + "context" + "fmt" + "os" + "sync" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/session" + + "github.com/flyteorg/flyte/v2/flytestdlib/errors" + "github.com/flyteorg/flyte/v2/flytestdlib/logger" +) + +const ( + EnvSharedCredFilePath = "AWS_SHARED_CREDENTIALS_FILE" // #nosec + EnvAwsProfile = "AWS_PROFILE" + ErrEmptyCredentials errors.ErrorCode = "EMPTY_CREDS" + ErrUnknownHost errors.ErrorCode = "UNKNOWN_HOST" +) + +type singleton struct { + client Client + lock sync.RWMutex +} + +var single = singleton{ + lock: sync.RWMutex{}, +} + +// Client is a generic AWS Client that can be used for all AWS Client libraries. +type Client interface { + GetSession() *session.Session + GetSdkConfig() *aws.Config + GetConfig() *Config + GetHostName() string +} + +type client struct { + config *Config + Session *session.Session + SdkConfig *aws.Config + HostName string +} + +// Gets the initialized session. +func (c client) GetSession() *session.Session { + return c.Session +} + +// Gets the final config that was used to initialize AWS Session. 
+func (c client) GetSdkConfig() *aws.Config { + return c.SdkConfig +} + +// Gets client's Hostname +func (c client) GetHostName() string { + return c.HostName +} + +func (c client) GetConfig() *Config { + return c.config +} + +func newClient(ctx context.Context, cfg *Config) (Client, error) { + awsConfig := aws.NewConfig().WithRegion(cfg.Region).WithMaxRetries(cfg.Retries) + if os.Getenv(EnvSharedCredFilePath) != "" { + creds := credentials.NewSharedCredentials(os.Getenv(EnvSharedCredFilePath), os.Getenv(EnvAwsProfile)) + if creds == nil { + return nil, fmt.Errorf("unable to Load AWS credentials") + } + + _, e := creds.Get() + if e != nil { + return nil, errors.Wrapf(ErrEmptyCredentials, e, "Empty credentials") + } + + awsConfig = awsConfig.WithCredentials(creds) + } + + sess, err := session.NewSession(awsConfig) + if err != nil { + logger.Fatalf(ctx, "Error while creating session: %v", err) + } + + hostname, err := os.Hostname() + if err != nil { + return nil, errors.Wrapf(ErrUnknownHost, err, "Unable to discover current hostname") + } + + return &client{ + config: cfg, + SdkConfig: awsConfig, + Session: sess, + HostName: hostname, + }, nil +} + +// Initializes singleton AWS Client if one hasn't been initialized yet. +func Init(ctx context.Context, cfg *Config) (err error) { + if single.client == nil { + single.lock.Lock() + defer single.lock.Unlock() + + if single.client == nil { + single.client, err = newClient(ctx, cfg) + } + } + + return err +} + +// Gets singleton AWS Client. 
+func GetClient() (c Client, err error) { + single.lock.RLock() + defer single.lock.RUnlock() + if single.client == nil { + single.client, err = newClient(context.TODO(), GetConfig()) + } + + return single.client, err +} + +func SetClient(c Client) { + single.lock.Lock() + defer single.lock.Unlock() + if single.client == nil { + single.client = c + } +} diff --git a/flyteplugins/go/tasks/aws/client_test.go b/flyteplugins/go/tasks/aws/client_test.go new file mode 100644 index 0000000000..3ead66bea1 --- /dev/null +++ b/flyteplugins/go/tasks/aws/client_test.go @@ -0,0 +1,24 @@ +/* + * Copyright (c) 2018 Lyft. All rights reserved. + */ + +package aws + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestClient_GetConfig(t *testing.T) { + assert.NoError(t, Init(context.TODO(), &Config{ + Retries: 2, + Region: "us-east-1", + })) + + c, err := GetClient() + assert.NoError(t, err) + assert.NotNil(t, c) + assert.NotNil(t, GetConfig()) +} diff --git a/flyteplugins/go/tasks/aws/config.go b/flyteplugins/go/tasks/aws/config.go new file mode 100644 index 0000000000..b98d04c70e --- /dev/null +++ b/flyteplugins/go/tasks/aws/config.go @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2018 Lyft. All rights reserved. 
+ */ + +package aws + +import ( + "context" + + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/aws/retry" + awsConfig "github.com/aws/aws-sdk-go-v2/config" + + pluginsConfig "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/config" + "github.com/flyteorg/flyte/v2/flytestdlib/config" +) + +//go:generate pflags Config --default-var defaultConfig + +const ConfigSectionKey = "aws" + +var ( + defaultConfig = &Config{ + Region: "us-east-2", + Retries: 3, + } + + configSection = pluginsConfig.MustRegisterSubSection(ConfigSectionKey, defaultConfig) +) + +// Config section for AWS Package +type Config struct { + Region string `json:"region" pflag:",AWS Region to connect to."` + AccountID string `json:"accountId" pflag:",AWS Account Identifier."` + Retries int `json:"retries" pflag:",Number of retries."` + LogLevel aws.ClientLogMode `json:"logLevel" pflag:"-,Defines the Sdk Log Level."` +} + +type RateLimiterConfig struct { + Rate int64 `json:"rate" pflag:",Allowed rate of calls per second."` + Burst int `json:"burst" pflag:",Allowed burst rate of calls."` +} + +// Gets loaded config for AWS +func GetConfig() *Config { + return configSection.GetConfig().(*Config) +} + +func (cfg Config) GetSdkConfig() (aws.Config, error) { + sdkConfig, err := awsConfig.LoadDefaultConfig(context.TODO(), + awsConfig.WithRegion(cfg.Region), + awsConfig.WithRetryer(func() aws.Retryer { + return retry.NewStandard(func(options *retry.StandardOptions) { + options.MaxAttempts = cfg.Retries + }) + }), + awsConfig.WithClientLogMode(cfg.LogLevel)) + if err != nil { + return aws.Config{}, err + } + + return sdkConfig, nil +} + +func MustRegisterSubSection(key config.SectionKey, cfg config.Config) config.Section { + return configSection.MustRegisterSection(key, cfg) +} diff --git a/flyteplugins/go/tasks/aws/config_flags.go b/flyteplugins/go/tasks/aws/config_flags.go new file mode 100755 index 0000000000..efe0c05416 --- /dev/null +++ b/flyteplugins/go/tasks/aws/config_flags.go @@ 
-0,0 +1,57 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package aws + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. +func (Config) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (Config) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (Config) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in Config and its nested types. The format of the +// flags is json-name.json-sub-name... etc. +func (cfg Config) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("Config", pflag.ExitOnError) + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "region"), defaultConfig.Region, "AWS Region to connect to.") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "accountId"), defaultConfig.AccountID, "AWS Account Identifier.") + cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "retries"), defaultConfig.Retries, "Number of retries.") + return cmdFlags +} diff --git a/flyteplugins/go/tasks/aws/config_flags_test.go b/flyteplugins/go/tasks/aws/config_flags_test.go new file mode 100755 index 0000000000..4a62659b85 --- /dev/null +++ b/flyteplugins/go/tasks/aws/config_flags_test.go @@ -0,0 +1,144 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. 
+ +package aws + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. +func canGetElementConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. +func jsonUnmarshalerHookConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. 
Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_Config(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_Config(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_Config(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_Config(val, result)) +} + +func testDecodeRaw_Config(t *testing.T, vStringSlice, result interface{}) { + assert.NoError(t, decode_Config(vStringSlice, result)) +} + +func TestConfig_GetPFlagSet(t *testing.T) { + val := Config{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) +} + +func TestConfig_SetFlags(t *testing.T) { + actual := Config{} + cmdFlags := actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_region", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("region", testValue) + if vString, err := cmdFlags.GetString("region"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.Region) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_accountId", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("accountId", testValue) + if vString, err := cmdFlags.GetString("accountId"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", 
vString), &actual.AccountID) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_retries", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("retries", testValue) + if vInt, err := cmdFlags.GetInt("retries"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vInt), &actual.Retries) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flyteplugins/go/tasks/config/config.go b/flyteplugins/go/tasks/config/config.go new file mode 100644 index 0000000000..5d0c6007b0 --- /dev/null +++ b/flyteplugins/go/tasks/config/config.go @@ -0,0 +1,30 @@ +package config + +import ( + "github.com/flyteorg/flyte/v2/flytestdlib/config" +) + +const configSectionKey = "plugins" + +var ( + // Root config section. If you are a plugin developer and your plugin needs a config, you should register + // your config as a subsection for this root section. + rootSection = config.MustRegisterSection(configSectionKey, &Config{}) +) + +// Config is the top level plugins config. +type Config struct { +} + +// GetConfig retrieves the current config value or default. 
+func GetConfig() *Config { + return rootSection.GetConfig().(*Config) +} + +func MustRegisterSubSection(subSectionKey string, section config.Config) config.Section { + return rootSection.MustRegisterSection(subSectionKey, section) +} + +func MustRegisterSubSectionWithUpdates(subSectionKey string, section config.Config, sectionUpdatedFn config.SectionUpdated) config.Section { + return rootSection.MustRegisterSectionWithUpdates(subSectionKey, section, sectionUpdatedFn) +} diff --git a/flyteplugins/go/tasks/config_load_test.go b/flyteplugins/go/tasks/config_load_test.go new file mode 100644 index 0000000000..d9c8467794 --- /dev/null +++ b/flyteplugins/go/tasks/config_load_test.go @@ -0,0 +1,141 @@ +package tasks_test + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/logs" + flyteK8sConfig "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/flytek8s/config" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/plugins/k8s/spark" + "github.com/flyteorg/flyte/v2/flytestdlib/config" + "github.com/flyteorg/flyte/v2/flytestdlib/config/viper" +) + +func TestLoadConfig(t *testing.T) { + configAccessor := viper.NewAccessor(config.Options{ + StrictMode: true, + SearchPaths: []string{"testdata/config.yaml"}, + }) + + err := configAccessor.UpdateConfig(context.TODO()) + assert.NoError(t, err) + t.Run("k8s-config-test", func(t *testing.T) { + + k8sConfig := flyteK8sConfig.GetK8sPluginConfig() + assert.True(t, k8sConfig.InjectFinalizer) + assert.Equal(t, map[string]string{ + "annotationKey1": "annotationValue1", + "annotationKey2": "annotationValue2", + }, k8sConfig.DefaultAnnotations) + assert.Equal(t, map[string]string{ + "label1": "labelValue1", + "label2": "labelValue2", + }, k8sConfig.DefaultLabels) + assert.Equal(t, map[string]string{ + "AWS_METADATA_SERVICE_NUM_ATTEMPTS": "20", + "AWS_METADATA_SERVICE_TIMEOUT": 
"5", + "FLYTE_AWS_ACCESS_KEY_ID": "minio", + "FLYTE_AWS_ENDPOINT": "http://minio.flyte:9000", + "FLYTE_AWS_SECRET_ACCESS_KEY": "miniostorage", + }, k8sConfig.DefaultEnvVars) + assert.NotNil(t, k8sConfig.ResourceTolerations) + assert.Contains(t, k8sConfig.ResourceTolerations, v1.ResourceName("nvidia.com/gpu")) + tolGPU := v1.Toleration{ + Key: "flyte/gpu", + Value: "dedicated", + Operator: v1.TolerationOpEqual, + Effect: v1.TaintEffectNoSchedule, + } + + assert.Equal(t, []v1.Toleration{tolGPU}, k8sConfig.ResourceTolerations[v1.ResourceName("nvidia.com/gpu")]) + expectedCPU := resource.MustParse("1000m") + assert.True(t, expectedCPU.Equal(k8sConfig.DefaultCPURequest)) + expectedMemory := resource.MustParse("1024Mi") + assert.True(t, expectedMemory.Equal(k8sConfig.DefaultMemoryRequest)) + assert.Equal(t, map[string]string{"x/interruptible": "true"}, k8sConfig.InterruptibleNodeSelector) + assert.Equal(t, "x/flyte", k8sConfig.InterruptibleTolerations[0].Key) + assert.Equal(t, "interruptible", k8sConfig.InterruptibleTolerations[0].Value) + assert.NotNil(t, k8sConfig.DefaultPodSecurityContext) + assert.NotNil(t, k8sConfig.DefaultPodSecurityContext.FSGroup) + assert.Equal(t, *k8sConfig.DefaultPodSecurityContext.FSGroup, int64(2000)) + assert.NotNil(t, k8sConfig.DefaultPodSecurityContext.RunAsGroup) + assert.Equal(t, *k8sConfig.DefaultPodSecurityContext.RunAsGroup, int64(3000)) + assert.NotNil(t, k8sConfig.DefaultPodSecurityContext.RunAsUser) + assert.Equal(t, *k8sConfig.DefaultPodSecurityContext.RunAsUser, int64(1000)) + assert.NotNil(t, k8sConfig.DefaultSecurityContext) + assert.NotNil(t, k8sConfig.DefaultSecurityContext.AllowPrivilegeEscalation) + assert.False(t, *k8sConfig.DefaultSecurityContext.AllowPrivilegeEscalation) + assert.NotNil(t, k8sConfig.EnableHostNetworkingPod) + assert.True(t, *k8sConfig.EnableHostNetworkingPod) + assert.NotNil(t, k8sConfig.DefaultPodDNSConfig) + assert.NotNil(t, k8sConfig.DefaultPodDNSConfig.Options) + assert.NotNil(t, 
k8sConfig.DefaultPodDNSConfig.Options[0].Name) + assert.Equal(t, "ndots", k8sConfig.DefaultPodDNSConfig.Options[0].Name) + assert.NotNil(t, k8sConfig.DefaultPodDNSConfig.Options[0].Value) + assert.Equal(t, "1", *k8sConfig.DefaultPodDNSConfig.Options[0].Value) + assert.NotNil(t, k8sConfig.DefaultPodDNSConfig.Options[1].Name) + assert.Equal(t, "single-request-reopen", k8sConfig.DefaultPodDNSConfig.Options[1].Name) + assert.Nil(t, k8sConfig.DefaultPodDNSConfig.Options[1].Value) + assert.NotNil(t, k8sConfig.DefaultPodDNSConfig.Options[2].Name) + assert.Equal(t, "timeout", k8sConfig.DefaultPodDNSConfig.Options[2].Name) + assert.NotNil(t, k8sConfig.DefaultPodDNSConfig.Options[2].Value) + assert.Equal(t, "1", *k8sConfig.DefaultPodDNSConfig.Options[2].Value) + assert.NotNil(t, k8sConfig.DefaultPodDNSConfig.Options[3].Name) + assert.Equal(t, "attempts", k8sConfig.DefaultPodDNSConfig.Options[3].Name) + assert.NotNil(t, k8sConfig.DefaultPodDNSConfig.Options[3].Value) + assert.Equal(t, "3", *k8sConfig.DefaultPodDNSConfig.Options[3].Value) + assert.NotNil(t, k8sConfig.DefaultPodDNSConfig.Nameservers) + assert.Equal(t, []string{"8.8.8.8", "8.8.4.4"}, k8sConfig.DefaultPodDNSConfig.Nameservers) + assert.NotNil(t, k8sConfig.DefaultPodDNSConfig.Searches) + assert.Equal(t, []string{"ns1.svc.cluster-domain.example", "my.dns.search.suffix"}, k8sConfig.DefaultPodDNSConfig.Searches) + }) + + t.Run("logs-config-test", func(t *testing.T) { + logsConfig := logs.GetLogConfig() + assert.NotNil(t, logsConfig) + assert.True(t, logsConfig.IsKubernetesEnabled) + + assert.Equal(t, 1, len(logsConfig.AzureLogTemplates)) + assert.Equal(t, "Test Azure Logs", logsConfig.AzureLogTemplates[0].DisplayName) + assert.Equal(t, "https://portal.azure.com#@TEST_AZURE_URI/q/", logsConfig.AzureLogTemplates[0].TemplateURIs[0]) + }) + + t.Run("spark-config-test", func(t *testing.T) { + assert.NotNil(t, spark.GetSparkConfig()) + assert.NotNil(t, spark.GetSparkConfig().DefaultSparkConfig) + assert.Equal(t, 2, 
len(spark.GetSparkConfig().Features)) + assert.Equal(t, "feature1", spark.GetSparkConfig().Features[0].Name) + assert.Equal(t, "feature2", spark.GetSparkConfig().Features[1].Name) + assert.Equal(t, 2, len(spark.GetSparkConfig().Features[0].SparkConfig)) + assert.Equal(t, 2, len(spark.GetSparkConfig().Features[1].SparkConfig)) + + }) +} + +func TestLoadIncorrectConfig(t *testing.T) { + t.Run("logs-config-test-accept-bad-config", func(t *testing.T) { + configAccessor := viper.NewAccessor(config.Options{ + StrictMode: false, + SearchPaths: []string{"testdata/incorrect-config.yaml"}, + }) + + err := configAccessor.UpdateConfig(context.TODO()) + assert.NoError(t, err) + assert.NotNil(t, logs.GetLogConfig()) + assert.True(t, logs.GetLogConfig().IsKubernetesEnabled) + }) + + t.Run("logs-config-test-failfast", func(t *testing.T) { + configAccessor := viper.NewAccessor(config.Options{ + StrictMode: true, + SearchPaths: []string{"testdata/incorrect-config.yaml"}, + }) + + err := configAccessor.UpdateConfig(context.TODO()) + assert.Error(t, err) + }) +} diff --git a/flyteplugins/go/tasks/errors/errors.go b/flyteplugins/go/tasks/errors/errors.go new file mode 100644 index 0000000000..4eccdb9273 --- /dev/null +++ b/flyteplugins/go/tasks/errors/errors.go @@ -0,0 +1,29 @@ +package errors + +import ( + "github.com/flyteorg/flyte/v2/flytestdlib/errors" +) + +const ( + TaskFailedWithError errors.ErrorCode = "TaskFailedWithError" + DownstreamSystemError errors.ErrorCode = "DownstreamSystemError" + TaskFailedUnknownError errors.ErrorCode = "TaskFailedUnknownError" + BadTaskSpecification errors.ErrorCode = "BadTaskSpecification" + TaskEventRecordingFailed errors.ErrorCode = "TaskEventRecordingFailed" + MetadataAccessFailed errors.ErrorCode = "MetadataAccessFailed" + MetadataTooLarge errors.ErrorCode = "MetadataTooLarge" + PluginInitializationFailed errors.ErrorCode = "PluginInitializationFailed" + CacheFailed errors.ErrorCode = "AutoRefreshCacheFailed" + RuntimeFailure errors.ErrorCode 
= "RuntimeFailure" + CorruptedPluginState errors.ErrorCode = "CorruptedPluginState" + ResourceManagerFailure errors.ErrorCode = "ResourceManagerFailure" + BackOffError errors.ErrorCode = "BackOffError" +) + +func Errorf(errorCode errors.ErrorCode, msgFmt string, args ...interface{}) error { + return errors.Errorf(errorCode, msgFmt, args...) +} + +func Wrapf(errorCode errors.ErrorCode, err error, msgFmt string, args ...interface{}) error { + return errors.Wrapf(errorCode, err, msgFmt, args...) +} diff --git a/flyteplugins/go/tasks/logs/config.go b/flyteplugins/go/tasks/logs/config.go new file mode 100644 index 0000000000..99aff2ece8 --- /dev/null +++ b/flyteplugins/go/tasks/logs/config.go @@ -0,0 +1,54 @@ +package logs + +import ( + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/config" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/tasklog" +) + +//go:generate pflags LogConfig --default-var=DefaultConfig + +// LogConfig encapsulates plugins' log configs +type LogConfig struct { + IsCloudwatchEnabled bool `json:"cloudwatch-enabled" pflag:",Enable Cloudwatch Logging"` + // Deprecated: Please use CloudwatchTemplateURI + CloudwatchRegion string `json:"cloudwatch-region" pflag:",AWS region in which Cloudwatch logs are stored."` + // Deprecated: Please use CloudwatchTemplateURI + CloudwatchLogGroup string `json:"cloudwatch-log-group" pflag:",Log group to which streams are associated."` + CloudwatchTemplateURI tasklog.TemplateURI `json:"cloudwatch-template-uri" pflag:",Template Uri to use when building cloudwatch log links"` + + IsKubernetesEnabled bool `json:"kubernetes-enabled" pflag:",Enable Kubernetes Logging"` + // Deprecated: Please use KubernetesTemplateURI + KubernetesURL string `json:"kubernetes-url" pflag:",Console URL for Kubernetes logs"` + KubernetesTemplateURI tasklog.TemplateURI `json:"kubernetes-template-uri" pflag:",Template Uri to use when building kubernetes log links"` + + IsStackDriverEnabled bool 
`json:"stackdriver-enabled" pflag:",Enable Log-links to stackdriver"` + // Deprecated: Please use StackDriverTemplateURI + GCPProjectName string `json:"gcp-project" pflag:",Name of the project in GCP"` + // Deprecated: Please use StackDriverTemplateURI + StackdriverLogResourceName string `json:"stackdriver-logresourcename" pflag:",Name of the logresource in stackdriver"` + StackDriverTemplateURI tasklog.TemplateURI `json:"stackdriver-template-uri" pflag:",Template Uri to use when building stackdriver log links"` + + DynamicLogLinks map[string]tasklog.TemplateLogPlugin `json:"dynamic-log-links" pflag:"-,Map of dynamic log links"` + + Templates []tasklog.TemplateLogPlugin `json:"templates" pflag:"-,"` + + AzureLogTemplates []tasklog.AzureLogsTemplatePlugin `json:"azure-log-templates" pflag:"-,"` +} + +var ( + DefaultConfig = LogConfig{ + IsKubernetesEnabled: true, + KubernetesTemplateURI: "http://localhost:30082/#!/log/{{ .namespace }}/{{ .podName }}/pod?namespace={{ .namespace }}", + } + + logConfigSection = config.MustRegisterSubSection("logs", &DefaultConfig) +) + +func GetLogConfig() *LogConfig { + return logConfigSection.GetConfig().(*LogConfig) +} + +// SetLogConfig should be used for unit testing only +func SetLogConfig(logConfig *LogConfig) error { + return logConfigSection.SetConfig(logConfig) +} diff --git a/flyteplugins/go/tasks/logs/logconfig_flags.go b/flyteplugins/go/tasks/logs/logconfig_flags.go new file mode 100755 index 0000000000..00c08a8a58 --- /dev/null +++ b/flyteplugins/go/tasks/logs/logconfig_flags.go @@ -0,0 +1,65 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package logs + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. 
+func (LogConfig) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (LogConfig) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (LogConfig) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in LogConfig and its nested types. The format of the +// flags is json-name.json-sub-name... etc. +func (cfg LogConfig) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("LogConfig", pflag.ExitOnError) + cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "cloudwatch-enabled"), DefaultConfig.IsCloudwatchEnabled, "Enable Cloudwatch Logging") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "cloudwatch-region"), DefaultConfig.CloudwatchRegion, "AWS region in which Cloudwatch logs are stored.") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "cloudwatch-log-group"), DefaultConfig.CloudwatchLogGroup, "Log group to which streams are associated.") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "cloudwatch-template-uri"), DefaultConfig.CloudwatchTemplateURI, "Template Uri to use when building cloudwatch log links") + cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "kubernetes-enabled"), DefaultConfig.IsKubernetesEnabled, "Enable Kubernetes Logging") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "kubernetes-url"), DefaultConfig.KubernetesURL, "Console URL for Kubernetes logs") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "kubernetes-template-uri"), DefaultConfig.KubernetesTemplateURI, "Template Uri to use when building kubernetes log links") + cmdFlags.Bool(fmt.Sprintf("%v%v", 
prefix, "stackdriver-enabled"), DefaultConfig.IsStackDriverEnabled, "Enable Log-links to stackdriver") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "gcp-project"), DefaultConfig.GCPProjectName, "Name of the project in GCP") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "stackdriver-logresourcename"), DefaultConfig.StackdriverLogResourceName, "Name of the logresource in stackdriver") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "stackdriver-template-uri"), DefaultConfig.StackDriverTemplateURI, "Template Uri to use when building stackdriver log links") + return cmdFlags +} diff --git a/flyteplugins/go/tasks/logs/logconfig_flags_test.go b/flyteplugins/go/tasks/logs/logconfig_flags_test.go new file mode 100755 index 0000000000..8bb775df1f --- /dev/null +++ b/flyteplugins/go/tasks/logs/logconfig_flags_test.go @@ -0,0 +1,256 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package logs + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsLogConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. +func canGetElementLogConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsLogConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. 
+func jsonUnmarshalerHookLogConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementLogConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_LogConfig(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookLogConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_LogConfig(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_LogConfig(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_LogConfig(val, result)) +} + +func testDecodeRaw_LogConfig(t *testing.T, vStringSlice, result interface{}) { + assert.NoError(t, decode_LogConfig(vStringSlice, result)) +} + +func TestLogConfig_GetPFlagSet(t *testing.T) { + val := LogConfig{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) +} + +func TestLogConfig_SetFlags(t *testing.T) { + actual := 
LogConfig{} + cmdFlags := actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_cloudwatch-enabled", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("cloudwatch-enabled", testValue) + if vBool, err := cmdFlags.GetBool("cloudwatch-enabled"); err == nil { + testDecodeJson_LogConfig(t, fmt.Sprintf("%v", vBool), &actual.IsCloudwatchEnabled) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_cloudwatch-region", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("cloudwatch-region", testValue) + if vString, err := cmdFlags.GetString("cloudwatch-region"); err == nil { + testDecodeJson_LogConfig(t, fmt.Sprintf("%v", vString), &actual.CloudwatchRegion) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_cloudwatch-log-group", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("cloudwatch-log-group", testValue) + if vString, err := cmdFlags.GetString("cloudwatch-log-group"); err == nil { + testDecodeJson_LogConfig(t, fmt.Sprintf("%v", vString), &actual.CloudwatchLogGroup) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_cloudwatch-template-uri", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("cloudwatch-template-uri", testValue) + if vString, err := cmdFlags.GetString("cloudwatch-template-uri"); err == nil { + testDecodeJson_LogConfig(t, fmt.Sprintf("%v", vString), &actual.CloudwatchTemplateURI) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_kubernetes-enabled", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("kubernetes-enabled", testValue) + if vBool, err := cmdFlags.GetBool("kubernetes-enabled"); err == nil { + testDecodeJson_LogConfig(t, fmt.Sprintf("%v", vBool), &actual.IsKubernetesEnabled) + + } else 
{ + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_kubernetes-url", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("kubernetes-url", testValue) + if vString, err := cmdFlags.GetString("kubernetes-url"); err == nil { + testDecodeJson_LogConfig(t, fmt.Sprintf("%v", vString), &actual.KubernetesURL) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_kubernetes-template-uri", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("kubernetes-template-uri", testValue) + if vString, err := cmdFlags.GetString("kubernetes-template-uri"); err == nil { + testDecodeJson_LogConfig(t, fmt.Sprintf("%v", vString), &actual.KubernetesTemplateURI) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_stackdriver-enabled", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("stackdriver-enabled", testValue) + if vBool, err := cmdFlags.GetBool("stackdriver-enabled"); err == nil { + testDecodeJson_LogConfig(t, fmt.Sprintf("%v", vBool), &actual.IsStackDriverEnabled) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_gcp-project", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("gcp-project", testValue) + if vString, err := cmdFlags.GetString("gcp-project"); err == nil { + testDecodeJson_LogConfig(t, fmt.Sprintf("%v", vString), &actual.GCPProjectName) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_stackdriver-logresourcename", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("stackdriver-logresourcename", testValue) + if vString, err := cmdFlags.GetString("stackdriver-logresourcename"); err == nil { + testDecodeJson_LogConfig(t, fmt.Sprintf("%v", vString), &actual.StackdriverLogResourceName) + + } else { + assert.FailNow(t, err.Error()) + } 
+ }) + }) + t.Run("Test_stackdriver-template-uri", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("stackdriver-template-uri", testValue) + if vString, err := cmdFlags.GetString("stackdriver-template-uri"); err == nil { + testDecodeJson_LogConfig(t, fmt.Sprintf("%v", vString), &actual.StackDriverTemplateURI) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flyteplugins/go/tasks/logs/logging_utils.go b/flyteplugins/go/tasks/logs/logging_utils.go new file mode 100644 index 0000000000..aa003348da --- /dev/null +++ b/flyteplugins/go/tasks/logs/logging_utils.go @@ -0,0 +1,161 @@ +package logs + +import ( + "context" + "fmt" + "time" + + v1 "k8s.io/api/core/v1" + + pluginsCore "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/flytek8s" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/tasklog" + "github.com/flyteorg/flyte/v2/flytestdlib/logger" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +const ( + kubernetesLogsDisplayName = "Kubernetes Logs" + cloudwatchLoggingDisplayName = "Cloudwatch Logs" + googleCloudLoggingDisplayName = "Google Cloud Logs" + FlyteEnableVscode = "_F_E_VS" +) + +// Internal +func GetLogsForContainerInPod(ctx context.Context, logPlugin tasklog.Plugin, taskExecID pluginsCore.TaskExecutionID, pod *v1.Pod, index uint32, nameSuffix string, extraLogTemplateVars []tasklog.TemplateVar, taskTemplate *core.TaskTemplate) ([]*core.TaskLog, error) { + if logPlugin == nil { + return nil, nil + } + + if pod == nil { + logger.Error(ctx, "cannot extract logs for a nil container") + return nil, nil + } + + var containerID string + if uint32(len(pod.Spec.Containers)) <= index { + logger.Errorf(ctx, "container IndexOutOfBound, requested [%d], but total containers [%d] in pod phase [%v]", index, len(pod.Spec.Containers), pod.Status.Phase) + return nil, nil + } + + 
if uint32(len(pod.Status.ContainerStatuses)) <= index { + logger.Errorf(ctx, "containerStatus IndexOutOfBound, requested [%d], but total containerStatuses [%d] in pod phase [%v]", index, len(pod.Status.ContainerStatuses), pod.Status.Phase) + return nil, nil + } else { + containerID = pod.Status.ContainerStatuses[index].ContainerID + } + + startTime := pod.CreationTimestamp.Unix() + finishTime := time.Now().Unix() + + enableVscode := flytek8s.IsVscodeEnabled(ctx, pod.Spec.Containers[index].Env) + logs, err := logPlugin.GetTaskLogs( + tasklog.Input{ + PodName: pod.Name, + PodUID: string(pod.GetUID()), + Namespace: pod.Namespace, + ContainerName: pod.Spec.Containers[index].Name, + ContainerID: containerID, + LogName: nameSuffix, + PodRFC3339StartTime: time.Unix(startTime, 0).Format(time.RFC3339), + PodRFC3339FinishTime: time.Unix(finishTime, 0).Format(time.RFC3339), + PodUnixStartTime: startTime, + PodUnixFinishTime: finishTime, + TaskExecutionID: taskExecID, + ExtraTemplateVars: extraLogTemplateVars, + TaskTemplate: taskTemplate, + HostName: pod.Spec.Hostname, + EnableVscode: enableVscode, + }, + ) + + if err != nil { + return nil, err + } + + return logs.TaskLogs, nil +} + +type templateLogPluginCollection struct { + plugins []tasklog.Plugin + dynamicPlugins []tasklog.Plugin +} + +func (t templateLogPluginCollection) GetTaskLogs(input tasklog.Input) (tasklog.Output, error) { + var taskLogs []*core.TaskLog + + for _, plugin := range append(t.plugins, t.dynamicPlugins...) { + o, err := plugin.GetTaskLogs(input) + if err != nil { + return tasklog.Output{}, err + } + taskLogs = append(taskLogs, o.TaskLogs...) + } + + return tasklog.Output{TaskLogs: taskLogs}, nil +} + +// InitializeLogPlugins initializes log plugin based on config. +func InitializeLogPlugins(cfg *LogConfig) (tasklog.Plugin, error) { + // Use a list to maintain order. 
+ var plugins []tasklog.Plugin + var dynamicPlugins []tasklog.Plugin + + if cfg.IsKubernetesEnabled { + if len(cfg.KubernetesTemplateURI) > 0 { + plugins = append(plugins, tasklog.TemplateLogPlugin{DisplayName: kubernetesLogsDisplayName, TemplateURIs: []tasklog.TemplateURI{cfg.KubernetesTemplateURI}, MessageFormat: core.TaskLog_JSON}) + } else { + plugins = append(plugins, tasklog.TemplateLogPlugin{DisplayName: kubernetesLogsDisplayName, TemplateURIs: []tasklog.TemplateURI{fmt.Sprintf("%s/#!/log/{{ .namespace }}/{{ .podName }}/pod?namespace={{ .namespace }}", cfg.KubernetesURL)}, MessageFormat: core.TaskLog_JSON}) + } + } + + if cfg.IsCloudwatchEnabled { + if len(cfg.CloudwatchTemplateURI) > 0 { + plugins = append(plugins, tasklog.TemplateLogPlugin{DisplayName: cloudwatchLoggingDisplayName, TemplateURIs: []tasklog.TemplateURI{cfg.CloudwatchTemplateURI}, MessageFormat: core.TaskLog_JSON}) + } else { + plugins = append(plugins, tasklog.TemplateLogPlugin{DisplayName: cloudwatchLoggingDisplayName, TemplateURIs: []tasklog.TemplateURI{fmt.Sprintf("https://console.aws.amazon.com/cloudwatch/home?region=%s#logEventViewer:group=%s;stream=var.log.containers.{{ .podName }}_{{ .namespace }}_{{ .containerName }}-{{ .containerId }}.log", cfg.CloudwatchRegion, cfg.CloudwatchLogGroup)}, MessageFormat: core.TaskLog_JSON}) + } + } + + if cfg.IsStackDriverEnabled { + if len(cfg.StackDriverTemplateURI) > 0 { + plugins = append(plugins, tasklog.TemplateLogPlugin{DisplayName: googleCloudLoggingDisplayName, TemplateURIs: []tasklog.TemplateURI{cfg.StackDriverTemplateURI}, MessageFormat: core.TaskLog_JSON}) + } else { + plugins = append(plugins, tasklog.TemplateLogPlugin{DisplayName: googleCloudLoggingDisplayName, TemplateURIs: []tasklog.TemplateURI{fmt.Sprintf("https://console.cloud.google.com/logs/viewer?project=%s&angularJsUrl=%%2Flogs%%2Fviewer%%3Fproject%%3D%s&resource=%s&advancedFilter=resource.labels.pod_name%%3D{{ .podName }}", cfg.GCPProjectName, cfg.GCPProjectName, 
cfg.StackdriverLogResourceName)}, MessageFormat: core.TaskLog_JSON}) + } + } + + for logLinkType, dynamicLogLink := range cfg.DynamicLogLinks { + dynamicPlugins = append( + dynamicPlugins, + tasklog.TemplateLogPlugin{ + Name: logLinkType, + DisplayName: dynamicLogLink.DisplayName, + DynamicTemplateURIs: dynamicLogLink.TemplateURIs, + MessageFormat: core.TaskLog_JSON, + ShowWhilePending: dynamicLogLink.ShowWhilePending, + HideOnceFinished: dynamicLogLink.HideOnceFinished, + LinkType: dynamicLogLink.LinkType, + }) + } + + plugins = append(plugins, azureTemplatePluginsToPluginSlice(cfg.AzureLogTemplates)...) + plugins = append(plugins, templatePluginToPluginSlice(cfg.Templates)...) + return templateLogPluginCollection{plugins: plugins, dynamicPlugins: dynamicPlugins}, nil +} + +func templatePluginToPluginSlice(templatePlugins []tasklog.TemplateLogPlugin) []tasklog.Plugin { + plugins := make([]tasklog.Plugin, len(templatePlugins)) + for i := range templatePlugins { + plugins[i] = &templatePlugins[i] + } + return plugins +} + +func azureTemplatePluginsToPluginSlice(templatePlugins []tasklog.AzureLogsTemplatePlugin) []tasklog.Plugin { + plugins := make([]tasklog.Plugin, len(templatePlugins)) + for i := range templatePlugins { + plugins[i] = &templatePlugins[i] + } + return plugins +} diff --git a/flyteplugins/go/tasks/logs/logging_utils_test.go b/flyteplugins/go/tasks/logs/logging_utils_test.go new file mode 100644 index 0000000000..a6c93f0a90 --- /dev/null +++ b/flyteplugins/go/tasks/logs/logging_utils_test.go @@ -0,0 +1,625 @@ +package logs + +import ( + "context" + "testing" + + "github.com/go-test/deep" + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + v12 "k8s.io/apimachinery/pkg/apis/meta/v1" + + pluginCore "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + coreMocks "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core/mocks" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/tasklog" 
+ "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +const podName = "PodName" + +func dummyTaskExecID() pluginCore.TaskExecutionID { + tID := &coreMocks.TaskExecutionID{} + tID.OnGetGeneratedName().Return("generated-name") + tID.OnGetID().Return(core.TaskExecutionIdentifier{ + TaskId: &core.Identifier{ + ResourceType: core.ResourceType_TASK, + Name: "my-task-name", + Project: "my-task-project", + Domain: "my-task-domain", + Version: "1", + }, + NodeExecutionId: &core.NodeExecutionIdentifier{ + ExecutionId: &core.WorkflowExecutionIdentifier{ + Name: "my-execution-name", + Project: "my-execution-project", + Domain: "my-execution-domain", + }, + }, + RetryAttempt: 1, + }) + tID.OnGetUniqueNodeID().Return("n0-0-n0") + return tID +} + +func TestGetLogsForContainerInPod_NoPlugins(t *testing.T) { + logPlugin, err := InitializeLogPlugins(&LogConfig{}) + assert.NoError(t, err) + l, err := GetLogsForContainerInPod(context.TODO(), logPlugin, dummyTaskExecID(), nil, 0, " Suffix", nil, nil) + assert.NoError(t, err) + assert.Nil(t, l) +} + +func TestGetLogsForContainerInPod_NoLogs(t *testing.T) { + logPlugin, err := InitializeLogPlugins(&LogConfig{ + IsCloudwatchEnabled: true, + CloudwatchRegion: "us-east-1", + CloudwatchLogGroup: "/kubernetes/flyte-production", + }) + assert.NoError(t, err) + p, err := GetLogsForContainerInPod(context.TODO(), logPlugin, dummyTaskExecID(), nil, 0, " Suffix", nil, nil) + assert.NoError(t, err) + assert.Nil(t, p) +} + +func TestGetLogsForContainerInPod_BadIndex(t *testing.T) { + logPlugin, err := InitializeLogPlugins(&LogConfig{ + IsCloudwatchEnabled: true, + CloudwatchRegion: "us-east-1", + CloudwatchLogGroup: "/kubernetes/flyte-production", + }) + assert.NoError(t, err) + + pod := &v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "ContainerName", + }, + }, + }, + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + { + ContainerID: "ContainerID", + }, + }, + }, + } + pod.Name = podName + + p, err := 
GetLogsForContainerInPod(context.TODO(), logPlugin, dummyTaskExecID(), pod, 1, " Suffix", nil, nil) + assert.NoError(t, err) + assert.Nil(t, p) +} + +func TestGetLogsForContainerInPod_BadIndex_WithoutStatus(t *testing.T) { + logPlugin, err := InitializeLogPlugins(&LogConfig{ + IsCloudwatchEnabled: true, + CloudwatchRegion: "us-east-1", + CloudwatchLogGroup: "/kubernetes/flyte-production", + }) + assert.NoError(t, err) + + pod := &v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "ContainerName", + }, + }, + }, + } + pod.Name = podName + + p, err := GetLogsForContainerInPod(context.TODO(), logPlugin, dummyTaskExecID(), pod, 0, " Suffix", nil, nil) + assert.NoError(t, err) + assert.Nil(t, p) +} + +func TestGetLogsForContainerInPod_MissingStatus(t *testing.T) { + logPlugin, err := InitializeLogPlugins(&LogConfig{ + IsCloudwatchEnabled: true, + CloudwatchRegion: "us-east-1", + CloudwatchLogGroup: "/kubernetes/flyte-production", + }) + assert.NoError(t, err) + + pod := &v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "ContainerName", + }, + }, + }, + Status: v1.PodStatus{}, + } + pod.Name = podName + + p, err := GetLogsForContainerInPod(context.TODO(), logPlugin, dummyTaskExecID(), pod, 1, " Suffix", nil, nil) + assert.NoError(t, err) + assert.Nil(t, p) +} + +func TestGetLogsForContainerInPod_Cloudwatch(t *testing.T) { + logPlugin, err := InitializeLogPlugins(&LogConfig{IsCloudwatchEnabled: true, + CloudwatchRegion: "us-east-1", + CloudwatchLogGroup: "/kubernetes/flyte-production", + }) + assert.NoError(t, err) + + pod := &v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "ContainerName", + }, + }, + }, + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + { + ContainerID: "ContainerID", + }, + }, + }, + } + pod.Name = podName + + logs, err := GetLogsForContainerInPod(context.TODO(), logPlugin, dummyTaskExecID(), pod, 0, " Suffix", nil, nil) + assert.Nil(t, err) + assert.Len(t, logs, 1) +} + 
+func TestGetLogsForContainerInPod_K8s(t *testing.T) { + logPlugin, err := InitializeLogPlugins(&LogConfig{ + IsKubernetesEnabled: true, + KubernetesURL: "k8s.com", + DynamicLogLinks: map[string]tasklog.TemplateLogPlugin{ + "vscode": { + TemplateURIs: []tasklog.TemplateURI{"vscode://flyteinteractive:{{ .taskConfig.port }}/{{ .podName }}"}, + MessageFormat: core.TaskLog_JSON, + }, + }, + }) + assert.NoError(t, err) + + pod := &v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "ContainerName", + Env: []v1.EnvVar{ + { + Name: FlyteEnableVscode, + Value: "True", + }, + }, + }, + }, + }, + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + { + ContainerID: "ContainerID", + }, + }, + }, + } + pod.Name = podName + + logs, err := GetLogsForContainerInPod(context.TODO(), logPlugin, dummyTaskExecID(), pod, 0, " Suffix", nil, &core.TaskTemplate{}) + assert.Nil(t, err) + assert.Len(t, logs, 2) +} + +func TestGetLogsForContainerInPod_All(t *testing.T) { + logPlugin, err := InitializeLogPlugins(&LogConfig{ + IsKubernetesEnabled: true, + KubernetesURL: "k8s.com", + IsCloudwatchEnabled: true, + CloudwatchRegion: "us-east-1", + CloudwatchLogGroup: "/kubernetes/flyte-production", + }) + assert.NoError(t, err) + + pod := &v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "ContainerName", + }, + }, + }, + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + { + ContainerID: "ContainerID", + }, + }, + }, + } + pod.Name = podName + + logs, err := GetLogsForContainerInPod(context.TODO(), logPlugin, dummyTaskExecID(), pod, 0, " Suffix", nil, nil) + assert.Nil(t, err) + assert.Len(t, logs, 2) +} + +func TestGetLogsForContainerInPod_HostName(t *testing.T) { + logPlugin, err := InitializeLogPlugins(&LogConfig{ + IsKubernetesEnabled: true, + KubernetesURL: "k8s.com", + IsCloudwatchEnabled: true, + CloudwatchRegion: "us-east-1", + CloudwatchLogGroup: "/kubernetes/flyte-production", + }) + assert.NoError(t, err) + + pod := 
&v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "ContainerName", + }, + }, + Hostname: "my-hostname", + }, + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + { + ContainerID: "ContainerID", + }, + }, + }, + } + pod.Name = podName + + logs, err := GetLogsForContainerInPod(context.TODO(), logPlugin, dummyTaskExecID(), pod, 0, " Suffix", nil, nil) + assert.Nil(t, err) + assert.Len(t, logs, 2) +} + +func TestGetLogsForContainerInPod_Stackdriver(t *testing.T) { + logPlugin, err := InitializeLogPlugins(&LogConfig{ + IsStackDriverEnabled: true, + GCPProjectName: "myGCPProject", + StackdriverLogResourceName: "aws_ec2_instance", + }) + assert.NoError(t, err) + + pod := &v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "ContainerName", + }, + }, + }, + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + { + ContainerID: "ContainerID", + }, + }, + }, + } + pod.Name = podName + + logs, err := GetLogsForContainerInPod(context.TODO(), logPlugin, dummyTaskExecID(), pod, 0, " Suffix", nil, nil) + assert.Nil(t, err) + assert.Len(t, logs, 1) +} + +func TestGetLogsForContainerInPod_LegacyTemplate(t *testing.T) { + t.Run("All Templates available", func(t *testing.T) { + assertTestSucceeded(t, &LogConfig{ + IsKubernetesEnabled: true, + KubernetesTemplateURI: "https://k8s-my-log-server/{{ .namespace }}/{{ .podName }}/{{ .containerName }}/{{ .containerId }}", + + IsCloudwatchEnabled: true, + CloudwatchTemplateURI: "https://cw-my-log-server/{{ .namespace }}/{{ .podName }}/{{ .containerName }}/{{ .containerId }}", + + IsStackDriverEnabled: true, + StackDriverTemplateURI: "https://sd-my-log-server/{{ .namespace }}/{{ .podName }}/{{ .containerName }}/{{ .containerId }}", + }, nil, []*core.TaskLog{ + { + Uri: "https://k8s-my-log-server/my-namespace/my-pod/ContainerName/ContainerID", + MessageFormat: core.TaskLog_JSON, + Name: "Kubernetes Logs my-Suffix", + Ready: true, + }, + { + Uri: 
"https://cw-my-log-server/my-namespace/my-pod/ContainerName/ContainerID", + MessageFormat: core.TaskLog_JSON, + Name: "Cloudwatch Logs my-Suffix", + Ready: true, + }, + { + Uri: "https://sd-my-log-server/my-namespace/my-pod/ContainerName/ContainerID", + MessageFormat: core.TaskLog_JSON, + Name: "Google Cloud Logs my-Suffix", + Ready: true, + }, + }, "") + }) + + t.Run("StackDriver", func(t *testing.T) { + assertTestSucceeded(t, &LogConfig{ + IsStackDriverEnabled: true, + StackDriverTemplateURI: "https://sd-my-log-server/{{ .namespace }}/{{ .podName }}/{{ .containerName }}/{{ .containerId }}", + }, nil, []*core.TaskLog{ + { + Uri: "https://sd-my-log-server/my-namespace/my-pod/ContainerName/ContainerID", + MessageFormat: core.TaskLog_JSON, + Name: "Google Cloud Logs my-Suffix", + Ready: true, + }, + }, "") + }) +} + +func assertTestSucceeded(tb testing.TB, config *LogConfig, taskTemplate *core.TaskTemplate, expectedTaskLogs []*core.TaskLog, hostname string) { + logPlugin, err := InitializeLogPlugins(config) + assert.NoError(tb, err) + + pod := &v1.Pod{ + ObjectMeta: v12.ObjectMeta{ + Namespace: "my-namespace", + Name: "my-pod", + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "ContainerName", + }, + }, + Hostname: hostname, + }, + Status: v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + { + ContainerID: "ContainerID", + }, + }, + }, + } + + logs, err := GetLogsForContainerInPod(context.TODO(), logPlugin, dummyTaskExecID(), pod, 0, " my-Suffix", nil, taskTemplate) + assert.Nil(tb, err) + assert.Len(tb, logs, len(expectedTaskLogs)) + if diff := deep.Equal(logs, expectedTaskLogs); len(diff) > 0 { + assert.FailNowf(tb, "Not Equal.", "Diff: %v", diff) + } +} + +func TestGetLogsForContainerInPod_Templates(t *testing.T) { + assertTestSucceeded(t, &LogConfig{ + Templates: []tasklog.TemplateLogPlugin{ + { + DisplayName: "StackDriver", + TemplateURIs: []string{ + "https://my-log-server/{{ .namespace }}/{{ .podName }}/{{ .containerName }}/{{ 
.containerId }}", + }, + MessageFormat: core.TaskLog_JSON, + LinkType: core.TaskLog_EXTERNAL.String(), + }, + { + DisplayName: "Internal", + TemplateURIs: []string{ + "https://flyte.corp.net/console/projects/{{ .executionProject }}/domains/{{ .executionDomain }}/executions/{{ .executionName }}/nodeId/{{ .nodeID }}/taskId/{{ .taskID }}/attempt/{{ .taskRetryAttempt }}/view/logs", + }, + MessageFormat: core.TaskLog_JSON, + LinkType: core.TaskLog_EXTERNAL.String(), + }, + }, + }, nil, []*core.TaskLog{ + { + Uri: "https://my-log-server/my-namespace/my-pod/ContainerName/ContainerID", + MessageFormat: core.TaskLog_JSON, + Name: "StackDriver my-Suffix", + LinkType: core.TaskLog_EXTERNAL, + Ready: true, + }, + { + Uri: "https://flyte.corp.net/console/projects/my-execution-project/domains/my-execution-domain/executions/my-execution-name/nodeId/n0-0-n0/taskId/my-task-name/attempt/1/view/logs", + MessageFormat: core.TaskLog_JSON, + Name: "Internal my-Suffix", + LinkType: core.TaskLog_EXTERNAL, + Ready: true, + }, + }, "") +} + +func TestGetLogsForContainerInPodTemplates_Hostname(t *testing.T) { + assertTestSucceeded(t, &LogConfig{ + Templates: []tasklog.TemplateLogPlugin{ + { + DisplayName: "StackDriver", + TemplateURIs: []string{ + "{{ .hostname }}/{{ .namespace }}/{{ .podName }}/{{ .containerName }}/{{ .containerId }}", + }, + MessageFormat: core.TaskLog_JSON, + }, + }, + }, nil, []*core.TaskLog{ + { + Uri: "my-hostname/my-namespace/my-pod/ContainerName/ContainerID", + MessageFormat: core.TaskLog_JSON, + Name: "StackDriver my-Suffix", + Ready: true, + }, + }, "my-hostname") +} + +func TestGetLogsForContainerInPod_Flyteinteractive(t *testing.T) { + tests := []struct { + name string + config *LogConfig + template *core.TaskTemplate + expectedTaskLogs []*core.TaskLog + }{ + { + "Flyteinteractive enabled but no task template", + &LogConfig{ + DynamicLogLinks: map[string]tasklog.TemplateLogPlugin{ + "vscode": tasklog.TemplateLogPlugin{ + DisplayName: "vscode link", + 
TemplateURIs: []tasklog.TemplateURI{ + "https://flyteinteractive.mydomain.com:{{ .taskConfig.port }}/{{ .namespace }}/{{ .podName }}/{{ .containerName }}/{{ .containerId }}", + }, + }, + }, + }, + nil, + nil, + }, + { + "Flyteinteractive enabled but config not found in task template", + &LogConfig{ + DynamicLogLinks: map[string]tasklog.TemplateLogPlugin{ + "vscode": tasklog.TemplateLogPlugin{ + DisplayName: "vscode link", + TemplateURIs: []tasklog.TemplateURI{ + "https://flyteinteractive.mydomain.com:{{ .taskConfig.port }}/{{ .namespace }}/{{ .podName }}/{{ .containerName }}/{{ .containerId }}", + }, + }, + }, + }, + &core.TaskTemplate{}, + nil, + }, + { + "Flyteinteractive disabled but config present in TaskTemplate", + &LogConfig{}, + &core.TaskTemplate{ + Config: map[string]string{ + "link_type": "vscode", + "port": "65535", + }, + }, + nil, + }, + { + "Flyteinteractive - multiple dynamic options", + &LogConfig{ + DynamicLogLinks: map[string]tasklog.TemplateLogPlugin{ + "vscode": tasklog.TemplateLogPlugin{ + DisplayName: "vscode link", + TemplateURIs: []tasklog.TemplateURI{ + "https://abc.com:{{ .taskConfig.port }}/{{ .taskConfig.route }}", + }, + LinkType: core.TaskLog_IDE.String(), + }, + }, + }, + &core.TaskTemplate{ + Config: map[string]string{ + "link_type": "vscode", + "port": "65535", + "route": "a-route", + }, + }, + []*core.TaskLog{ + { + Uri: "https://abc.com:65535/a-route", + MessageFormat: core.TaskLog_JSON, + Name: "vscode link my-Suffix", + LinkType: core.TaskLog_IDE, + Ready: true, + }, + }, + }, + { + "Flyteinteractive - multiple uses of the template (invalid use of ports in a URI)", + &LogConfig{ + DynamicLogLinks: map[string]tasklog.TemplateLogPlugin{ + "vscode": tasklog.TemplateLogPlugin{ + DisplayName: "vscode link", + TemplateURIs: []tasklog.TemplateURI{ + "https://abc.com:{{ .taskConfig.port }}:{{ .taskConfig.port}}", + }, + }, + }, + }, + &core.TaskTemplate{ + Config: map[string]string{ + "link_type": "vscode", + "port": "65535", + }, + }, 
+ []*core.TaskLog{ + { + Uri: "https://abc.com:65535:65535", + MessageFormat: core.TaskLog_JSON, + Name: "vscode link my-Suffix", + LinkType: core.TaskLog_IDE, + Ready: true, + }, + }, + }, + { + "Flyteinteractive disabled and K8s enabled and flyteinteractive config present in TaskTemplate", + &LogConfig{ + IsKubernetesEnabled: true, + KubernetesTemplateURI: "https://k8s.com/{{ .namespace }}/{{ .podName }}/{{ .containerName }}/{{ .containerId }}", + }, + &core.TaskTemplate{ + Config: map[string]string{ + "link_type": "vscode", + "port": "65535", + }, + }, + []*core.TaskLog{ + { + Uri: "https://k8s.com/my-namespace/my-pod/ContainerName/ContainerID", + MessageFormat: core.TaskLog_JSON, + Name: "Kubernetes Logs my-Suffix", + Ready: true, + }, + }, + }, + { + "Flyteinteractive and K8s enabled", + &LogConfig{ + IsKubernetesEnabled: true, + KubernetesTemplateURI: "https://k8s.com/{{ .namespace }}/{{ .podName }}/{{ .containerName }}/{{ .containerId }}", + DynamicLogLinks: map[string]tasklog.TemplateLogPlugin{ + "vscode": tasklog.TemplateLogPlugin{ + DisplayName: "vscode link", + TemplateURIs: []tasklog.TemplateURI{ + "https://flyteinteractive.mydomain.com:{{ .taskConfig.port }}/{{ .namespace }}/{{ .podName }}/{{ .containerName }}/{{ .containerId }}", + }, + }, + }, + }, + &core.TaskTemplate{ + Config: map[string]string{ + "link_type": "vscode", + "port": "65535", + }, + }, + []*core.TaskLog{ + { + Uri: "https://k8s.com/my-namespace/my-pod/ContainerName/ContainerID", + MessageFormat: core.TaskLog_JSON, + Name: "Kubernetes Logs my-Suffix", + LinkType: core.TaskLog_EXTERNAL, + Ready: true, + }, + { + Uri: "https://flyteinteractive.mydomain.com:65535/my-namespace/my-pod/ContainerName/ContainerID", + MessageFormat: core.TaskLog_JSON, + Name: "vscode link my-Suffix", + LinkType: core.TaskLog_IDE, + Ready: true, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + assertTestSucceeded(t, tt.config, tt.template, tt.expectedTaskLogs, "") + }) + } 
+} diff --git a/flyteplugins/go/tasks/pluginmachinery/bundle/fail_fast.go b/flyteplugins/go/tasks/pluginmachinery/bundle/fail_fast.go new file mode 100644 index 0000000000..2d1bb85b74 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/bundle/fail_fast.go @@ -0,0 +1,66 @@ +package bundle + +import ( + "context" + "fmt" + "time" + + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/errors" + pluginMachinery "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" +) + +const failFastExecutorName = "fail-fast" + +type failFastHandler struct{} + +func (h failFastHandler) GetID() string { + return failFastExecutorName +} + +func (h failFastHandler) GetProperties() core.PluginProperties { + return core.PluginProperties{} +} + +func (h failFastHandler) Handle(ctx context.Context, tCtx core.TaskExecutionContext) (core.Transition, error) { + occuredAt := time.Now() + taskTemplate, err := tCtx.TaskReader().Read(ctx) + if err != nil { + return core.UnknownTransition, + errors.Errorf(errors.BadTaskSpecification, "unable to fetch task specification [%v]", err.Error()) + } + return core.DoTransition(core.PhaseInfoFailure("AlwaysFail", + fmt.Sprintf("Task [%s] type [%+v] not supported by platform for this project/domain/workflow", + taskTemplate.Type, tCtx.TaskExecutionMetadata().GetTaskExecutionID()), &core.TaskInfo{ + OccurredAt: &occuredAt, + })), nil +} + +func (h failFastHandler) Abort(_ context.Context, _ core.TaskExecutionContext) error { + return nil +} + +func (h failFastHandler) Finalize(_ context.Context, _ core.TaskExecutionContext) error { + return nil +} + +func failFastPluginLoader(_ context.Context, _ core.SetupContext) (core.Plugin, error) { + return &failFastHandler{}, nil +} + +func init() { + // TODO(katrogan): Once we move pluginmachinery to flyteidl make these task types named constants that flyteplugins + // can reference in other handler definitions. 
+ // NOTE: these should match the constants defined flytekit + taskTypes := []core.TaskType{ + "container", "sidecar", "container_array", "hive", "presto", "spark", "pytorch", + "sagemaker_custom_training_job_task", "sagemaker_training_job_task", "sagemaker_hyperparameter_tuning_job_task", + } + pluginMachinery.PluginRegistry().RegisterCorePlugin( + core.PluginEntry{ + ID: failFastExecutorName, + RegisteredTaskTypes: taskTypes, + LoadPlugin: failFastPluginLoader, + IsDefault: false, + }) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/bundle/fail_fast_test.go b/flyteplugins/go/tasks/pluginmachinery/bundle/fail_fast_test.go new file mode 100644 index 0000000000..a5ce3f2552 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/bundle/fail_fast_test.go @@ -0,0 +1,63 @@ +package bundle + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core/mocks" + idlCore "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +var testHandler = failFastHandler{} + +func TestFailFastGetID(t *testing.T) { + assert.Equal(t, "fail-fast", testHandler.GetID()) +} + +func TestGetProperties(t *testing.T) { + assert.Empty(t, testHandler.GetProperties()) +} + +func TestHandleAlwaysFails(t *testing.T) { + tID := &mocks.TaskExecutionID{} + tID.On("GetID").Return(idlCore.TaskExecutionIdentifier{ + NodeExecutionId: &idlCore.NodeExecutionIdentifier{ + ExecutionId: &idlCore.WorkflowExecutionIdentifier{ + Name: "my_name", + Project: "my_project", + Domain: "my_domain", + }, + }, + }) + + taskExecutionMetadata := &mocks.TaskExecutionMetadata{} + taskExecutionMetadata.On("GetTaskExecutionID").Return(tID) + + taskCtx := &mocks.TaskExecutionContext{} + taskCtx.On("TaskExecutionMetadata").Return(taskExecutionMetadata) + taskReader := &mocks.TaskReader{} + taskReader.On("Read", 
mock.Anything).Return(&idlCore.TaskTemplate{ + Type: "unsupportedtype", + }, nil) + taskCtx.On("TaskReader").Return(taskReader) + + transition, err := testHandler.Handle(context.TODO(), taskCtx) + assert.NoError(t, err) + assert.Equal(t, core.PhasePermanentFailure, transition.Info().Phase()) + assert.Equal(t, "AlwaysFail", transition.Info().Err().Code) + assert.Contains(t, transition.Info().Err().Message, "Task [unsupportedtype]") +} + +func TestAbort(t *testing.T) { + err := testHandler.Abort(context.TODO(), nil) + assert.NoError(t, err) +} + +func TestFinalize(t *testing.T) { + err := testHandler.Finalize(context.TODO(), nil) + assert.NoError(t, err) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/catalog/async_client.go b/flyteplugins/go/tasks/pluginmachinery/catalog/async_client.go new file mode 100644 index 0000000000..bf973e7aae --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/catalog/async_client.go @@ -0,0 +1,83 @@ +package catalog + +import ( + "context" + + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/io" + "github.com/flyteorg/flyte/v2/flytestdlib/bitarray" + "github.com/flyteorg/flyte/v2/flytestdlib/errors" +) + +type ResponseStatus uint8 + +const ( + ResponseStatusNotReady ResponseStatus = iota + ResponseStatusReady +) + +const ( + ErrResponseNotReady errors.ErrorCode = "RESPONSE_NOT_READY" + ErrSystemError errors.ErrorCode = "SYSTEM_ERROR" +) + +type UploadRequest struct { + Key Key + ArtifactData io.OutputReader + ArtifactMetadata Metadata +} + +type ReadyHandler func(ctx context.Context, future Future) + +// A generic Future interface to represent async operations results +type Future interface { + // Gets the response status for the future. If the future represents multiple operations, the status will only be + // ready if all of them are. + GetResponseStatus() ResponseStatus + + // Sets a callback handler to be called when the future status changes to ready. 
+ OnReady(handler ReadyHandler) + + GetResponseError() error +} + +// Catalog Sidecar future to represent async process of uploading catalog artifacts. +type UploadFuture interface { + Future +} + +// Catalog Download Request to represent async operation download request. +type DownloadRequest struct { + Key Key + Target io.OutputWriter +} + +// Catalog download future to represent async process of downloading catalog artifacts. +type DownloadFuture interface { + Future + + // Gets the actual response from the future. This will return an error if the future isn't ready yet. + GetResponse() (DownloadResponse, error) +} + +// Catalog download response. +type DownloadResponse interface { + // Gets a bit set representing which items from the request were cached. + GetCachedResults() *bitarray.BitSet + + // Gets the total size of the cached result. + GetResultsSize() int + + // A convenience method to retrieve the number of cached items. + GetCachedCount() int +} + +// An interface that helps async interaction with catalog service +type AsyncClient interface { + // Returns if an entry exists for the given task and input. 
It returns the data as a LiteralMap + Download(ctx context.Context, requests ...DownloadRequest) (outputFuture DownloadFuture, err error) + + // Adds a new entry to catalog for the given task execution context and the generated output + Upload(ctx context.Context, requests ...UploadRequest) (putFuture UploadFuture, err error) +} + +var _ AsyncClient = AsyncClientImpl{} diff --git a/flyteplugins/go/tasks/pluginmachinery/catalog/async_client_impl.go b/flyteplugins/go/tasks/pluginmachinery/catalog/async_client_impl.go new file mode 100644 index 0000000000..0561acd9ea --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/catalog/async_client_impl.go @@ -0,0 +1,171 @@ +package catalog + +import ( + "context" + "encoding/base32" + "fmt" + "hash/fnv" + "reflect" + + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/workqueue" + "github.com/flyteorg/flyte/v2/flytestdlib/bitarray" + "github.com/flyteorg/flyte/v2/flytestdlib/errors" + "github.com/flyteorg/flyte/v2/flytestdlib/promutils" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +const specialEncoderKey = "abcdefghijklmnopqrstuvwxyz123456" + +var base32Encoder = base32.NewEncoding(specialEncoderKey).WithPadding(base32.NoPadding) + +// An async-client for catalog that can queue download and upload requests on workqueues. 
+type AsyncClientImpl struct { + Reader workqueue.IndexedWorkQueue + Writer workqueue.IndexedWorkQueue +} + +func formatWorkItemID(key Key, idx int, suffix string) string { + return fmt.Sprintf("%v-%v-%v", key, idx, suffix) +} + +func consistentHash(str string) (string, error) { + hasher := fnv.New32a() + _, err := hasher.Write([]byte(str)) + if err != nil { + return "", err + } + + b := hasher.Sum(nil) + return base32Encoder.EncodeToString(b), nil +} + +func hashInputs(ctx context.Context, key Key) (string, error) { + inputs := &core.LiteralMap{} + if key.TypedInterface.Inputs != nil { + retInputs, err := key.InputReader.Get(ctx) + if err != nil { + return "", err + } + inputs = retInputs + } + return HashLiteralMap(ctx, inputs, key.CacheIgnoreInputVars) +} + +func (c AsyncClientImpl) Download(ctx context.Context, requests ...DownloadRequest) (outputFuture DownloadFuture, err error) { + status := ResponseStatusReady + cachedResults := bitarray.NewBitSet(uint(len(requests))) + cachedCount := 0 + var respErr error + for idx, request := range requests { + uniqueOutputLoc, err := consistentHash(request.Target.GetOutputPrefixPath().String()) + if err != nil { + return nil, err + } + + workItemID := formatWorkItemID(request.Key, idx, uniqueOutputLoc) + err = c.Reader.Queue(ctx, workItemID, NewReaderWorkItem( + request.Key, + request.Target)) + + if err != nil { + return nil, err + } + + info, found, err := c.Reader.Get(workItemID) + if err != nil { + return nil, errors.Wrapf(ErrSystemError, err, "Failed to lookup from reader workqueue for info: %v", workItemID) + } + + if !found { + return nil, errors.Errorf(ErrSystemError, "Item not found in the reader workqueue even though it was just added. ItemID: %v", workItemID) + } + + switch info.Status() { + case workqueue.WorkStatusSucceeded: + readerWorkItem, casted := info.Item().(*ReaderWorkItem) + if !casted { + return nil, errors.Errorf(ErrSystemError, "Item wasn't casted to ReaderWorkItem. ItemID: %v. 
Type: %v", workItemID, reflect.TypeOf(info)) + } + + if readerWorkItem.IsCached() { + cachedResults.Set(uint(idx)) + cachedCount++ + } + case workqueue.WorkStatusFailed: + respErr = info.Error() + case workqueue.WorkStatusNotDone: + status = ResponseStatusNotReady + } + } + + return newDownloadFuture(status, respErr, cachedResults, len(requests), cachedCount), nil +} + +func (c AsyncClientImpl) Upload(ctx context.Context, requests ...UploadRequest) (putFuture UploadFuture, err error) { + status := ResponseStatusReady + var respErr error + for idx, request := range requests { + inputHash, err := hashInputs(ctx, request.Key) + if err != nil { + return nil, errors.Wrapf(ErrSystemError, err, "Failed to hash inputs for item: %v", request.Key) + } + workItemID := formatWorkItemID(request.Key, idx, inputHash) + err = c.Writer.Queue(ctx, workItemID, NewWriterWorkItem( + request.Key, + request.ArtifactData, + request.ArtifactMetadata)) + + if err != nil { + return nil, err + } + + info, found, err := c.Writer.Get(workItemID) + if err != nil { + return nil, errors.Wrapf(ErrSystemError, err, "Failed to lookup from writer workqueue for info: %v", workItemID) + } + + if !found { + return nil, errors.Errorf(ErrSystemError, "Item not found in the writer workqueue even though it was just added. 
ItemID: %v", workItemID) + } + + switch info.Status() { + case workqueue.WorkStatusNotDone: + status = ResponseStatusNotReady + case workqueue.WorkStatusFailed: + respErr = info.Error() + } + } + + return newUploadFuture(status, respErr), nil +} + +func (c AsyncClientImpl) Start(ctx context.Context) error { + if err := c.Reader.Start(ctx); err != nil { + return errors.Wrapf(ErrSystemError, err, "Failed to start reader queue.") + } + + if err := c.Writer.Start(ctx); err != nil { + return errors.Wrapf(ErrSystemError, err, "Failed to start writer queue.") + } + + return nil +} + +func NewAsyncClient(client Client, cfg Config, scope promutils.Scope) (AsyncClientImpl, error) { + readerWorkQueue, err := workqueue.NewIndexedWorkQueue("reader", NewReaderProcessor(client), cfg.ReaderWorkqueueConfig, + scope.NewSubScope("reader")) + if err != nil { + return AsyncClientImpl{}, err + } + + writerWorkQueue, err := workqueue.NewIndexedWorkQueue("writer", NewWriterProcessor(client), cfg.WriterWorkqueueConfig, + scope.NewSubScope("writer")) + if err != nil { + return AsyncClientImpl{}, err + } + + return AsyncClientImpl{ + Reader: readerWorkQueue, + Writer: writerWorkQueue, + }, nil +} diff --git a/flyteplugins/go/tasks/pluginmachinery/catalog/async_client_impl_test.go b/flyteplugins/go/tasks/pluginmachinery/catalog/async_client_impl_test.go new file mode 100644 index 0000000000..fd399e475e --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/catalog/async_client_impl_test.go @@ -0,0 +1,212 @@ +package catalog + +import ( + "context" + "reflect" + "testing" + + "github.com/stretchr/testify/mock" + + mocks2 "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/io/mocks" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/workqueue" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/workqueue/mocks" + "github.com/flyteorg/flyte/v2/flytestdlib/bitarray" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +var 
exampleInterface = &core.TypedInterface{ + Inputs: &core.VariableMap{ + Variables: map[string]*core.Variable{ + "a": { + Type: &core.LiteralType{ + Type: &core.LiteralType_Simple{ + Simple: core.SimpleType_INTEGER, + }, + }, + }, + }, + }, +} +var input1 = &core.LiteralMap{ + Literals: map[string]*core.Literal{ + "a": { + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_Primitive{ + Primitive: &core.Primitive{ + Value: &core.Primitive_Integer{ + Integer: 1, + }, + }, + }, + }, + }, + }, + }, +} +var input2 = &core.LiteralMap{ + Literals: map[string]*core.Literal{ + "a": { + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_Primitive{ + Primitive: &core.Primitive{ + Value: &core.Primitive_Integer{ + Integer: 2, + }, + }, + }, + }, + }, + }, + }, +} + +func TestAsyncClientImpl_Download(t *testing.T) { + ctx := context.Background() + + q := &mocks.IndexedWorkQueue{} + info := &mocks.WorkItemInfo{} + info.OnItem().Return(NewReaderWorkItem(Key{}, &mocks2.OutputWriter{})) + info.OnStatus().Return(workqueue.WorkStatusSucceeded) + q.OnGetMatch(mock.Anything).Return(info, true, nil) + q.OnQueueMatch(mock.Anything, mock.Anything, mock.Anything).Return(nil) + + ow := &mocks2.OutputWriter{} + ow.OnGetOutputPrefixPath().Return("/prefix/") + ow.OnGetOutputPath().Return("/prefix/outputs.pb") + + tests := []struct { + name string + reader workqueue.IndexedWorkQueue + requests []DownloadRequest + wantOutputFuture DownloadFuture + wantErr bool + }{ + {"DownloadQueued", q, []DownloadRequest{ + { + Key: Key{}, + Target: ow, + }, + }, newDownloadFuture(ResponseStatusReady, nil, bitarray.NewBitSet(1), 1, 0), false}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := AsyncClientImpl{ + Reader: tt.reader, + } + gotOutputFuture, err := c.Download(ctx, tt.requests...) 
+ if (err != nil) != tt.wantErr { + t.Errorf("AsyncClientImpl.Download() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(gotOutputFuture, tt.wantOutputFuture) { + t.Errorf("AsyncClientImpl.Download() = %v, want %v", gotOutputFuture, tt.wantOutputFuture) + } + }) + } +} + +func TestAsyncClientImpl_Upload(t *testing.T) { + ctx := context.Background() + + inputHash1 := "{{{} [] [] } 0 [] UNSPECIFIED }:-0-DNhkpTTPC5YDtRGb4yT-PFxgMSgHzHrKAQKgQGEfGRY" + inputHash2 := "{{{} [] [] } 0 [] UNSPECIFIED }:-1-26M4dwarvBVJqJSUC4JC1GtRYgVBIAmQfsFSdLVMlAc" + + q := &mocks.IndexedWorkQueue{} + info := &mocks.WorkItemInfo{} + info.OnItem().Return(NewReaderWorkItem(Key{}, &mocks2.OutputWriter{})) + info.OnStatus().Return(workqueue.WorkStatusSucceeded) + q.OnGetMatch(mock.Anything).Return(info, true, nil) + q.OnGetMatch(mock.Anything).Return(info, true, nil) + q.OnQueueMatch(mock.Anything, mock.Anything, mock.Anything).Return(nil) + + inputReader1 := &mocks2.InputReader{} + inputReader1.OnGetMatch(mock.Anything).Return(input1, nil) + inputReader2 := &mocks2.InputReader{} + inputReader2.OnGetMatch(mock.Anything).Return(input2, nil) + + tests := []struct { + name string + requests []UploadRequest + wantPutFuture UploadFuture + wantErr bool + }{ + { + "UploadSucceeded", + // The second request has the same Key.Identifier and Key.Cache version but a different + // Key.InputReader. This should lead to a different WorkItemID in the queue. + // See https://github.com/flyteorg/flyte/issues/3787 for more details + []UploadRequest{ + { + Key: Key{ + TypedInterface: *exampleInterface, + InputReader: inputReader1, + }, + }, + { + Key: Key{ + TypedInterface: *exampleInterface, + InputReader: inputReader2, + }, + }, + }, + newUploadFuture(ResponseStatusReady, nil), + false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := AsyncClientImpl{ + Writer: q, + } + gotPutFuture, err := c.Upload(ctx, tt.requests...) 
+ if (err != nil) != tt.wantErr { + t.Errorf("AsyncClientImpl.Sidecar() error = %v, wantErr %v", err, tt.wantErr) + return + } + if !reflect.DeepEqual(gotPutFuture, tt.wantPutFuture) { + t.Errorf("AsyncClientImpl.Sidecar() = %v, want %v", gotPutFuture, tt.wantPutFuture) + } + expectedWorkItemIDs := []string{inputHash1, inputHash2} + gottenWorkItemIDs := make([]string, 0) + for _, mockCall := range q.Calls { + if mockCall.Method == "Get" { + gottenWorkItemIDs = append(gottenWorkItemIDs, mockCall.Arguments[0].(string)) + } + } + if !reflect.DeepEqual(gottenWorkItemIDs, expectedWorkItemIDs) { + t.Errorf("Retrieved workitem IDs = \n|%v|, want \n|%v|", gottenWorkItemIDs, expectedWorkItemIDs) + } + }) + } +} + +func TestAsyncClientImpl_Start(t *testing.T) { + type fields struct { + Reader workqueue.IndexedWorkQueue + Writer workqueue.IndexedWorkQueue + } + type args struct { + ctx context.Context + } + tests := []struct { + name string + fields fields + args args + wantErr bool + }{ + // TODO: Add test cases. 
+ } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := AsyncClientImpl{ + Reader: tt.fields.Reader, + Writer: tt.fields.Writer, + } + if err := c.Start(tt.args.ctx); (err != nil) != tt.wantErr { + t.Errorf("AsyncClientImpl.Start() error = %v, wantErr %v", err, tt.wantErr) + } + }) + } +} diff --git a/flyteplugins/go/tasks/pluginmachinery/catalog/client.go b/flyteplugins/go/tasks/pluginmachinery/catalog/client.go new file mode 100644 index 0000000000..b51b33f453 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/catalog/client.go @@ -0,0 +1,166 @@ +package catalog + +import ( + "context" + "fmt" + "time" + + "google.golang.org/grpc/codes" + grpcStatus "google.golang.org/grpc/status" + "google.golang.org/protobuf/types/known/timestamppb" + + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/io" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/datacatalog" +) + +//go:generate mockery -all -case=underscore + +// Metadata to be associated with the catalog object +type Metadata struct { + WorkflowExecutionIdentifier *core.WorkflowExecutionIdentifier + NodeExecutionIdentifier *core.NodeExecutionIdentifier + TaskExecutionIdentifier *core.TaskExecutionIdentifier + CreatedAt *timestamppb.Timestamp +} + +// An identifier for a catalog object. +type Key struct { + Identifier core.Identifier + CacheVersion string + CacheIgnoreInputVars []string + TypedInterface core.TypedInterface + InputReader io.InputReader + CacheKey string +} + +type ReservationCache struct { + Timestamp time.Time + ReservationStatus core.CatalogReservation_Status +} + +func (k Key) String() string { + return fmt.Sprintf("%v:%v", k.Identifier, k.CacheVersion) +} + +// Indicates that status of the query to Catalog. 
This can be returned for both Get and Put calls +type Status struct { + cacheStatus core.CatalogCacheStatus + metadata *core.CatalogMetadata +} + +func (s Status) GetCacheStatus() core.CatalogCacheStatus { + return s.cacheStatus +} + +func (s Status) GetMetadata() *core.CatalogMetadata { + return s.metadata +} + +func NewPutFailureStatus(key *Key) Status { + md := &core.CatalogMetadata{ + DatasetId: &key.Identifier, + } + return Status{cacheStatus: core.CatalogCacheStatus_CACHE_PUT_FAILURE, metadata: md} +} + +func NewStatus(cacheStatus core.CatalogCacheStatus, md *core.CatalogMetadata) Status { + return Status{cacheStatus: cacheStatus, metadata: md} +} + +// Indicates the Entry in Catalog that was populated +type Entry struct { + outputs io.OutputReader + status Status +} + +func (e Entry) GetOutputs() io.OutputReader { + return e.outputs +} + +func (e Entry) GetStatus() Status { + return e.status +} + +func NewFailedCatalogEntry(status Status) Entry { + return Entry{status: status} +} + +func NewCatalogEntry(outputs io.OutputReader, status Status) Entry { + return Entry{outputs: outputs, status: status} +} + +// ReservationEntry encapsulates the current state of an artifact reservation within the catalog +type ReservationEntry struct { + expiresAt time.Time + heartbeatInterval time.Duration + ownerID string + status core.CatalogReservation_Status +} + +// Returns the expiration timestamp at which the reservation will no longer be valid +func (r ReservationEntry) GetExpiresAt() time.Time { + return r.expiresAt +} + +// Returns the heartbeat interval, denoting how often the catalog expects a reservation extension request +func (r ReservationEntry) GetHeartbeatInterval() time.Duration { + return r.heartbeatInterval +} + +// Returns the ID of the current reservation owner +func (r ReservationEntry) GetOwnerID() string { + return r.ownerID +} + +// Returns the status of the attempted reservation operation +func (r ReservationEntry) GetStatus() 
core.CatalogReservation_Status { + return r.status +} + +// Creates a new ReservationEntry using the status, all other fields are set to default values +func NewReservationEntryStatus(status core.CatalogReservation_Status) ReservationEntry { + duration := 0 * time.Second + return ReservationEntry{ + expiresAt: time.Time{}, + heartbeatInterval: duration, + ownerID: "", + status: status, + } +} + +// Creates a new ReservationEntry populated with the specified parameters +func NewReservationEntry(expiresAt time.Time, heartbeatInterval time.Duration, ownerID string, status core.CatalogReservation_Status) ReservationEntry { + return ReservationEntry{ + expiresAt: expiresAt, + heartbeatInterval: heartbeatInterval, + ownerID: ownerID, + status: status, + } +} + +// Client represents the default Catalog client that allows memoization and indexing of intermediate data in Flyte +type Client interface { + // Get returns the artifact associated with the given key. + Get(ctx context.Context, key Key) (Entry, error) + // GetOrExtendReservation tries to retrieve a (valid) reservation for the given key, creating a new one using the + // specified owner ID if none was found or updating an existing one if it has expired. + GetOrExtendReservation(ctx context.Context, key Key, ownerID string, heartbeatInterval time.Duration) (*datacatalog.Reservation, error) + // Put stores the given data using the specified key, creating artifact entries as required. + // To update an existing artifact, use Update instead. + Put(ctx context.Context, key Key, reader io.OutputReader, metadata Metadata) (Status, error) + // Update updates existing data stored at the specified key, overwriting artifact entries with the new data provided. + // To create a new (non-existent) artifact, use Put instead. + Update(ctx context.Context, key Key, reader io.OutputReader, metadata Metadata) (Status, error) + // ReleaseReservation releases an acquired reservation for the given key and owner ID. 
+ ReleaseReservation(ctx context.Context, key Key, ownerID string) error + // GetReservationCache checks the reservation cache for the given owner ID + GetReservationCache(ownerID string) ReservationCache + // UpdateReservationCache updates the reservation cache for the given owner ID + UpdateReservationCache(ownerID string, entry ReservationCache) +} + +func IsNotFound(err error) bool { + taskStatus, ok := grpcStatus.FromError(err) + return ok && taskStatus.Code() == codes.NotFound +} diff --git a/flyteplugins/go/tasks/pluginmachinery/catalog/client_test.go b/flyteplugins/go/tasks/pluginmachinery/catalog/client_test.go new file mode 100644 index 0000000000..2b53d80815 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/catalog/client_test.go @@ -0,0 +1,123 @@ +package catalog + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/io/mocks" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +var ( + cacheStatus = core.CatalogCacheStatus_CACHE_MISS + catalogMetadata = core.CatalogMetadata{ + DatasetId: &core.Identifier{ + Project: "project", + Domain: "domain", + Name: "name", + }, + ArtifactTag: &core.CatalogArtifactTag{ + ArtifactId: "artifactID", + Name: "artifactName", + }, + } + key = &Key{ + Identifier: core.Identifier{ + Project: "project", + Domain: "domain", + Name: "name", + Version: "1.0.0", + }, + CacheVersion: "1.0.0", + TypedInterface: core.TypedInterface{ + Inputs: nil, + Outputs: nil, + }, + } +) + +func TestNewPutFailureStatus(t *testing.T) { + status := NewPutFailureStatus(key) + + assert.Equal(t, status.GetCacheStatus(), core.CatalogCacheStatus_CACHE_PUT_FAILURE) + assert.EqualValues(t, status.GetMetadata().GetDatasetId(), &key.Identifier) +} + +func TestStatus(t *testing.T) { + status := NewStatus(cacheStatus, &catalogMetadata) + + assert.Equal(t, status.GetCacheStatus(), cacheStatus) + assert.Equal(t, status.GetMetadata().DatasetId.Project, 
catalogMetadata.DatasetId.Project) + assert.Equal(t, status.GetMetadata().DatasetId.Domain, catalogMetadata.DatasetId.Domain) + assert.Equal(t, status.GetMetadata().DatasetId.Name, catalogMetadata.DatasetId.Name) + assert.Equal(t, status.GetMetadata().ArtifactTag.ArtifactId, catalogMetadata.ArtifactTag.ArtifactId) + assert.Equal(t, status.GetMetadata().ArtifactTag.Name, catalogMetadata.ArtifactTag.Name) +} + +func TestEntry(t *testing.T) { + tests := []struct { + name string + entry Entry + }{ + { + "base", + NewCatalogEntry(&mocks.OutputReader{}, NewStatus(cacheStatus, &catalogMetadata)), + }, + { + "failed", + NewFailedCatalogEntry(NewStatus(cacheStatus, &catalogMetadata)), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + status := tt.entry.GetStatus() + assert.Equal(t, status.GetCacheStatus(), cacheStatus) + assert.Equal(t, status.GetMetadata().DatasetId.Project, catalogMetadata.DatasetId.Project) + assert.Equal(t, status.GetMetadata().DatasetId.Domain, catalogMetadata.DatasetId.Domain) + assert.Equal(t, status.GetMetadata().DatasetId.Name, catalogMetadata.DatasetId.Name) + assert.Equal(t, status.GetMetadata().ArtifactTag.ArtifactId, catalogMetadata.ArtifactTag.ArtifactId) + assert.Equal(t, status.GetMetadata().ArtifactTag.Name, catalogMetadata.ArtifactTag.Name) + }) + } +} + +func TestReservationEntry(t *testing.T) { + reservationStatus := core.CatalogReservation_RESERVATION_ACQUIRED + tests := []struct { + name string + reservationEntry ReservationEntry + expiresAt time.Time + heartbeatInterval time.Duration + ownerID string + status core.CatalogReservation_Status + }{ + { + "base", + NewReservationEntry(time.Time{}, 5*time.Second, "owner", reservationStatus), + time.Time{}, + 5 * time.Second, + "owner", + reservationStatus, + }, + { + "status", + NewReservationEntryStatus(reservationStatus), + time.Time{}, + 0 * time.Second, + "", + reservationStatus, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { 
+ assert.Equal(t, tt.reservationEntry.GetExpiresAt(), tt.expiresAt) + assert.Equal(t, tt.reservationEntry.GetHeartbeatInterval(), tt.heartbeatInterval) + assert.Equal(t, tt.reservationEntry.GetOwnerID(), tt.ownerID) + assert.Equal(t, tt.reservationEntry.GetStatus(), tt.status) + }) + } +} diff --git a/flyteplugins/go/tasks/pluginmachinery/catalog/config.go b/flyteplugins/go/tasks/pluginmachinery/catalog/config.go new file mode 100644 index 0000000000..f37aca05a8 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/catalog/config.go @@ -0,0 +1,37 @@ +package catalog + +import ( + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/config" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/workqueue" +) + +//go:generate pflags Config --default-var=defaultConfig + +var cfgSection = config.MustRegisterSubSection("catalogCache", defaultConfig) + +type Config struct { + ReaderWorkqueueConfig workqueue.Config `json:"reader" pflag:",Catalog reader workqueue config. Make sure the index cache must be big enough to accommodate the biggest array task allowed to run on the system."` + WriterWorkqueueConfig workqueue.Config `json:"writer" pflag:",Catalog writer workqueue config. Make sure the index cache must be big enough to accommodate the biggest array task allowed to run on the system."` + CacheKey CacheKeyConfig `json:"cacheKey" pflag:",Cache key configuration."` +} + +type CacheKeyConfig struct { + EnforceExecutionProjectDomain bool `json:"enforceExecutionProjectDomain" pflag:", Use execution project domain when computing the cache key. 
This means that even if you reference tasks/launchplans from a different project, cache keys will be computed based on the execution project domain instead."` +} + +var defaultConfig = &Config{ + ReaderWorkqueueConfig: workqueue.Config{ + MaxRetries: 3, + Workers: 10, + IndexCacheMaxItems: 10000, + }, + WriterWorkqueueConfig: workqueue.Config{ + MaxRetries: 3, + Workers: 10, + IndexCacheMaxItems: 10000, + }, +} + +func GetConfig() *Config { + return cfgSection.GetConfig().(*Config) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/catalog/config_flags.go b/flyteplugins/go/tasks/pluginmachinery/catalog/config_flags.go new file mode 100755 index 0000000000..0f62ec99ae --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/catalog/config_flags.go @@ -0,0 +1,60 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package catalog + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. +func (Config) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (Config) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (Config) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in Config and its nested types. The format of the +// flags is json-name.json-sub-name... etc. 
+func (cfg Config) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("Config", pflag.ExitOnError) + cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "reader.workers"), defaultConfig.ReaderWorkqueueConfig.Workers, "Number of concurrent workers to start processing the queue.") + cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "reader.maxRetries"), defaultConfig.ReaderWorkqueueConfig.MaxRetries, "Maximum number of retries per item.") + cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "reader.maxItems"), defaultConfig.ReaderWorkqueueConfig.IndexCacheMaxItems, "Maximum number of entries to keep in the index.") + cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "writer.workers"), defaultConfig.WriterWorkqueueConfig.Workers, "Number of concurrent workers to start processing the queue.") + cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "writer.maxRetries"), defaultConfig.WriterWorkqueueConfig.MaxRetries, "Maximum number of retries per item.") + cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "writer.maxItems"), defaultConfig.WriterWorkqueueConfig.IndexCacheMaxItems, "Maximum number of entries to keep in the index.") + return cmdFlags +} diff --git a/flyteplugins/go/tasks/pluginmachinery/catalog/config_flags_test.go b/flyteplugins/go/tasks/pluginmachinery/catalog/config_flags_test.go new file mode 100755 index 0000000000..57a4397361 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/catalog/config_flags_test.go @@ -0,0 +1,186 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package catalog + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. 
+func canGetElementConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. +func jsonUnmarshalerHookConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_Config(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_Config(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_Config(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_Config(val, result)) +} + +func testDecodeRaw_Config(t *testing.T, vStringSlice, result interface{}) { + 
assert.NoError(t, decode_Config(vStringSlice, result)) +} + +func TestConfig_GetPFlagSet(t *testing.T) { + val := Config{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) +} + +func TestConfig_SetFlags(t *testing.T) { + actual := Config{} + cmdFlags := actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_reader.workers", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("reader.workers", testValue) + if vInt, err := cmdFlags.GetInt("reader.workers"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vInt), &actual.ReaderWorkqueueConfig.Workers) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_reader.maxRetries", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("reader.maxRetries", testValue) + if vInt, err := cmdFlags.GetInt("reader.maxRetries"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vInt), &actual.ReaderWorkqueueConfig.MaxRetries) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_reader.maxItems", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("reader.maxItems", testValue) + if vInt, err := cmdFlags.GetInt("reader.maxItems"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vInt), &actual.ReaderWorkqueueConfig.IndexCacheMaxItems) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_writer.workers", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("writer.workers", testValue) + if vInt, err := cmdFlags.GetInt("writer.workers"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vInt), &actual.WriterWorkqueueConfig.Workers) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_writer.maxRetries", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + 
cmdFlags.Set("writer.maxRetries", testValue) + if vInt, err := cmdFlags.GetInt("writer.maxRetries"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vInt), &actual.WriterWorkqueueConfig.MaxRetries) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_writer.maxItems", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("writer.maxItems", testValue) + if vInt, err := cmdFlags.GetInt("writer.maxItems"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vInt), &actual.WriterWorkqueueConfig.IndexCacheMaxItems) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/catalog/hashing.go b/flyteplugins/go/tasks/pluginmachinery/catalog/hashing.go new file mode 100644 index 0000000000..cee29eba76 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/catalog/hashing.go @@ -0,0 +1,102 @@ +package catalog + +import ( + "context" + "encoding/base64" + + "k8s.io/utils/strings/slices" + + "github.com/flyteorg/flyte/v2/flytestdlib/pbhash" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +var emptyLiteralMap = core.LiteralMap{Literals: map[string]*core.Literal{}} + +// Hashify a literal, in other words, produce a new literal where the corresponding value is removed in case +// the literal hash is set. +func hashify(literal *core.Literal) *core.Literal { + // If the hash is set, return an empty literal with the same hash, + // regardless of type (scalar/collection/map). + if literal.GetHash() != "" { + return &core.Literal{ + Hash: literal.GetHash(), + } + } + + // Two recursive cases: + // 1. A collection of literals or + // 2. 
A map of literals + if literal.GetCollection() != nil { + literals := literal.GetCollection().Literals + literalsHash := make([]*core.Literal, 0) + for _, lit := range literals { + literalsHash = append(literalsHash, hashify(lit)) + } + return &core.Literal{ + Value: &core.Literal_Collection{ + Collection: &core.LiteralCollection{ + Literals: literalsHash, + }, + }, + } + } + if literal.GetMap() != nil { + literalsMap := make(map[string]*core.Literal) + for key, lit := range literal.GetMap().Literals { + literalsMap[key] = hashify(lit) + } + return &core.Literal{ + Value: &core.Literal_Map{ + Map: &core.LiteralMap{ + Literals: literalsMap, + }, + }, + } + } + + return literal +} + +func HashLiteralMap(ctx context.Context, literalMap *core.LiteralMap, cacheIgnoreInputVars []string) (string, error) { + if literalMap == nil || len(literalMap.Literals) == 0 { + literalMap = &emptyLiteralMap + } + + // Hashify, i.e. generate a copy of the literal map where each literal value is removed + // in case the corresponding hash is set. 
+ hashifiedLiteralMap := make(map[string]*core.Literal, len(literalMap.Literals)) + for name, literal := range literalMap.Literals { + if !slices.Contains(cacheIgnoreInputVars, name) { + hashifiedLiteralMap[name] = hashify(literal) + } + } + hashifiedInputs := &core.LiteralMap{ + Literals: hashifiedLiteralMap, + } + + inputsHash, err := pbhash.ComputeHash(ctx, hashifiedInputs) + if err != nil { + return "", err + } + + return base64.RawURLEncoding.EncodeToString(inputsHash), nil +} + +func HashIdentifierExceptVersion(ctx context.Context, id core.Identifier) (string, error) { + + // Exclude version from the ID hash to support cache hits across different versions of the same resource + idCopy := &core.Identifier{ + ResourceType: id.ResourceType, + Project: id.Project, + Domain: id.Domain, + Name: id.Name, + Org: id.Org, + } + + hash, err := pbhash.ComputeHashString(ctx, idCopy) + if err != nil { + return "", err + } + + return hash, nil +} diff --git a/flyteplugins/go/tasks/pluginmachinery/catalog/hashing_test.go b/flyteplugins/go/tasks/pluginmachinery/catalog/hashing_test.go new file mode 100644 index 0000000000..3300f47e2e --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/catalog/hashing_test.go @@ -0,0 +1,699 @@ +package catalog + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/flyteorg/flyte/v2/flyteidl2/clients/go/coreutils" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +func TestHashLiteralMap_LiteralsWithHashSet(t *testing.T) { + tests := []struct { + name string + literal *core.Literal + expectedLiteral *core.Literal + }{ + { + name: "single literal where hash is not set", + literal: coreutils.MustMakeLiteral(42), + expectedLiteral: coreutils.MustMakeLiteral(42), + }, + { + name: "single literal containing hash", + literal: &core.Literal{ + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_StructuredDataset{ + StructuredDataset: &core.StructuredDataset{ + Uri: 
"my-blob-stora://some-address", + Metadata: &core.StructuredDatasetMetadata{ + StructuredDatasetType: &core.StructuredDatasetType{ + Format: "my-columnar-data-format", + }, + }, + }, + }, + }, + }, + Hash: "abcde", + }, + expectedLiteral: &core.Literal{ + Value: nil, + Hash: "abcde", + }, + }, + { + name: "list of literals containing a single item where literal sets its hash", + literal: &core.Literal{ + Value: &core.Literal_Collection{ + Collection: &core.LiteralCollection{ + Literals: []*core.Literal{ + { + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_StructuredDataset{ + StructuredDataset: &core.StructuredDataset{ + Uri: "my-blob-stora://some-address", + Metadata: &core.StructuredDatasetMetadata{ + StructuredDatasetType: &core.StructuredDatasetType{ + Format: "my-columnar-data-format", + }, + }, + }, + }, + }, + }, + Hash: "hash1", + }, + }, + }, + }, + }, + expectedLiteral: &core.Literal{ + Value: &core.Literal_Collection{ + Collection: &core.LiteralCollection{ + Literals: []*core.Literal{ + { + Value: nil, + Hash: "hash1", + }, + }, + }, + }, + }, + }, + { + name: "list of literals containing two items where each literal sets its hash", + literal: &core.Literal{ + Value: &core.Literal_Collection{ + Collection: &core.LiteralCollection{ + Literals: []*core.Literal{ + { + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_StructuredDataset{ + StructuredDataset: &core.StructuredDataset{ + Uri: "my-blob-stora://some-address", + Metadata: &core.StructuredDatasetMetadata{ + StructuredDatasetType: &core.StructuredDatasetType{ + Format: "my-columnar-data-format", + }, + }, + }, + }, + }, + }, + Hash: "hash1", + }, + { + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_StructuredDataset{ + StructuredDataset: &core.StructuredDataset{ + Uri: "my-blob-stora://another-address", + Metadata: &core.StructuredDatasetMetadata{ + StructuredDatasetType: &core.StructuredDatasetType{ + Format: 
"my-columnar-data-format", + }, + }, + }, + }, + }, + }, + Hash: "hash2", + }, + }, + }, + }, + }, + expectedLiteral: &core.Literal{ + Value: &core.Literal_Collection{ + Collection: &core.LiteralCollection{ + Literals: []*core.Literal{ + { + Value: nil, + Hash: "hash1", + }, + { + Value: nil, + Hash: "hash2", + }, + }, + }, + }, + }, + }, + { + name: "list of literals containing two items where only one literal has its hash set", + literal: &core.Literal{ + Value: &core.Literal_Collection{ + Collection: &core.LiteralCollection{ + Literals: []*core.Literal{ + { + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_StructuredDataset{ + StructuredDataset: &core.StructuredDataset{ + Uri: "my-blob-stora://some-address", + Metadata: &core.StructuredDatasetMetadata{ + StructuredDatasetType: &core.StructuredDatasetType{ + Format: "my-columnar-data-format", + }, + }, + }, + }, + }, + }, + Hash: "hash1", + }, + { + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_StructuredDataset{ + StructuredDataset: &core.StructuredDataset{ + Uri: "my-blob-stora://another-address", + Metadata: &core.StructuredDatasetMetadata{ + StructuredDatasetType: &core.StructuredDatasetType{ + Format: "my-columnar-data-format", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + expectedLiteral: &core.Literal{ + Value: &core.Literal_Collection{ + Collection: &core.LiteralCollection{ + Literals: []*core.Literal{ + { + Value: nil, + Hash: "hash1", + }, + { + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_StructuredDataset{ + StructuredDataset: &core.StructuredDataset{ + Uri: "my-blob-stora://another-address", + Metadata: &core.StructuredDatasetMetadata{ + StructuredDatasetType: &core.StructuredDatasetType{ + Format: "my-columnar-data-format", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "map of literals containing a single item where literal sets its hash", + literal: &core.Literal{ + Value: 
&core.Literal_Map{ + Map: &core.LiteralMap{ + Literals: map[string]*core.Literal{ + "literal1": { + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_StructuredDataset{ + StructuredDataset: &core.StructuredDataset{ + Uri: "my-blob-stora://some-address", + Metadata: &core.StructuredDatasetMetadata{ + StructuredDatasetType: &core.StructuredDatasetType{ + Format: "my-columnar-data-format", + }, + }, + }, + }, + }, + }, + Hash: "hash-42", + }, + }, + }, + }, + }, + expectedLiteral: &core.Literal{ + Value: &core.Literal_Map{ + Map: &core.LiteralMap{ + Literals: map[string]*core.Literal{ + "literal1": { + Value: nil, + Hash: "hash-42", + }, + }, + }, + }, + }, + }, + { + name: "map of literals containing a three items where only one literal sets its hash", + literal: &core.Literal{ + Value: &core.Literal_Map{ + Map: &core.LiteralMap{ + Literals: map[string]*core.Literal{ + "literal1": { + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_StructuredDataset{ + StructuredDataset: &core.StructuredDataset{ + Uri: "my-blob-stora://some-address", + Metadata: &core.StructuredDatasetMetadata{ + StructuredDatasetType: &core.StructuredDatasetType{ + Format: "my-columnar-data-format", + }, + }, + }, + }, + }, + }, + }, + "literal2-set-its-hash": { + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_StructuredDataset{ + StructuredDataset: &core.StructuredDataset{ + Uri: "my-blob-stora://some-address-for-literal-2", + Metadata: &core.StructuredDatasetMetadata{ + StructuredDatasetType: &core.StructuredDatasetType{ + Format: "my-columnar-data-format", + }, + }, + }, + }, + }, + }, + Hash: "literal-2-hash", + }, + "literal3": { + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_StructuredDataset{ + StructuredDataset: &core.StructuredDataset{ + Uri: "my-blob-stora://some-address-for-literal-3", + Metadata: &core.StructuredDatasetMetadata{ + StructuredDatasetType: 
&core.StructuredDatasetType{ + Format: "my-columnar-data-format", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + expectedLiteral: &core.Literal{ + Value: &core.Literal_Map{ + Map: &core.LiteralMap{ + Literals: map[string]*core.Literal{ + "literal1": { + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_StructuredDataset{ + StructuredDataset: &core.StructuredDataset{ + Uri: "my-blob-stora://some-address", + Metadata: &core.StructuredDatasetMetadata{ + StructuredDatasetType: &core.StructuredDatasetType{ + Format: "my-columnar-data-format", + }, + }, + }, + }, + }, + }, + }, + "literal2-set-its-hash": { + Value: nil, + Hash: "literal-2-hash", + }, + "literal3": { + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_StructuredDataset{ + StructuredDataset: &core.StructuredDataset{ + Uri: "my-blob-stora://some-address-for-literal-3", + Metadata: &core.StructuredDatasetMetadata{ + StructuredDatasetType: &core.StructuredDatasetType{ + Format: "my-columnar-data-format", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "list of map of literals containing a mixture of literals have their hashes set or not set", + literal: &core.Literal{ + Value: &core.Literal_Collection{ + Collection: &core.LiteralCollection{ + Literals: []*core.Literal{ + { + Value: &core.Literal_Map{ + Map: &core.LiteralMap{ + Literals: map[string]*core.Literal{ + "literal1": { + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_StructuredDataset{ + StructuredDataset: &core.StructuredDataset{ + Uri: "my-blob-stora://some-address", + Metadata: &core.StructuredDatasetMetadata{ + StructuredDatasetType: &core.StructuredDatasetType{ + Format: "my-columnar-data-format", + }, + }, + }, + }, + }, + }, + }, + "literal2-set-its-hash": { + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_StructuredDataset{ + StructuredDataset: &core.StructuredDataset{ + Uri: 
"my-blob-stora://some-address-for-literal-2", + Metadata: &core.StructuredDatasetMetadata{ + StructuredDatasetType: &core.StructuredDatasetType{ + Format: "my-columnar-data-format", + }, + }, + }, + }, + }, + }, + Hash: "literal-2-hash", + }, + "literal3": { + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_StructuredDataset{ + StructuredDataset: &core.StructuredDataset{ + Uri: "my-blob-stora://some-address-for-literal-3", + Metadata: &core.StructuredDatasetMetadata{ + StructuredDatasetType: &core.StructuredDatasetType{ + Format: "my-columnar-data-format", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + Value: &core.Literal_Map{ + Map: &core.LiteralMap{ + Literals: map[string]*core.Literal{ + "another-literal-1": { + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_StructuredDataset{ + StructuredDataset: &core.StructuredDataset{ + Uri: "my-blob-stora://some-address-for-another-literal-1", + Metadata: &core.StructuredDatasetMetadata{ + StructuredDatasetType: &core.StructuredDatasetType{ + Format: "my-columnar-data-format", + }, + }, + }, + }, + }, + }, + Hash: "another-literal-1-hash", + }, + "another-literal2-set-its-hash": { + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_StructuredDataset{ + StructuredDataset: &core.StructuredDataset{ + Uri: "my-blob-stora://some-address-for-literal-2", + Metadata: &core.StructuredDatasetMetadata{ + StructuredDatasetType: &core.StructuredDatasetType{ + Format: "my-columnar-data-format", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + expectedLiteral: &core.Literal{ + Value: &core.Literal_Collection{ + Collection: &core.LiteralCollection{ + Literals: []*core.Literal{ + { + Value: &core.Literal_Map{ + Map: &core.LiteralMap{ + Literals: map[string]*core.Literal{ + "literal1": { + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_StructuredDataset{ + StructuredDataset: 
&core.StructuredDataset{ + Uri: "my-blob-stora://some-address", + Metadata: &core.StructuredDatasetMetadata{ + StructuredDatasetType: &core.StructuredDatasetType{ + Format: "my-columnar-data-format", + }, + }, + }, + }, + }, + }, + }, + "literal2-set-its-hash": { + Value: nil, + Hash: "literal-2-hash", + }, + "literal3": { + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_StructuredDataset{ + StructuredDataset: &core.StructuredDataset{ + Uri: "my-blob-stora://some-address-for-literal-3", + Metadata: &core.StructuredDatasetMetadata{ + StructuredDatasetType: &core.StructuredDatasetType{ + Format: "my-columnar-data-format", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + Value: &core.Literal_Map{ + Map: &core.LiteralMap{ + Literals: map[string]*core.Literal{ + "another-literal-1": { + Value: nil, + Hash: "another-literal-1-hash", + }, + "another-literal2-set-its-hash": { + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_StructuredDataset{ + StructuredDataset: &core.StructuredDataset{ + Uri: "my-blob-stora://some-address-for-literal-2", + Metadata: &core.StructuredDatasetMetadata{ + StructuredDatasetType: &core.StructuredDatasetType{ + Format: "my-columnar-data-format", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "literal map containing hash", + literal: &core.Literal{ + Value: &core.Literal_Map{ + Map: &core.LiteralMap{ + Literals: map[string]*core.Literal{ + "hello": { + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_Primitive{ + Primitive: &core.Primitive{ + Value: &core.Primitive_StringValue{ + StringValue: "world", + }, + }, + }, + }, + }, + }, + }, + }, + }, + Hash: "0xffff", + }, + expectedLiteral: &core.Literal{ + Value: nil, + Hash: "0xffff", + }, + }, + { + name: "literal collection containing hash", + literal: &core.Literal{ + Value: &core.Literal_Collection{ + Collection: &core.LiteralCollection{ + Literals: 
[]*core.Literal{
							{
								Value: &core.Literal_Scalar{
									Scalar: &core.Scalar{
										Value: &core.Scalar_Primitive{
											Primitive: &core.Primitive{
												Value: &core.Primitive_Integer{
													Integer: 42,
												},
											},
										},
									},
								},
							},
						},
					},
				},
				Hash: "0xabcdef",
			},
			expectedLiteral: &core.Literal{
				Value: nil,
				Hash:  "0xabcdef",
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			assert.Equal(t, tt.expectedLiteral, hashify(tt.literal))

			// Double-check that generating a tag is successful
			literalMap := &core.LiteralMap{Literals: map[string]*core.Literal{"o0": tt.literal}}
			hash, err := HashLiteralMap(context.TODO(), literalMap, nil)
			assert.NoError(t, err)
			assert.NotEmpty(t, hash)
		})
	}
}

// TestInputValueSorted ensures the hash is independent of input key order.
func TestInputValueSorted(t *testing.T) {
	const wantHash = "GQid5LjHbakcW68DS3P2jp80QLbiF0olFHF2hTh5bg8"

	forward, err := coreutils.MakeLiteralMap(map[string]interface{}{"1": 1, "2": 2})
	assert.NoError(t, err)

	forwardHash, err := HashLiteralMap(context.TODO(), forward, nil)
	assert.NoError(t, err)
	assert.Equal(t, wantHash, forwardHash)

	reversed, err := coreutils.MakeLiteralMap(map[string]interface{}{"2": 2, "1": 1})
	assert.NoError(t, err)

	reversedHash, err := HashLiteralMap(context.TODO(), reversed, nil)
	assert.NoError(t, err)
	assert.Equal(t, reversedHash, forwardHash)
}

// TestNoInputValues ensures that nil and empty input maps hash identically.
func TestNoInputValues(t *testing.T) {
	const emptyHash = "GKw-c0PwFokMUQ6T-TUmEWnZ4_VlQ2Qpgw-vCTT0-OQ"

	nilHash, err := HashLiteralMap(context.TODO(), nil, nil)
	assert.NoError(t, err)
	assert.Equal(t, emptyHash, nilHash)

	emptyMapHash, err := HashLiteralMap(context.TODO(), &core.LiteralMap{Literals: nil}, nil)
	assert.NoError(t, err)
	assert.Equal(t, emptyHash, emptyMapHash)
	assert.Equal(t, emptyMapHash, nilHash)
}

// TestCacheIgnoreInputVars ensures that inputs named in cacheIgnoreInputVars
// do not affect the computed hash.
func TestCacheIgnoreInputVars(t *testing.T) {
	base, err := coreutils.MakeLiteralMap(map[string]interface{}{"1": 1, "2": 2})
	assert.NoError(t, err)

	baseHash, err := HashLiteralMap(context.TODO(), base, nil)
	assert.NoError(t, err)
	assert.Equal(t, "GQid5LjHbakcW68DS3P2jp80QLbiF0olFHF2hTh5bg8", baseHash)

	// Adding a third input and then ignoring it must not change the hash.
	extended, err := coreutils.MakeLiteralMap(map[string]interface{}{"2": 2, "1": 1, "3": 3})
	assert.NoError(t, err)

	extendedHash, err := HashLiteralMap(context.TODO(), extended, []string{"3"})
	assert.NoError(t, err)
	assert.Equal(t, extendedHash, baseHash)
}

// TestHashIdentifierExceptVersion ensures identifiers differing only in
// Version produce the same hash.
func TestHashIdentifierExceptVersion(t *testing.T) {
	const wantHash = "+UmrGhEwHv3FesdpA4gliBluF3FUXz4tshmuOlw1FSk="

	makeID := func(version string) core.Identifier {
		return core.Identifier{
			Project: "project_1",
			Domain:  "domain_1",
			Name:    "name_1",
			Version: version,
			Org:     "org_1",
		}
	}

	hashV0, err := HashIdentifierExceptVersion(context.TODO(), makeID("0"))
	assert.NoError(t, err)
	assert.Equal(t, wantHash, hashV0)

	hashV1, err := HashIdentifierExceptVersion(context.TODO(), makeID("1"))
	assert.NoError(t, err)
	assert.Equal(t, wantHash, hashV1)
}

// --- flyteplugins/go/tasks/pluginmachinery/catalog/mocks/async_client.go ---
// Code generated by mockery v1.0.1. DO NOT EDIT.
+ +package mocks + +import ( + context "context" + + catalog "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/catalog" + + mock "github.com/stretchr/testify/mock" +) + +// AsyncClient is an autogenerated mock type for the AsyncClient type +type AsyncClient struct { + mock.Mock +} + +type AsyncClient_Download struct { + *mock.Call +} + +func (_m AsyncClient_Download) Return(outputFuture catalog.DownloadFuture, err error) *AsyncClient_Download { + return &AsyncClient_Download{Call: _m.Call.Return(outputFuture, err)} +} + +func (_m *AsyncClient) OnDownload(ctx context.Context, requests ...catalog.DownloadRequest) *AsyncClient_Download { + c_call := _m.On("Download", ctx, requests) + return &AsyncClient_Download{Call: c_call} +} + +func (_m *AsyncClient) OnDownloadMatch(matchers ...interface{}) *AsyncClient_Download { + c_call := _m.On("Download", matchers...) + return &AsyncClient_Download{Call: c_call} +} + +// Download provides a mock function with given fields: ctx, requests +func (_m *AsyncClient) Download(ctx context.Context, requests ...catalog.DownloadRequest) (catalog.DownloadFuture, error) { + _va := make([]interface{}, len(requests)) + for _i := range requests { + _va[_i] = requests[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + var r0 catalog.DownloadFuture + if rf, ok := ret.Get(0).(func(context.Context, ...catalog.DownloadRequest) catalog.DownloadFuture); ok { + r0 = rf(ctx, requests...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(catalog.DownloadFuture) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, ...catalog.DownloadRequest) error); ok { + r1 = rf(ctx, requests...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type AsyncClient_Upload struct { + *mock.Call +} + +func (_m AsyncClient_Upload) Return(putFuture catalog.UploadFuture, err error) *AsyncClient_Upload { + return &AsyncClient_Upload{Call: _m.Call.Return(putFuture, err)} +} + +func (_m *AsyncClient) OnUpload(ctx context.Context, requests ...catalog.UploadRequest) *AsyncClient_Upload { + c_call := _m.On("Upload", ctx, requests) + return &AsyncClient_Upload{Call: c_call} +} + +func (_m *AsyncClient) OnUploadMatch(matchers ...interface{}) *AsyncClient_Upload { + c_call := _m.On("Upload", matchers...) + return &AsyncClient_Upload{Call: c_call} +} + +// Upload provides a mock function with given fields: ctx, requests +func (_m *AsyncClient) Upload(ctx context.Context, requests ...catalog.UploadRequest) (catalog.UploadFuture, error) { + _va := make([]interface{}, len(requests)) + for _i := range requests { + _va[_i] = requests[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + var r0 catalog.UploadFuture + if rf, ok := ret.Get(0).(func(context.Context, ...catalog.UploadRequest) catalog.UploadFuture); ok { + r0 = rf(ctx, requests...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(catalog.UploadFuture) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, ...catalog.UploadRequest) error); ok { + r1 = rf(ctx, requests...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/flyteplugins/go/tasks/pluginmachinery/catalog/mocks/client.go b/flyteplugins/go/tasks/pluginmachinery/catalog/mocks/client.go new file mode 100644 index 0000000000..451351f013 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/catalog/mocks/client.go @@ -0,0 +1,249 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + catalog "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/catalog" + + datacatalog "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/datacatalog" + + io "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/io" + + mock "github.com/stretchr/testify/mock" + + time "time" +) + +// Client is an autogenerated mock type for the Client type +type Client struct { + mock.Mock +} + +type Client_Get struct { + *mock.Call +} + +func (_m Client_Get) Return(_a0 catalog.Entry, _a1 error) *Client_Get { + return &Client_Get{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *Client) OnGet(ctx context.Context, key catalog.Key) *Client_Get { + c_call := _m.On("Get", ctx, key) + return &Client_Get{Call: c_call} +} + +func (_m *Client) OnGetMatch(matchers ...interface{}) *Client_Get { + c_call := _m.On("Get", matchers...) + return &Client_Get{Call: c_call} +} + +// Get provides a mock function with given fields: ctx, key +func (_m *Client) Get(ctx context.Context, key catalog.Key) (catalog.Entry, error) { + ret := _m.Called(ctx, key) + + var r0 catalog.Entry + if rf, ok := ret.Get(0).(func(context.Context, catalog.Key) catalog.Entry); ok { + r0 = rf(ctx, key) + } else { + r0 = ret.Get(0).(catalog.Entry) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, catalog.Key) error); ok { + r1 = rf(ctx, key) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type Client_GetOrExtendReservation struct { + *mock.Call +} + +func (_m Client_GetOrExtendReservation) Return(_a0 *datacatalog.Reservation, _a1 error) *Client_GetOrExtendReservation { + return &Client_GetOrExtendReservation{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *Client) OnGetOrExtendReservation(ctx context.Context, key catalog.Key, ownerID string, heartbeatInterval time.Duration) *Client_GetOrExtendReservation { + c_call := _m.On("GetOrExtendReservation", ctx, key, ownerID, heartbeatInterval) + return 
&Client_GetOrExtendReservation{Call: c_call} +} + +func (_m *Client) OnGetOrExtendReservationMatch(matchers ...interface{}) *Client_GetOrExtendReservation { + c_call := _m.On("GetOrExtendReservation", matchers...) + return &Client_GetOrExtendReservation{Call: c_call} +} + +// GetOrExtendReservation provides a mock function with given fields: ctx, key, ownerID, heartbeatInterval +func (_m *Client) GetOrExtendReservation(ctx context.Context, key catalog.Key, ownerID string, heartbeatInterval time.Duration) (*datacatalog.Reservation, error) { + ret := _m.Called(ctx, key, ownerID, heartbeatInterval) + + var r0 *datacatalog.Reservation + if rf, ok := ret.Get(0).(func(context.Context, catalog.Key, string, time.Duration) *datacatalog.Reservation); ok { + r0 = rf(ctx, key, ownerID, heartbeatInterval) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*datacatalog.Reservation) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, catalog.Key, string, time.Duration) error); ok { + r1 = rf(ctx, key, ownerID, heartbeatInterval) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type Client_GetReservationCache struct { + *mock.Call +} + +func (_m Client_GetReservationCache) Return(_a0 catalog.ReservationCache) *Client_GetReservationCache { + return &Client_GetReservationCache{Call: _m.Call.Return(_a0)} +} + +func (_m *Client) OnGetReservationCache(ownerID string) *Client_GetReservationCache { + c_call := _m.On("GetReservationCache", ownerID) + return &Client_GetReservationCache{Call: c_call} +} + +func (_m *Client) OnGetReservationCacheMatch(matchers ...interface{}) *Client_GetReservationCache { + c_call := _m.On("GetReservationCache", matchers...) 
+ return &Client_GetReservationCache{Call: c_call} +} + +// GetReservationCache provides a mock function with given fields: ownerID +func (_m *Client) GetReservationCache(ownerID string) catalog.ReservationCache { + ret := _m.Called(ownerID) + + var r0 catalog.ReservationCache + if rf, ok := ret.Get(0).(func(string) catalog.ReservationCache); ok { + r0 = rf(ownerID) + } else { + r0 = ret.Get(0).(catalog.ReservationCache) + } + + return r0 +} + +type Client_Put struct { + *mock.Call +} + +func (_m Client_Put) Return(_a0 catalog.Status, _a1 error) *Client_Put { + return &Client_Put{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *Client) OnPut(ctx context.Context, key catalog.Key, reader io.OutputReader, metadata catalog.Metadata) *Client_Put { + c_call := _m.On("Put", ctx, key, reader, metadata) + return &Client_Put{Call: c_call} +} + +func (_m *Client) OnPutMatch(matchers ...interface{}) *Client_Put { + c_call := _m.On("Put", matchers...) + return &Client_Put{Call: c_call} +} + +// Put provides a mock function with given fields: ctx, key, reader, metadata +func (_m *Client) Put(ctx context.Context, key catalog.Key, reader io.OutputReader, metadata catalog.Metadata) (catalog.Status, error) { + ret := _m.Called(ctx, key, reader, metadata) + + var r0 catalog.Status + if rf, ok := ret.Get(0).(func(context.Context, catalog.Key, io.OutputReader, catalog.Metadata) catalog.Status); ok { + r0 = rf(ctx, key, reader, metadata) + } else { + r0 = ret.Get(0).(catalog.Status) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, catalog.Key, io.OutputReader, catalog.Metadata) error); ok { + r1 = rf(ctx, key, reader, metadata) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type Client_ReleaseReservation struct { + *mock.Call +} + +func (_m Client_ReleaseReservation) Return(_a0 error) *Client_ReleaseReservation { + return &Client_ReleaseReservation{Call: _m.Call.Return(_a0)} +} + +func (_m *Client) OnReleaseReservation(ctx context.Context, key 
catalog.Key, ownerID string) *Client_ReleaseReservation { + c_call := _m.On("ReleaseReservation", ctx, key, ownerID) + return &Client_ReleaseReservation{Call: c_call} +} + +func (_m *Client) OnReleaseReservationMatch(matchers ...interface{}) *Client_ReleaseReservation { + c_call := _m.On("ReleaseReservation", matchers...) + return &Client_ReleaseReservation{Call: c_call} +} + +// ReleaseReservation provides a mock function with given fields: ctx, key, ownerID +func (_m *Client) ReleaseReservation(ctx context.Context, key catalog.Key, ownerID string) error { + ret := _m.Called(ctx, key, ownerID) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, catalog.Key, string) error); ok { + r0 = rf(ctx, key, ownerID) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +type Client_Update struct { + *mock.Call +} + +func (_m Client_Update) Return(_a0 catalog.Status, _a1 error) *Client_Update { + return &Client_Update{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *Client) OnUpdate(ctx context.Context, key catalog.Key, reader io.OutputReader, metadata catalog.Metadata) *Client_Update { + c_call := _m.On("Update", ctx, key, reader, metadata) + return &Client_Update{Call: c_call} +} + +func (_m *Client) OnUpdateMatch(matchers ...interface{}) *Client_Update { + c_call := _m.On("Update", matchers...) 
+ return &Client_Update{Call: c_call} +} + +// Update provides a mock function with given fields: ctx, key, reader, metadata +func (_m *Client) Update(ctx context.Context, key catalog.Key, reader io.OutputReader, metadata catalog.Metadata) (catalog.Status, error) { + ret := _m.Called(ctx, key, reader, metadata) + + var r0 catalog.Status + if rf, ok := ret.Get(0).(func(context.Context, catalog.Key, io.OutputReader, catalog.Metadata) catalog.Status); ok { + r0 = rf(ctx, key, reader, metadata) + } else { + r0 = ret.Get(0).(catalog.Status) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, catalog.Key, io.OutputReader, catalog.Metadata) error); ok { + r1 = rf(ctx, key, reader, metadata) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// UpdateReservationCache provides a mock function with given fields: ownerID, entry +func (_m *Client) UpdateReservationCache(ownerID string, entry catalog.ReservationCache) { + _m.Called(ownerID, entry) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/catalog/mocks/download_future.go b/flyteplugins/go/tasks/pluginmachinery/catalog/mocks/download_future.go new file mode 100644 index 0000000000..b86e796e87 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/catalog/mocks/download_future.go @@ -0,0 +1,123 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. 
+ +package mocks + +import ( + catalog "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/catalog" + mock "github.com/stretchr/testify/mock" +) + +// DownloadFuture is an autogenerated mock type for the DownloadFuture type +type DownloadFuture struct { + mock.Mock +} + +type DownloadFuture_GetResponse struct { + *mock.Call +} + +func (_m DownloadFuture_GetResponse) Return(_a0 catalog.DownloadResponse, _a1 error) *DownloadFuture_GetResponse { + return &DownloadFuture_GetResponse{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *DownloadFuture) OnGetResponse() *DownloadFuture_GetResponse { + c_call := _m.On("GetResponse") + return &DownloadFuture_GetResponse{Call: c_call} +} + +func (_m *DownloadFuture) OnGetResponseMatch(matchers ...interface{}) *DownloadFuture_GetResponse { + c_call := _m.On("GetResponse", matchers...) + return &DownloadFuture_GetResponse{Call: c_call} +} + +// GetResponse provides a mock function with given fields: +func (_m *DownloadFuture) GetResponse() (catalog.DownloadResponse, error) { + ret := _m.Called() + + var r0 catalog.DownloadResponse + if rf, ok := ret.Get(0).(func() catalog.DownloadResponse); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(catalog.DownloadResponse) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type DownloadFuture_GetResponseError struct { + *mock.Call +} + +func (_m DownloadFuture_GetResponseError) Return(_a0 error) *DownloadFuture_GetResponseError { + return &DownloadFuture_GetResponseError{Call: _m.Call.Return(_a0)} +} + +func (_m *DownloadFuture) OnGetResponseError() *DownloadFuture_GetResponseError { + c_call := _m.On("GetResponseError") + return &DownloadFuture_GetResponseError{Call: c_call} +} + +func (_m *DownloadFuture) OnGetResponseErrorMatch(matchers ...interface{}) *DownloadFuture_GetResponseError { + c_call := _m.On("GetResponseError", matchers...) 
+ return &DownloadFuture_GetResponseError{Call: c_call} +} + +// GetResponseError provides a mock function with given fields: +func (_m *DownloadFuture) GetResponseError() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +type DownloadFuture_GetResponseStatus struct { + *mock.Call +} + +func (_m DownloadFuture_GetResponseStatus) Return(_a0 catalog.ResponseStatus) *DownloadFuture_GetResponseStatus { + return &DownloadFuture_GetResponseStatus{Call: _m.Call.Return(_a0)} +} + +func (_m *DownloadFuture) OnGetResponseStatus() *DownloadFuture_GetResponseStatus { + c_call := _m.On("GetResponseStatus") + return &DownloadFuture_GetResponseStatus{Call: c_call} +} + +func (_m *DownloadFuture) OnGetResponseStatusMatch(matchers ...interface{}) *DownloadFuture_GetResponseStatus { + c_call := _m.On("GetResponseStatus", matchers...) + return &DownloadFuture_GetResponseStatus{Call: c_call} +} + +// GetResponseStatus provides a mock function with given fields: +func (_m *DownloadFuture) GetResponseStatus() catalog.ResponseStatus { + ret := _m.Called() + + var r0 catalog.ResponseStatus + if rf, ok := ret.Get(0).(func() catalog.ResponseStatus); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(catalog.ResponseStatus) + } + + return r0 +} + +// OnReady provides a mock function with given fields: handler +func (_m *DownloadFuture) OnReady(handler catalog.ReadyHandler) { + _m.Called(handler) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/catalog/mocks/download_response.go b/flyteplugins/go/tasks/pluginmachinery/catalog/mocks/download_response.go new file mode 100644 index 0000000000..3e17312194 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/catalog/mocks/download_response.go @@ -0,0 +1,112 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. 
+ +package mocks + +import ( + bitarray "github.com/flyteorg/flyte/v2/flytestdlib/bitarray" + + mock "github.com/stretchr/testify/mock" +) + +// DownloadResponse is an autogenerated mock type for the DownloadResponse type +type DownloadResponse struct { + mock.Mock +} + +type DownloadResponse_GetCachedCount struct { + *mock.Call +} + +func (_m DownloadResponse_GetCachedCount) Return(_a0 int) *DownloadResponse_GetCachedCount { + return &DownloadResponse_GetCachedCount{Call: _m.Call.Return(_a0)} +} + +func (_m *DownloadResponse) OnGetCachedCount() *DownloadResponse_GetCachedCount { + c_call := _m.On("GetCachedCount") + return &DownloadResponse_GetCachedCount{Call: c_call} +} + +func (_m *DownloadResponse) OnGetCachedCountMatch(matchers ...interface{}) *DownloadResponse_GetCachedCount { + c_call := _m.On("GetCachedCount", matchers...) + return &DownloadResponse_GetCachedCount{Call: c_call} +} + +// GetCachedCount provides a mock function with given fields: +func (_m *DownloadResponse) GetCachedCount() int { + ret := _m.Called() + + var r0 int + if rf, ok := ret.Get(0).(func() int); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int) + } + + return r0 +} + +type DownloadResponse_GetCachedResults struct { + *mock.Call +} + +func (_m DownloadResponse_GetCachedResults) Return(_a0 *bitarray.BitSet) *DownloadResponse_GetCachedResults { + return &DownloadResponse_GetCachedResults{Call: _m.Call.Return(_a0)} +} + +func (_m *DownloadResponse) OnGetCachedResults() *DownloadResponse_GetCachedResults { + c_call := _m.On("GetCachedResults") + return &DownloadResponse_GetCachedResults{Call: c_call} +} + +func (_m *DownloadResponse) OnGetCachedResultsMatch(matchers ...interface{}) *DownloadResponse_GetCachedResults { + c_call := _m.On("GetCachedResults", matchers...) 
+ return &DownloadResponse_GetCachedResults{Call: c_call} +} + +// GetCachedResults provides a mock function with given fields: +func (_m *DownloadResponse) GetCachedResults() *bitarray.BitSet { + ret := _m.Called() + + var r0 *bitarray.BitSet + if rf, ok := ret.Get(0).(func() *bitarray.BitSet); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*bitarray.BitSet) + } + } + + return r0 +} + +type DownloadResponse_GetResultsSize struct { + *mock.Call +} + +func (_m DownloadResponse_GetResultsSize) Return(_a0 int) *DownloadResponse_GetResultsSize { + return &DownloadResponse_GetResultsSize{Call: _m.Call.Return(_a0)} +} + +func (_m *DownloadResponse) OnGetResultsSize() *DownloadResponse_GetResultsSize { + c_call := _m.On("GetResultsSize") + return &DownloadResponse_GetResultsSize{Call: c_call} +} + +func (_m *DownloadResponse) OnGetResultsSizeMatch(matchers ...interface{}) *DownloadResponse_GetResultsSize { + c_call := _m.On("GetResultsSize", matchers...) + return &DownloadResponse_GetResultsSize{Call: c_call} +} + +// GetResultsSize provides a mock function with given fields: +func (_m *DownloadResponse) GetResultsSize() int { + ret := _m.Called() + + var r0 int + if rf, ok := ret.Get(0).(func() int); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int) + } + + return r0 +} diff --git a/flyteplugins/go/tasks/pluginmachinery/catalog/mocks/future.go b/flyteplugins/go/tasks/pluginmachinery/catalog/mocks/future.go new file mode 100644 index 0000000000..35689665f9 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/catalog/mocks/future.go @@ -0,0 +1,82 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. 
+ +package mocks + +import ( + catalog "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/catalog" + mock "github.com/stretchr/testify/mock" +) + +// Future is an autogenerated mock type for the Future type +type Future struct { + mock.Mock +} + +type Future_GetResponseError struct { + *mock.Call +} + +func (_m Future_GetResponseError) Return(_a0 error) *Future_GetResponseError { + return &Future_GetResponseError{Call: _m.Call.Return(_a0)} +} + +func (_m *Future) OnGetResponseError() *Future_GetResponseError { + c_call := _m.On("GetResponseError") + return &Future_GetResponseError{Call: c_call} +} + +func (_m *Future) OnGetResponseErrorMatch(matchers ...interface{}) *Future_GetResponseError { + c_call := _m.On("GetResponseError", matchers...) + return &Future_GetResponseError{Call: c_call} +} + +// GetResponseError provides a mock function with given fields: +func (_m *Future) GetResponseError() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +type Future_GetResponseStatus struct { + *mock.Call +} + +func (_m Future_GetResponseStatus) Return(_a0 catalog.ResponseStatus) *Future_GetResponseStatus { + return &Future_GetResponseStatus{Call: _m.Call.Return(_a0)} +} + +func (_m *Future) OnGetResponseStatus() *Future_GetResponseStatus { + c_call := _m.On("GetResponseStatus") + return &Future_GetResponseStatus{Call: c_call} +} + +func (_m *Future) OnGetResponseStatusMatch(matchers ...interface{}) *Future_GetResponseStatus { + c_call := _m.On("GetResponseStatus", matchers...) 
+ return &Future_GetResponseStatus{Call: c_call} +} + +// GetResponseStatus provides a mock function with given fields: +func (_m *Future) GetResponseStatus() catalog.ResponseStatus { + ret := _m.Called() + + var r0 catalog.ResponseStatus + if rf, ok := ret.Get(0).(func() catalog.ResponseStatus); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(catalog.ResponseStatus) + } + + return r0 +} + +// OnReady provides a mock function with given fields: handler +func (_m *Future) OnReady(handler catalog.ReadyHandler) { + _m.Called(handler) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/catalog/mocks/upload_future.go b/flyteplugins/go/tasks/pluginmachinery/catalog/mocks/upload_future.go new file mode 100644 index 0000000000..c33e0ac012 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/catalog/mocks/upload_future.go @@ -0,0 +1,82 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. + +package mocks + +import ( + catalog "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/catalog" + mock "github.com/stretchr/testify/mock" +) + +// UploadFuture is an autogenerated mock type for the UploadFuture type +type UploadFuture struct { + mock.Mock +} + +type UploadFuture_GetResponseError struct { + *mock.Call +} + +func (_m UploadFuture_GetResponseError) Return(_a0 error) *UploadFuture_GetResponseError { + return &UploadFuture_GetResponseError{Call: _m.Call.Return(_a0)} +} + +func (_m *UploadFuture) OnGetResponseError() *UploadFuture_GetResponseError { + c_call := _m.On("GetResponseError") + return &UploadFuture_GetResponseError{Call: c_call} +} + +func (_m *UploadFuture) OnGetResponseErrorMatch(matchers ...interface{}) *UploadFuture_GetResponseError { + c_call := _m.On("GetResponseError", matchers...) 
+ return &UploadFuture_GetResponseError{Call: c_call} +} + +// GetResponseError provides a mock function with given fields: +func (_m *UploadFuture) GetResponseError() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +type UploadFuture_GetResponseStatus struct { + *mock.Call +} + +func (_m UploadFuture_GetResponseStatus) Return(_a0 catalog.ResponseStatus) *UploadFuture_GetResponseStatus { + return &UploadFuture_GetResponseStatus{Call: _m.Call.Return(_a0)} +} + +func (_m *UploadFuture) OnGetResponseStatus() *UploadFuture_GetResponseStatus { + c_call := _m.On("GetResponseStatus") + return &UploadFuture_GetResponseStatus{Call: c_call} +} + +func (_m *UploadFuture) OnGetResponseStatusMatch(matchers ...interface{}) *UploadFuture_GetResponseStatus { + c_call := _m.On("GetResponseStatus", matchers...) + return &UploadFuture_GetResponseStatus{Call: c_call} +} + +// GetResponseStatus provides a mock function with given fields: +func (_m *UploadFuture) GetResponseStatus() catalog.ResponseStatus { + ret := _m.Called() + + var r0 catalog.ResponseStatus + if rf, ok := ret.Get(0).(func() catalog.ResponseStatus); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(catalog.ResponseStatus) + } + + return r0 +} + +// OnReady provides a mock function with given fields: handler +func (_m *UploadFuture) OnReady(handler catalog.ReadyHandler) { + _m.Called(handler) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/catalog/reader_processor.go b/flyteplugins/go/tasks/pluginmachinery/catalog/reader_processor.go new file mode 100644 index 0000000000..df22f3ac25 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/catalog/reader_processor.go @@ -0,0 +1,87 @@ +package catalog + +import ( + "context" + "fmt" + "reflect" + + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/errors" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/io" + 
"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/workqueue" + "github.com/flyteorg/flyte/v2/flytestdlib/logger" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +type ReaderWorkItem struct { + // ReaderWorkItem outputs: + cached bool + + // ReaderWorkItem Inputs: + outputsWriter io.OutputWriter + // Inputs to query data catalog + key Key +} + +func (item ReaderWorkItem) IsCached() bool { + return item.cached +} + +func NewReaderWorkItem(key Key, outputsWriter io.OutputWriter) *ReaderWorkItem { + return &ReaderWorkItem{ + key: key, + outputsWriter: outputsWriter, + } +} + +type ReaderProcessor struct { + catalogClient Client +} + +func (p ReaderProcessor) Process(ctx context.Context, workItem workqueue.WorkItem) (workqueue.WorkStatus, error) { + wi, casted := workItem.(*ReaderWorkItem) + if !casted { + return workqueue.WorkStatusNotDone, fmt.Errorf("wrong work item type. Received: %v", reflect.TypeOf(workItem)) + } + + op, err := p.catalogClient.Get(ctx, wi.key) + if err != nil { + if IsNotFound(err) { + logger.Infof(ctx, "Artifact not found in Catalog. Key: %v", wi.key) + wi.cached = false + return workqueue.WorkStatusSucceeded, nil + } + + err = errors.Wrapf("CausedBy", err, "Failed to call catalog for Key: %v.", wi.key) + logger.Warnf(ctx, "Cache call failed: %v", err) + return workqueue.WorkStatusFailed, err + } + + if op.status.GetCacheStatus() == core.CatalogCacheStatus_CACHE_LOOKUP_FAILURE { + return workqueue.WorkStatusFailed, errors.Errorf(errors.DownstreamSystemError, "failed to lookup cache") + } + + if op.status.GetCacheStatus() == core.CatalogCacheStatus_CACHE_MISS || op.GetOutputs() == nil { + wi.cached = false + return workqueue.WorkStatusSucceeded, nil + } + + // TODO: Check task interface, if it has outputs but literalmap is empty (or not matching output), error. 
+ logger.Debugf(ctx, "Persisting output to %v", wi.outputsWriter.GetOutputPath()) + err = wi.outputsWriter.Put(ctx, op.GetOutputs()) + if err != nil { + err = errors.Wrapf("CausedBy", err, "Failed to persist cached output for Key: %v.", wi.key) + logger.Warnf(ctx, "Cache write to output writer failed: %v", err) + return workqueue.WorkStatusFailed, err + } + + wi.cached = true + + logger.Debugf(ctx, "Successfully read from catalog. Key [%v]", wi.key) + return workqueue.WorkStatusSucceeded, nil +} + +func NewReaderProcessor(catalogClient Client) ReaderProcessor { + return ReaderProcessor{ + catalogClient: catalogClient, + } +} diff --git a/flyteplugins/go/tasks/pluginmachinery/catalog/response.go b/flyteplugins/go/tasks/pluginmachinery/catalog/response.go new file mode 100644 index 0000000000..8ec56f8e7c --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/catalog/response.go @@ -0,0 +1,87 @@ +package catalog + +import ( + "github.com/flyteorg/flyte/v2/flytestdlib/bitarray" + "github.com/flyteorg/flyte/v2/flytestdlib/errors" +) + +type future struct { + responseStatus ResponseStatus + readyHandler ReadyHandler + err error +} + +func (f future) GetResponseStatus() ResponseStatus { + return f.responseStatus +} + +func (f future) GetResponseError() error { + return f.err +} + +func (f *future) SetResponseStatus(status ResponseStatus) { + f.responseStatus = status +} + +func (f *future) OnReady(handler ReadyHandler) { + f.readyHandler = handler +} + +type downloadFuture struct { + *future + + cachedResults *bitarray.BitSet + cachedCount int + resultsSize int +} + +func (r downloadFuture) GetResponse() (DownloadResponse, error) { + if r.GetResponseStatus() != ResponseStatusReady { + return nil, errors.Errorf(ErrResponseNotReady, "Response is not ready yet.") + } + + if r.GetResponseError() != nil { + return nil, errors.Wrapf(ErrSystemError, r.GetResponseError(), "ResponseError() is not nil.") + } + + return r, nil +} + +func (r downloadFuture) GetResultsSize() int { 
+ return r.resultsSize +} + +func (r downloadFuture) GetCachedResults() *bitarray.BitSet { + return r.cachedResults +} + +func (r downloadFuture) GetCachedCount() int { + return r.cachedCount +} + +func newDownloadFuture(status ResponseStatus, err error, cachedResults *bitarray.BitSet, resultsSize int, + cachedCount int) downloadFuture { + + return downloadFuture{ + future: &future{ + responseStatus: status, + err: err, + }, + cachedCount: cachedCount, + cachedResults: cachedResults, + resultsSize: resultsSize, + } +} + +type uploadFuture struct { + *future +} + +func newUploadFuture(status ResponseStatus, err error) uploadFuture { + return uploadFuture{ + future: &future{ + responseStatus: status, + err: err, + }, + } +} diff --git a/flyteplugins/go/tasks/pluginmachinery/catalog/writer_processor.go b/flyteplugins/go/tasks/pluginmachinery/catalog/writer_processor.go new file mode 100644 index 0000000000..e89c367558 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/catalog/writer_processor.go @@ -0,0 +1,62 @@ +package catalog + +import ( + "context" + "fmt" + "reflect" + + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/errors" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/io" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/workqueue" + "github.com/flyteorg/flyte/v2/flytestdlib/logger" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +type WriterWorkItem struct { + // WriterWorkItem Inputs + key Key + data io.OutputReader + metadata Metadata +} + +func NewWriterWorkItem(key Key, data io.OutputReader, metadata Metadata) *WriterWorkItem { + return &WriterWorkItem{ + key: key, + data: data, + metadata: metadata, + } +} + +type writerProcessor struct { + catalogClient Client +} + +func (p writerProcessor) Process(ctx context.Context, workItem workqueue.WorkItem) (workqueue.WorkStatus, error) { + wi, casted := workItem.(*WriterWorkItem) + if !casted { + return workqueue.WorkStatusNotDone, 
fmt.Errorf("wrong work item type. Received: %v", reflect.TypeOf(workItem)) + } + + status, err := p.catalogClient.Put(ctx, wi.key, wi.data, wi.metadata) + if err != nil { + logger.Errorf(ctx, "Error putting to catalog [%s]", err) + return workqueue.WorkStatusNotDone, errors.Wrapf(errors.DownstreamSystemError, err, + "Error writing to catalog, key id [%v] cache version [%v]", + wi.key.Identifier, wi.key.CacheVersion) + } + + if status.GetCacheStatus() == core.CatalogCacheStatus_CACHE_PUT_FAILURE { + return workqueue.WorkStatusNotDone, errors.Errorf(errors.DownstreamSystemError, + "Error writing to catalog, key id [%v] cache version [%v]", + wi.key.Identifier, wi.key.CacheVersion) + } + + logger.Debugf(ctx, "Successfully wrote to catalog. Key [%v]", wi.key) + return workqueue.WorkStatusSucceeded, nil +} + +func NewWriterProcessor(catalogClient Client) workqueue.Processor { + return writerProcessor{ + catalogClient: catalogClient, + } +} diff --git a/flyteplugins/go/tasks/pluginmachinery/core/allocationstatus_enumer.go b/flyteplugins/go/tasks/pluginmachinery/core/allocationstatus_enumer.go new file mode 100644 index 0000000000..dbbb536ca8 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/core/allocationstatus_enumer.go @@ -0,0 +1,51 @@ +// Code generated by "enumer -type=AllocationStatus -trimprefix=AllocationStatus"; DO NOT EDIT. 
+ +package core + +import ( + "fmt" +) + +const _AllocationStatusName = "AllocationUndefinedGrantedExhaustedNamespaceQuotaExceeded" + +var _AllocationStatusIndex = [...]uint8{0, 19, 26, 35, 57} + +func (i AllocationStatus) String() string { + if i < 0 || i >= AllocationStatus(len(_AllocationStatusIndex)-1) { + return fmt.Sprintf("AllocationStatus(%d)", i) + } + return _AllocationStatusName[_AllocationStatusIndex[i]:_AllocationStatusIndex[i+1]] +} + +var _AllocationStatusValues = []AllocationStatus{0, 1, 2, 3} + +var _AllocationStatusNameToValueMap = map[string]AllocationStatus{ + _AllocationStatusName[0:19]: 0, + _AllocationStatusName[19:26]: 1, + _AllocationStatusName[26:35]: 2, + _AllocationStatusName[35:57]: 3, +} + +// AllocationStatusString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func AllocationStatusString(s string) (AllocationStatus, error) { + if val, ok := _AllocationStatusNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to AllocationStatus values", s) +} + +// AllocationStatusValues returns all values of the enum +func AllocationStatusValues() []AllocationStatus { + return _AllocationStatusValues +} + +// IsAAllocationStatus returns "true" if the value is listed in the enum definition. 
"false" otherwise +func (i AllocationStatus) IsAAllocationStatus() bool { + for _, v := range _AllocationStatusValues { + if i == v { + return true + } + } + return false +} diff --git a/flyteplugins/go/tasks/pluginmachinery/core/exec_context.go b/flyteplugins/go/tasks/pluginmachinery/core/exec_context.go new file mode 100644 index 0000000000..eb0d0bf348 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/core/exec_context.go @@ -0,0 +1,75 @@ +package core + +import ( + "context" + + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/catalog" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/io" + "github.com/flyteorg/flyte/v2/flytestdlib/storage" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +//go:generate mockery -all -case=underscore + +// An interface to access a remote/sharable location that contains the serialized TaskTemplate +type TaskTemplatePath interface { + // Returns the path + Path(ctx context.Context) (storage.DataReference, error) +} + +// An interface to access the TaskInformation +type TaskReader interface { + TaskTemplatePath + // Returns the core TaskTemplate + Read(ctx context.Context) (*core.TaskTemplate, error) +} + +// An interface that is passed to every plugin invocation. 
It carries all meta and contextual information for the current
+// task execution
+type TaskExecutionContext interface {
+	// Returns a resource manager that can be used to create reservations for limited resources
+	ResourceManager() ResourceManager
+
+	// Returns a secret manager that can retrieve configured secrets for this plugin
+	SecretManager() SecretManager
+
+	// Returns a method that allows a plugin to indicate that the task has a new update and can be invoked again to check for updates
+	TaskRefreshIndicator() SignalAsync
+
+	// Returns a handle to the currently configured storage backend that can be used to communicate with the tasks or write metadata
+	DataStore() *storage.DataStore
+
+	// Returns a reader that retrieves previously stored plugin internal state. The state itself is immutable
+	PluginStateReader() PluginStateReader
+
+	// Returns a TaskReader, to retrieve task details
+	TaskReader() TaskReader
+
+	// Returns an input reader to retrieve input data
+	InputReader() io.InputReader
+
+	// Returns a handle to the Task's execution metadata.
+	TaskExecutionMetadata() TaskExecutionMetadata
+
+	// Provides an output sync of type io.OutputWriter
+	OutputWriter() io.OutputWriter
+
+	// Get a handle to the PluginStateWriter. Any mutations to the plugin's internal state can be persisted using this
+	// These mutations will be visible in the next round
+	PluginStateWriter() PluginStateWriter
+
+	// Get a handle to catalog client
+	Catalog() catalog.AsyncClient
+
+	// Returns a handle to the Task events recorder, which get stored in the Admin.
+	EventsRecorder() EventsRecorder
+}
+
+// A simple fire-and-forget func
+type SignalAsync func(ctx context.Context)
+
+// Task events recorder, which get stored in the Admin. If this is invoked multiple times,
+// multiple events will be sent to Admin.
It is not recommended that one uses this interface, a transition will trigger an auto event to admin +type EventsRecorder interface { + RecordRaw(ctx context.Context, ev PhaseInfo) error +} diff --git a/flyteplugins/go/tasks/pluginmachinery/core/exec_metadata.go b/flyteplugins/go/tasks/pluginmachinery/core/exec_metadata.go new file mode 100644 index 0000000000..1eb07c5f1b --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/core/exec_metadata.go @@ -0,0 +1,84 @@ +package core + +import ( + "fmt" + + v1 "k8s.io/api/core/v1" + v12 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/common" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +// TaskOverrides interface to expose any overrides that have been set for this task (like resource overrides etc) +type TaskOverrides interface { + GetResources() *v1.ResourceRequirements + GetExtendedResources() *core.ExtendedResources + GetContainerImage() string + GetConfigMap() *v1.ConfigMap + GetPodTemplate() *core.K8SPod + GetConfig() map[string]string +} + +type ConnectionWrapper struct { + Connection core.Connection + Source common.AttributesSource +} + +// ExternalResourceAttributes is a wrapper around ExternalResourceAttributes to expose the source of the attributes +type ExternalResourceAttributes struct { + Connections map[string]ConnectionWrapper +} + +func (e ExternalResourceAttributes) GetConnection(name string) (*core.Connection, common.AttributesSource, error) { + if connWrapper, ok := e.Connections[name]; ok { + return &connWrapper.Connection, connWrapper.Source, nil + } + return nil, common.AttributesSource_SOURCE_UNSPECIFIED, fmt.Errorf("connection [%s] not found", name) +} + +func (e ExternalResourceAttributes) GetConnections() map[string]ConnectionWrapper { + return e.Connections +} + +// TaskExecutionID is a simple Interface to expose the ExecutionID of the running Task +type TaskExecutionID interface { + // 
GetGeneratedName returns the computed/generated name for the task id + // deprecated: use GetGeneratedNameWithLength + GetGeneratedName() string + + // GetGeneratedNameWith returns the generated name within a bounded length. If the name is smaller than minLength, + // it'll get right-padded with character '0'. If the name is bigger than maxLength, it'll get hashed to fit within. + GetGeneratedNameWith(minLength, maxLength int) (string, error) + + // GetID returns the underlying idl task identifier. + GetID() core.TaskExecutionIdentifier + + // GetUniqueNodeID returns the fully-qualified Node ID that is unique within a + // given workflow execution. + GetUniqueNodeID() string +} + +// TaskExecutionMetadata represents any execution information for a Task. It is used to communicate meta information about the +// execution or any previously stored information +type TaskExecutionMetadata interface { + // GetOwnerID returns the owning Kubernetes object + GetOwnerID() types.NamespacedName + // GetTaskExecutionID is a specially generated task execution id, that is guaranteed to be unique and consistent for subsequent calls + GetTaskExecutionID() TaskExecutionID + GetNamespace() string + GetOwnerReference() v12.OwnerReference + GetOverrides() TaskOverrides + GetLabels() map[string]string + GetMaxAttempts() uint32 + GetAnnotations() map[string]string + GetK8sServiceAccount() string + GetSecurityContext() core.SecurityContext + IsInterruptible() bool + GetPlatformResources() *v1.ResourceRequirements + GetInterruptibleFailureThreshold() int32 + GetEnvironmentVariables() map[string]string + GetExternalResourceAttributes() ExternalResourceAttributes + GetConsoleURL() string +} diff --git a/flyteplugins/go/tasks/pluginmachinery/core/kube_client.go b/flyteplugins/go/tasks/pluginmachinery/core/kube_client.go new file mode 100644 index 0000000000..54c8861129 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/core/kube_client.go @@ -0,0 +1,16 @@ +package core + +import ( + 
"sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +// TODO we may not want to expose this? +// A friendly controller-runtime client that gets passed to executors +type KubeClient interface { + // GetClient returns a client configured with the Config + GetClient() client.Client + + // GetCache returns a cache.Cache + GetCache() cache.Cache +} diff --git a/flyteplugins/go/tasks/pluginmachinery/core/mocks/events_recorder.go b/flyteplugins/go/tasks/pluginmachinery/core/mocks/events_recorder.go new file mode 100644 index 0000000000..19ee1c9f46 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/core/mocks/events_recorder.go @@ -0,0 +1,47 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. + +package mocks + +import ( + context "context" + + core "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + mock "github.com/stretchr/testify/mock" +) + +// EventsRecorder is an autogenerated mock type for the EventsRecorder type +type EventsRecorder struct { + mock.Mock +} + +type EventsRecorder_RecordRaw struct { + *mock.Call +} + +func (_m EventsRecorder_RecordRaw) Return(_a0 error) *EventsRecorder_RecordRaw { + return &EventsRecorder_RecordRaw{Call: _m.Call.Return(_a0)} +} + +func (_m *EventsRecorder) OnRecordRaw(ctx context.Context, ev core.PhaseInfo) *EventsRecorder_RecordRaw { + c_call := _m.On("RecordRaw", ctx, ev) + return &EventsRecorder_RecordRaw{Call: c_call} +} + +func (_m *EventsRecorder) OnRecordRawMatch(matchers ...interface{}) *EventsRecorder_RecordRaw { + c_call := _m.On("RecordRaw", matchers...) 
+ return &EventsRecorder_RecordRaw{Call: c_call} +} + +// RecordRaw provides a mock function with given fields: ctx, ev +func (_m *EventsRecorder) RecordRaw(ctx context.Context, ev core.PhaseInfo) error { + ret := _m.Called(ctx, ev) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, core.PhaseInfo) error); ok { + r0 = rf(ctx, ev) + } else { + r0 = ret.Error(0) + } + + return r0 +} diff --git a/flyteplugins/go/tasks/pluginmachinery/core/mocks/fake_k8s_cache.go b/flyteplugins/go/tasks/pluginmachinery/core/mocks/fake_k8s_cache.go new file mode 100644 index 0000000000..09e76b7949 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/core/mocks/fake_k8s_cache.go @@ -0,0 +1,176 @@ +package mocks + +import ( + "context" + "fmt" + "reflect" + "sync" + + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type FakeKubeCache struct { + // Reader acts as a client to objects stored in the cache. + client.Reader + + // Informers loads informers and adds field indices. 
	// --- tail of FakeKubeCache (struct opening is earlier in the file) ---
	cache.Informers                   // embedded so FakeKubeCache satisfies cache.Cache; unimplemented methods below panic
	syncObj sync.RWMutex              // guards Cache for concurrent test access
	Cache   map[string]runtime.Object // stored objects, keyed by formatKey(NamespacedName, GVK)
}

// GetInformer is not implemented by this fake; it exists only to satisfy the
// cache.Cache interface and panics if a test actually calls it.
func (m *FakeKubeCache) GetInformer(ctx context.Context, obj client.Object, opts ...cache.InformerGetOption) (cache.Informer, error) {
	panic("implement me")
}

// GetInformerForKind is not implemented by this fake; it panics if called.
func (m *FakeKubeCache) GetInformerForKind(ctx context.Context, gvk schema.GroupVersionKind, opts ...cache.InformerGetOption) (cache.Informer, error) {
	panic("implement me")
}

// Start is not implemented by this fake; it panics if called.
func (m *FakeKubeCache) Start(ctx context.Context) error {
	panic("implement me")
}

// WaitForCacheSync is not implemented by this fake; it panics if called.
func (m *FakeKubeCache) WaitForCacheSync(ctx context.Context) bool {
	panic("implement me")
}

// IndexField is not implemented by this fake; it panics if called.
func (m *FakeKubeCache) IndexField(ctx context.Context, obj client.Object, field string, extractValue client.IndexerFunc) error {
	panic("implement me")
}

// Get looks up the object stored under (key, out's GVK) and copies it into out.
// A deep copy is always made, so callers can mutate the result without
// affecting the cache. Returns a k8s NotFound error when the key is absent.
// Note: the GVK is taken from out, so out must have its TypeMeta/ObjectKind
// populated for the lookup key to match what Create/Update stored.
func (m *FakeKubeCache) Get(ctx context.Context, key client.ObjectKey, out client.Object, opts ...client.GetOption) error {
	m.syncObj.RLock()
	defer m.syncObj.RUnlock()

	item, found := m.Cache[formatKey(key, out.GetObjectKind().GroupVersionKind())]
	if found {
		// deep copy to avoid mutating cache
		item = item.DeepCopyObject()
		_, isUnstructured := out.(*unstructured.Unstructured)
		if isUnstructured {
			// Copy the value of the item in the cache to the returned value
			// via reflection: this requires the cached item to already be the
			// same concrete type as out (i.e. *unstructured.Unstructured).
			outVal := reflect.ValueOf(out)
			objVal := reflect.ValueOf(item)
			if !objVal.Type().AssignableTo(outVal.Type()) {
				return fmt.Errorf("cache had type %s, but %s was asked for", objVal.Type(), outVal.Type())
			}
			reflect.Indirect(outVal).Set(reflect.Indirect(objVal))
			return nil
		}

		// Typed out: round-trip through the unstructured converter so a cached
		// object of a different (but compatible) concrete type still populates out.
		p, err := runtime.DefaultUnstructuredConverter.ToUnstructured(item)
		if err != nil {
			return err
		}

		return runtime.DefaultUnstructuredConverter.FromUnstructured(p, out)
	}

	return errors.NewNotFound(schema.GroupResource{}, key.Name)
}

// List returns deep copies of every cached object, optionally filtered by the
// raw ListOptions' Kind and APIVersion (no label/field selector support).
// Result order is undefined (Go map iteration).
func (m *FakeKubeCache) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
	m.syncObj.RLock()
	defer m.syncObj.RUnlock()

	objs := make([]runtime.Object, 0, len(m.Cache))

	listOptions := &client.ListOptions{}
	for _, opt := range opts {
		opt.ApplyToList(listOptions)
	}

	for _, val := range m.Cache {
		if listOptions.Raw != nil {
			if val.GetObjectKind().GroupVersionKind().Kind != listOptions.Raw.Kind {
				continue
			}

			if val.GetObjectKind().GroupVersionKind().GroupVersion().String() != listOptions.Raw.APIVersion {
				continue
			}
		}

		objs = append(objs, val.DeepCopyObject())
	}

	return meta.SetList(list, objs)
}

// Create stores obj under its (name/namespace, GVK) key, or returns a k8s
// AlreadyExists error if something is already stored there.
// NOTE(review): obj is stored without a deep copy, so later mutations by the
// caller are visible inside the cache (reads deep-copy, writes do not) —
// confirm whether tests rely on this asymmetry before changing it.
func (m *FakeKubeCache) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) (err error) {
	m.syncObj.Lock()
	defer m.syncObj.Unlock()

	accessor, err := meta.Accessor(obj)
	if err != nil {
		return err
	}

	key := formatKey(types.NamespacedName{
		Name:      accessor.GetName(),
		Namespace: accessor.GetNamespace(),
	}, obj.GetObjectKind().GroupVersionKind())

	if _, exists := m.Cache[key]; !exists {
		m.Cache[key] = obj
		return nil
	}

	return errors.NewAlreadyExists(schema.GroupResource{}, accessor.GetName())
}

// Delete removes the object's entry, if any. Deleting a missing object is a
// silent no-op (returns nil) — unlike a real client, which would return
// NotFound; this appears intentional for idempotent test teardown.
func (m *FakeKubeCache) Delete(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error {
	m.syncObj.Lock()
	defer m.syncObj.Unlock()

	accessor, err := meta.Accessor(obj)
	if err != nil {
		return err
	}

	key := formatKey(types.NamespacedName{
		Name:      accessor.GetName(),
		Namespace: accessor.GetNamespace(),
	}, obj.GetObjectKind().GroupVersionKind())

	delete(m.Cache, key)

	return nil
}

// Update replaces an existing entry with obj (stored without deep copy, see
// the NOTE on Create), or returns a k8s NotFound error if no entry exists.
func (m *FakeKubeCache) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error {
	m.syncObj.Lock()
	defer m.syncObj.Unlock()

	accessor, err := meta.Accessor(obj)
	if err != nil {
		return err
	}

	key := formatKey(types.NamespacedName{
		Name:      accessor.GetName(),
		Namespace: accessor.GetNamespace(),
	}, obj.GetObjectKind().GroupVersionKind())

	if _, exists := m.Cache[key]; exists {
		m.Cache[key] = obj
		return nil
	}

	return errors.NewNotFound(schema.GroupResource{}, accessor.GetName())
}

// NewFakeKubeCache returns an empty, ready-to-use fake cache.
func NewFakeKubeCache() *FakeKubeCache {
	return &FakeKubeCache{
		syncObj: sync.RWMutex{},
		Cache:   map[string]runtime.Object{},
	}
}
diff --git a/flyteplugins/go/tasks/pluginmachinery/core/mocks/fake_k8s_client.go b/flyteplugins/go/tasks/pluginmachinery/core/mocks/fake_k8s_client.go
new file mode 100644
index 0000000000..e590ac04ee
--- /dev/null
+++ b/flyteplugins/go/tasks/pluginmachinery/core/mocks/fake_k8s_client.go
@@ -0,0 +1,208 @@
// Package mocks: FakeKubeClient is an in-memory stand-in for a
// controller-runtime client.Client, backed by a map keyed on (name, GVK).
// Its Get/List/Create/Delete/Update semantics mirror FakeKubeCache.
package mocks

import (
	"context"
	"fmt"
	"reflect"
	"sync"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// FakeKubeClient is a map-backed fake client.Client for plugin tests.
// Embedded interfaces satisfy client.Client; methods not overridden below panic.
type FakeKubeClient struct {
	client.Reader
	client.Writer
	client.StatusClient
	client.SubResourceClientConstructor
	syncObj sync.RWMutex              // guards Cache
	Cache   map[string]runtime.Object // stored objects, keyed by formatKey(NamespacedName, GVK)
}

// formatKey builds the cache key "<namespace>/<name>:<GroupVersionKind>" used
// by both FakeKubeClient and FakeKubeCache (same package).
func formatKey(name types.NamespacedName, kind schema.GroupVersionKind) string {
	key := fmt.Sprintf("%v:%v", name.String(), kind.String())
	return key
}

// Get copies the cached object for (key, out's GVK) into out (always a deep
// copy); returns a k8s NotFound error when absent. See FakeKubeCache.Get for
// the unstructured/typed copy strategy — the logic is identical.
func (m *FakeKubeClient) Get(ctx context.Context, key client.ObjectKey, out client.Object, opts ...client.GetOption) error {
	m.syncObj.RLock()
	defer m.syncObj.RUnlock()

	item, found := m.Cache[formatKey(key, out.GetObjectKind().GroupVersionKind())]
	if found {
		// deep copy to avoid mutating cache
		item = item.DeepCopyObject()
		_, isUnstructured := out.(*unstructured.Unstructured)
		if isUnstructured {
			// Copy the value of the item in the cache to the returned value
			outVal := reflect.ValueOf(out)
			objVal := reflect.ValueOf(item)
			if !objVal.Type().AssignableTo(outVal.Type()) {
				return fmt.Errorf("cache had type %s, but %s was asked for", objVal.Type(), outVal.Type())
			}
			reflect.Indirect(outVal).Set(reflect.Indirect(objVal))
			return nil
		}

		p, err := runtime.DefaultUnstructuredConverter.ToUnstructured(item)
		if err != nil {
			return err
		}

		return runtime.DefaultUnstructuredConverter.FromUnstructured(p, out)
	}

	return errors.NewNotFound(schema.GroupResource{}, key.Name)
}

// GroupVersionKindFor returns the GroupVersionKind for the given object.
// Not implemented by this fake; panics if called.
func (m *FakeKubeClient) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) {
	panic("implement me")
}

// IsObjectNamespaced returns true if the GroupVersionKind of the object is namespaced.
// Not implemented by this fake; panics if called.
func (m *FakeKubeClient) IsObjectNamespaced(obj runtime.Object) (bool, error) {
	panic("implement me")
}

// List returns deep copies of all cached objects, optionally filtered by the
// raw ListOptions' Kind/APIVersion. Order is undefined (map iteration).
func (m *FakeKubeClient) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
	m.syncObj.RLock()
	defer m.syncObj.RUnlock()

	objs := make([]runtime.Object, 0, len(m.Cache))

	listOptions := &client.ListOptions{}
	for _, opt := range opts {
		opt.ApplyToList(listOptions)
	}

	for _, val := range m.Cache {
		if listOptions.Raw != nil {
			if val.GetObjectKind().GroupVersionKind().Kind != listOptions.Raw.Kind {
				continue
			}

			if val.GetObjectKind().GroupVersionKind().GroupVersion().String() != listOptions.Raw.APIVersion {
				continue
			}
		}

		objs = append(objs, val.DeepCopyObject())
	}

	return meta.SetList(list, objs)
}

// Create stores obj, returning a k8s AlreadyExists error if the key is taken.
// For *v1.Pod it first fabricates one ContainerStatus (with a docker:// ID)
// per container that lacks one, so tests can exercise status-dependent paths.
// NOTE(review): obj is stored (and, for Pods, mutated) by pointer — no deep
// copy on write; confirm callers depend on that before changing.
func (m *FakeKubeClient) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) (err error) {
	m.syncObj.Lock()
	defer m.syncObj.Unlock()

	// if obj is a *v1.Pod then append a ContainerStatus for each Container
	pod, ok := obj.(*v1.Pod)
	if ok {
		for i := range pod.Spec.Containers {
			if len(pod.Status.ContainerStatuses) > i {
				continue
			}

			pod.Status.ContainerStatuses = append(pod.Status.ContainerStatuses, v1.ContainerStatus{
				ContainerID: "docker://container-name",
			})
		}
	}

	accessor, err := meta.Accessor(obj)
	if err != nil {
		return err
	}

	key := formatKey(types.NamespacedName{
		Name:      accessor.GetName(),
		Namespace: accessor.GetNamespace(),
	}, obj.GetObjectKind().GroupVersionKind())

	if _, exists := m.Cache[key]; !exists {
		m.Cache[key] = obj
		return nil
	}

	return errors.NewAlreadyExists(schema.GroupResource{}, accessor.GetName())
}

// Delete removes obj's entry if present; deleting a missing object is a
// silent no-op (nil), unlike a real client which would return NotFound.
func (m *FakeKubeClient) Delete(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error {
	m.syncObj.Lock()
	defer m.syncObj.Unlock()

	accessor, err := meta.Accessor(obj)
	if err != nil {
		return err
	}

	key := formatKey(types.NamespacedName{
		Name:      accessor.GetName(),
		Namespace: accessor.GetNamespace(),
	}, obj.GetObjectKind().GroupVersionKind())

	delete(m.Cache, key)

	return nil
}

// Update replaces an existing entry with obj (stored by pointer, no deep
// copy), or returns a k8s NotFound error when no entry exists.
func (m *FakeKubeClient) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error {
	m.syncObj.Lock()
	defer m.syncObj.Unlock()

	accessor, err := meta.Accessor(obj)
	if err != nil {
		return err
	}

	key := formatKey(types.NamespacedName{
		Name:      accessor.GetName(),
		Namespace: accessor.GetNamespace(),
	}, obj.GetObjectKind().GroupVersionKind())

	if _, exists := m.Cache[key]; exists {
		m.Cache[key] = obj
		return nil
	}

	return errors.NewNotFound(schema.GroupResource{}, accessor.GetName())
}

// Status is not implemented by this fake; panics if called.
func (*FakeKubeClient) Status() client.StatusWriter {
	panic("implement me")
}

// Patch patches the given obj in the Kubernetes cluster. obj must be a
// struct pointer so that obj can be updated with the content returned by the Server.
// Not implemented by this fake; panics if called.
func (*FakeKubeClient) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error {
	panic("implement me")

}

// DeleteAllOf deletes all objects of the given type matching the given options.
// Not implemented by this fake; panics if called.
func (*FakeKubeClient) DeleteAllOf(ctx context.Context, obj client.Object, opts ...client.DeleteAllOfOption) error {
	panic("implement me")
}

// Scheme is not implemented by this fake; panics if called.
func (*FakeKubeClient) Scheme() *runtime.Scheme {
	panic("implement me")
}

// RESTMapper is not implemented by this fake; panics if called.
func (*FakeKubeClient) RESTMapper() meta.RESTMapper {
	panic("implement me")
}

// NewFakeKubeClient returns an empty, ready-to-use fake client.
func NewFakeKubeClient() *FakeKubeClient {
	return &FakeKubeClient{
		syncObj: sync.RWMutex{},
		Cache:   map[string]runtime.Object{},
	}
}
diff --git a/flyteplugins/go/tasks/pluginmachinery/core/mocks/kube_client.go b/flyteplugins/go/tasks/pluginmachinery/core/mocks/kube_client.go
new file mode 100644
index 0000000000..e437c6c587
--- /dev/null
+++ b/flyteplugins/go/tasks/pluginmachinery/core/mocks/kube_client.go
@@ -0,0 +1,83 @@
// Code generated by mockery v1.0.1. DO NOT EDIT.

package mocks

import (
	cache "sigs.k8s.io/controller-runtime/pkg/cache"
	client "sigs.k8s.io/controller-runtime/pkg/client"

	mock "github.com/stretchr/testify/mock"
)

// KubeClient is an autogenerated mock type for the KubeClient type
type KubeClient struct {
	mock.Mock
}

type KubeClient_GetCache struct {
	*mock.Call
}

func (_m KubeClient_GetCache) Return(_a0 cache.Cache) *KubeClient_GetCache {
	return &KubeClient_GetCache{Call: _m.Call.Return(_a0)}
}

func (_m *KubeClient) OnGetCache() *KubeClient_GetCache {
	c_call := _m.On("GetCache")
	return &KubeClient_GetCache{Call: c_call}
}

func (_m *KubeClient) OnGetCacheMatch(matchers ...interface{}) *KubeClient_GetCache {
	c_call := _m.On("GetCache", matchers...)
+ return &KubeClient_GetCache{Call: c_call} +} + +// GetCache provides a mock function with given fields: +func (_m *KubeClient) GetCache() cache.Cache { + ret := _m.Called() + + var r0 cache.Cache + if rf, ok := ret.Get(0).(func() cache.Cache); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(cache.Cache) + } + } + + return r0 +} + +type KubeClient_GetClient struct { + *mock.Call +} + +func (_m KubeClient_GetClient) Return(_a0 client.Client) *KubeClient_GetClient { + return &KubeClient_GetClient{Call: _m.Call.Return(_a0)} +} + +func (_m *KubeClient) OnGetClient() *KubeClient_GetClient { + c_call := _m.On("GetClient") + return &KubeClient_GetClient{Call: c_call} +} + +func (_m *KubeClient) OnGetClientMatch(matchers ...interface{}) *KubeClient_GetClient { + c_call := _m.On("GetClient", matchers...) + return &KubeClient_GetClient{Call: c_call} +} + +// GetClient provides a mock function with given fields: +func (_m *KubeClient) GetClient() client.Client { + ret := _m.Called() + + var r0 client.Client + if rf, ok := ret.Get(0).(func() client.Client); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(client.Client) + } + } + + return r0 +} diff --git a/flyteplugins/go/tasks/pluginmachinery/core/mocks/plugin.go b/flyteplugins/go/tasks/pluginmachinery/core/mocks/plugin.go new file mode 100644 index 0000000000..7401497e42 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/core/mocks/plugin.go @@ -0,0 +1,182 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + core "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + mock "github.com/stretchr/testify/mock" +) + +// Plugin is an autogenerated mock type for the Plugin type +type Plugin struct { + mock.Mock +} + +type Plugin_Abort struct { + *mock.Call +} + +func (_m Plugin_Abort) Return(_a0 error) *Plugin_Abort { + return &Plugin_Abort{Call: _m.Call.Return(_a0)} +} + +func (_m *Plugin) OnAbort(ctx context.Context, tCtx core.TaskExecutionContext) *Plugin_Abort { + c_call := _m.On("Abort", ctx, tCtx) + return &Plugin_Abort{Call: c_call} +} + +func (_m *Plugin) OnAbortMatch(matchers ...interface{}) *Plugin_Abort { + c_call := _m.On("Abort", matchers...) + return &Plugin_Abort{Call: c_call} +} + +// Abort provides a mock function with given fields: ctx, tCtx +func (_m *Plugin) Abort(ctx context.Context, tCtx core.TaskExecutionContext) error { + ret := _m.Called(ctx, tCtx) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, core.TaskExecutionContext) error); ok { + r0 = rf(ctx, tCtx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +type Plugin_Finalize struct { + *mock.Call +} + +func (_m Plugin_Finalize) Return(_a0 error) *Plugin_Finalize { + return &Plugin_Finalize{Call: _m.Call.Return(_a0)} +} + +func (_m *Plugin) OnFinalize(ctx context.Context, tCtx core.TaskExecutionContext) *Plugin_Finalize { + c_call := _m.On("Finalize", ctx, tCtx) + return &Plugin_Finalize{Call: c_call} +} + +func (_m *Plugin) OnFinalizeMatch(matchers ...interface{}) *Plugin_Finalize { + c_call := _m.On("Finalize", matchers...) 
+ return &Plugin_Finalize{Call: c_call} +} + +// Finalize provides a mock function with given fields: ctx, tCtx +func (_m *Plugin) Finalize(ctx context.Context, tCtx core.TaskExecutionContext) error { + ret := _m.Called(ctx, tCtx) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, core.TaskExecutionContext) error); ok { + r0 = rf(ctx, tCtx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +type Plugin_GetID struct { + *mock.Call +} + +func (_m Plugin_GetID) Return(_a0 string) *Plugin_GetID { + return &Plugin_GetID{Call: _m.Call.Return(_a0)} +} + +func (_m *Plugin) OnGetID() *Plugin_GetID { + c_call := _m.On("GetID") + return &Plugin_GetID{Call: c_call} +} + +func (_m *Plugin) OnGetIDMatch(matchers ...interface{}) *Plugin_GetID { + c_call := _m.On("GetID", matchers...) + return &Plugin_GetID{Call: c_call} +} + +// GetID provides a mock function with given fields: +func (_m *Plugin) GetID() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +type Plugin_GetProperties struct { + *mock.Call +} + +func (_m Plugin_GetProperties) Return(_a0 core.PluginProperties) *Plugin_GetProperties { + return &Plugin_GetProperties{Call: _m.Call.Return(_a0)} +} + +func (_m *Plugin) OnGetProperties() *Plugin_GetProperties { + c_call := _m.On("GetProperties") + return &Plugin_GetProperties{Call: c_call} +} + +func (_m *Plugin) OnGetPropertiesMatch(matchers ...interface{}) *Plugin_GetProperties { + c_call := _m.On("GetProperties", matchers...) 
+ return &Plugin_GetProperties{Call: c_call} +} + +// GetProperties provides a mock function with given fields: +func (_m *Plugin) GetProperties() core.PluginProperties { + ret := _m.Called() + + var r0 core.PluginProperties + if rf, ok := ret.Get(0).(func() core.PluginProperties); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(core.PluginProperties) + } + + return r0 +} + +type Plugin_Handle struct { + *mock.Call +} + +func (_m Plugin_Handle) Return(_a0 core.Transition, _a1 error) *Plugin_Handle { + return &Plugin_Handle{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *Plugin) OnHandle(ctx context.Context, tCtx core.TaskExecutionContext) *Plugin_Handle { + c_call := _m.On("Handle", ctx, tCtx) + return &Plugin_Handle{Call: c_call} +} + +func (_m *Plugin) OnHandleMatch(matchers ...interface{}) *Plugin_Handle { + c_call := _m.On("Handle", matchers...) + return &Plugin_Handle{Call: c_call} +} + +// Handle provides a mock function with given fields: ctx, tCtx +func (_m *Plugin) Handle(ctx context.Context, tCtx core.TaskExecutionContext) (core.Transition, error) { + ret := _m.Called(ctx, tCtx) + + var r0 core.Transition + if rf, ok := ret.Get(0).(func(context.Context, core.TaskExecutionContext) core.Transition); ok { + r0 = rf(ctx, tCtx) + } else { + r0 = ret.Get(0).(core.Transition) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, core.TaskExecutionContext) error); ok { + r1 = rf(ctx, tCtx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/flyteplugins/go/tasks/pluginmachinery/core/mocks/plugin_state_reader.go b/flyteplugins/go/tasks/pluginmachinery/core/mocks/plugin_state_reader.go new file mode 100644 index 0000000000..ffbb8e982c --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/core/mocks/plugin_state_reader.go @@ -0,0 +1,81 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. 
+ +package mocks + +import mock "github.com/stretchr/testify/mock" + +// PluginStateReader is an autogenerated mock type for the PluginStateReader type +type PluginStateReader struct { + mock.Mock +} + +type PluginStateReader_Get struct { + *mock.Call +} + +func (_m PluginStateReader_Get) Return(stateVersion uint8, err error) *PluginStateReader_Get { + return &PluginStateReader_Get{Call: _m.Call.Return(stateVersion, err)} +} + +func (_m *PluginStateReader) OnGet(t interface{}) *PluginStateReader_Get { + c_call := _m.On("Get", t) + return &PluginStateReader_Get{Call: c_call} +} + +func (_m *PluginStateReader) OnGetMatch(matchers ...interface{}) *PluginStateReader_Get { + c_call := _m.On("Get", matchers...) + return &PluginStateReader_Get{Call: c_call} +} + +// Get provides a mock function with given fields: t +func (_m *PluginStateReader) Get(t interface{}) (uint8, error) { + ret := _m.Called(t) + + var r0 uint8 + if rf, ok := ret.Get(0).(func(interface{}) uint8); ok { + r0 = rf(t) + } else { + r0 = ret.Get(0).(uint8) + } + + var r1 error + if rf, ok := ret.Get(1).(func(interface{}) error); ok { + r1 = rf(t) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type PluginStateReader_GetStateVersion struct { + *mock.Call +} + +func (_m PluginStateReader_GetStateVersion) Return(_a0 uint8) *PluginStateReader_GetStateVersion { + return &PluginStateReader_GetStateVersion{Call: _m.Call.Return(_a0)} +} + +func (_m *PluginStateReader) OnGetStateVersion() *PluginStateReader_GetStateVersion { + c_call := _m.On("GetStateVersion") + return &PluginStateReader_GetStateVersion{Call: c_call} +} + +func (_m *PluginStateReader) OnGetStateVersionMatch(matchers ...interface{}) *PluginStateReader_GetStateVersion { + c_call := _m.On("GetStateVersion", matchers...) 
+ return &PluginStateReader_GetStateVersion{Call: c_call} +} + +// GetStateVersion provides a mock function with given fields: +func (_m *PluginStateReader) GetStateVersion() uint8 { + ret := _m.Called() + + var r0 uint8 + if rf, ok := ret.Get(0).(func() uint8); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint8) + } + + return r0 +} diff --git a/flyteplugins/go/tasks/pluginmachinery/core/mocks/plugin_state_writer.go b/flyteplugins/go/tasks/pluginmachinery/core/mocks/plugin_state_writer.go new file mode 100644 index 0000000000..b0ec62e5fe --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/core/mocks/plugin_state_writer.go @@ -0,0 +1,74 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. + +package mocks + +import mock "github.com/stretchr/testify/mock" + +// PluginStateWriter is an autogenerated mock type for the PluginStateWriter type +type PluginStateWriter struct { + mock.Mock +} + +type PluginStateWriter_Put struct { + *mock.Call +} + +func (_m PluginStateWriter_Put) Return(_a0 error) *PluginStateWriter_Put { + return &PluginStateWriter_Put{Call: _m.Call.Return(_a0)} +} + +func (_m *PluginStateWriter) OnPut(stateVersion uint8, v interface{}) *PluginStateWriter_Put { + c_call := _m.On("Put", stateVersion, v) + return &PluginStateWriter_Put{Call: c_call} +} + +func (_m *PluginStateWriter) OnPutMatch(matchers ...interface{}) *PluginStateWriter_Put { + c_call := _m.On("Put", matchers...) 
+ return &PluginStateWriter_Put{Call: c_call} +} + +// Put provides a mock function with given fields: stateVersion, v +func (_m *PluginStateWriter) Put(stateVersion uint8, v interface{}) error { + ret := _m.Called(stateVersion, v) + + var r0 error + if rf, ok := ret.Get(0).(func(uint8, interface{}) error); ok { + r0 = rf(stateVersion, v) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +type PluginStateWriter_Reset struct { + *mock.Call +} + +func (_m PluginStateWriter_Reset) Return(_a0 error) *PluginStateWriter_Reset { + return &PluginStateWriter_Reset{Call: _m.Call.Return(_a0)} +} + +func (_m *PluginStateWriter) OnReset() *PluginStateWriter_Reset { + c_call := _m.On("Reset") + return &PluginStateWriter_Reset{Call: c_call} +} + +func (_m *PluginStateWriter) OnResetMatch(matchers ...interface{}) *PluginStateWriter_Reset { + c_call := _m.On("Reset", matchers...) + return &PluginStateWriter_Reset{Call: c_call} +} + +// Reset provides a mock function with given fields: +func (_m *PluginStateWriter) Reset() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} diff --git a/flyteplugins/go/tasks/pluginmachinery/core/mocks/resource_manager.go b/flyteplugins/go/tasks/pluginmachinery/core/mocks/resource_manager.go new file mode 100644 index 0000000000..04a0f05977 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/core/mocks/resource_manager.go @@ -0,0 +1,118 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + core "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + mock "github.com/stretchr/testify/mock" +) + +// ResourceManager is an autogenerated mock type for the ResourceManager type +type ResourceManager struct { + mock.Mock +} + +type ResourceManager_AllocateResource struct { + *mock.Call +} + +func (_m ResourceManager_AllocateResource) Return(_a0 core.AllocationStatus, _a1 error) *ResourceManager_AllocateResource { + return &ResourceManager_AllocateResource{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *ResourceManager) OnAllocateResource(ctx context.Context, namespace core.ResourceNamespace, allocationToken string, constraintsSpec core.ResourceConstraintsSpec) *ResourceManager_AllocateResource { + c_call := _m.On("AllocateResource", ctx, namespace, allocationToken, constraintsSpec) + return &ResourceManager_AllocateResource{Call: c_call} +} + +func (_m *ResourceManager) OnAllocateResourceMatch(matchers ...interface{}) *ResourceManager_AllocateResource { + c_call := _m.On("AllocateResource", matchers...) 
+ return &ResourceManager_AllocateResource{Call: c_call} +} + +// AllocateResource provides a mock function with given fields: ctx, namespace, allocationToken, constraintsSpec +func (_m *ResourceManager) AllocateResource(ctx context.Context, namespace core.ResourceNamespace, allocationToken string, constraintsSpec core.ResourceConstraintsSpec) (core.AllocationStatus, error) { + ret := _m.Called(ctx, namespace, allocationToken, constraintsSpec) + + var r0 core.AllocationStatus + if rf, ok := ret.Get(0).(func(context.Context, core.ResourceNamespace, string, core.ResourceConstraintsSpec) core.AllocationStatus); ok { + r0 = rf(ctx, namespace, allocationToken, constraintsSpec) + } else { + r0 = ret.Get(0).(core.AllocationStatus) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, core.ResourceNamespace, string, core.ResourceConstraintsSpec) error); ok { + r1 = rf(ctx, namespace, allocationToken, constraintsSpec) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type ResourceManager_GetID struct { + *mock.Call +} + +func (_m ResourceManager_GetID) Return(_a0 string) *ResourceManager_GetID { + return &ResourceManager_GetID{Call: _m.Call.Return(_a0)} +} + +func (_m *ResourceManager) OnGetID() *ResourceManager_GetID { + c_call := _m.On("GetID") + return &ResourceManager_GetID{Call: c_call} +} + +func (_m *ResourceManager) OnGetIDMatch(matchers ...interface{}) *ResourceManager_GetID { + c_call := _m.On("GetID", matchers...) 
+ return &ResourceManager_GetID{Call: c_call} +} + +// GetID provides a mock function with given fields: +func (_m *ResourceManager) GetID() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +type ResourceManager_ReleaseResource struct { + *mock.Call +} + +func (_m ResourceManager_ReleaseResource) Return(_a0 error) *ResourceManager_ReleaseResource { + return &ResourceManager_ReleaseResource{Call: _m.Call.Return(_a0)} +} + +func (_m *ResourceManager) OnReleaseResource(ctx context.Context, namespace core.ResourceNamespace, allocationToken string) *ResourceManager_ReleaseResource { + c_call := _m.On("ReleaseResource", ctx, namespace, allocationToken) + return &ResourceManager_ReleaseResource{Call: c_call} +} + +func (_m *ResourceManager) OnReleaseResourceMatch(matchers ...interface{}) *ResourceManager_ReleaseResource { + c_call := _m.On("ReleaseResource", matchers...) + return &ResourceManager_ReleaseResource{Call: c_call} +} + +// ReleaseResource provides a mock function with given fields: ctx, namespace, allocationToken +func (_m *ResourceManager) ReleaseResource(ctx context.Context, namespace core.ResourceNamespace, allocationToken string) error { + ret := _m.Called(ctx, namespace, allocationToken) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, core.ResourceNamespace, string) error); ok { + r0 = rf(ctx, namespace, allocationToken) + } else { + r0 = ret.Error(0) + } + + return r0 +} diff --git a/flyteplugins/go/tasks/pluginmachinery/core/mocks/resource_negotiator.go b/flyteplugins/go/tasks/pluginmachinery/core/mocks/resource_negotiator.go new file mode 100644 index 0000000000..eabec21e8e --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/core/mocks/resource_negotiator.go @@ -0,0 +1,47 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + core "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + mock "github.com/stretchr/testify/mock" +) + +// ResourceRegistrar is an autogenerated mock type for the ResourceRegistrar type +type ResourceNegotiator struct { + mock.Mock +} + +type ResourceNegotiator_RegisterResourceQuota struct { + *mock.Call +} + +func (_m ResourceNegotiator_RegisterResourceQuota) Return(_a0 error) *ResourceNegotiator_RegisterResourceQuota { + return &ResourceNegotiator_RegisterResourceQuota{Call: _m.Call.Return(_a0)} +} + +func (_m *ResourceNegotiator) OnRegisterResourceQuota(ctx context.Context, namespace core.ResourceNamespace, quota int) *ResourceNegotiator_RegisterResourceQuota { + c := _m.On("RegisterResourceQuota", ctx, namespace, quota) + return &ResourceNegotiator_RegisterResourceQuota{Call: c} +} + +func (_m *ResourceNegotiator) OnRegisterResourceQuotaMatch(matchers ...interface{}) *ResourceNegotiator_RegisterResourceQuota { + c := _m.On("RegisterResourceQuota", matchers...) + return &ResourceNegotiator_RegisterResourceQuota{Call: c} +} + +// RegisterResourceQuota provides a mock function with given fields: ctx, namespace, quota +func (_m *ResourceNegotiator) RegisterResourceQuota(ctx context.Context, namespace core.ResourceNamespace, quota int) error { + ret := _m.Called(ctx, namespace, quota) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, core.ResourceNamespace, int) error); ok { + r0 = rf(ctx, namespace, quota) + } else { + r0 = ret.Error(0) + } + + return r0 +} diff --git a/flyteplugins/go/tasks/pluginmachinery/core/mocks/resource_registrar.go b/flyteplugins/go/tasks/pluginmachinery/core/mocks/resource_registrar.go new file mode 100644 index 0000000000..444602769b --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/core/mocks/resource_registrar.go @@ -0,0 +1,47 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + core "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + mock "github.com/stretchr/testify/mock" +) + +// ResourceRegistrar is an autogenerated mock type for the ResourceRegistrar type +type ResourceRegistrar struct { + mock.Mock +} + +type ResourceRegistrar_RegisterResourceQuota struct { + *mock.Call +} + +func (_m ResourceRegistrar_RegisterResourceQuota) Return(_a0 error) *ResourceRegistrar_RegisterResourceQuota { + return &ResourceRegistrar_RegisterResourceQuota{Call: _m.Call.Return(_a0)} +} + +func (_m *ResourceRegistrar) OnRegisterResourceQuota(ctx context.Context, namespace core.ResourceNamespace, quota int) *ResourceRegistrar_RegisterResourceQuota { + c_call := _m.On("RegisterResourceQuota", ctx, namespace, quota) + return &ResourceRegistrar_RegisterResourceQuota{Call: c_call} +} + +func (_m *ResourceRegistrar) OnRegisterResourceQuotaMatch(matchers ...interface{}) *ResourceRegistrar_RegisterResourceQuota { + c_call := _m.On("RegisterResourceQuota", matchers...) + return &ResourceRegistrar_RegisterResourceQuota{Call: c_call} +} + +// RegisterResourceQuota provides a mock function with given fields: ctx, namespace, quota +func (_m *ResourceRegistrar) RegisterResourceQuota(ctx context.Context, namespace core.ResourceNamespace, quota int) error { + ret := _m.Called(ctx, namespace, quota) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, core.ResourceNamespace, int) error); ok { + r0 = rf(ctx, namespace, quota) + } else { + r0 = ret.Error(0) + } + + return r0 +} diff --git a/flyteplugins/go/tasks/pluginmachinery/core/mocks/secret_manager.go b/flyteplugins/go/tasks/pluginmachinery/core/mocks/secret_manager.go new file mode 100644 index 0000000000..9b882aaf26 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/core/mocks/secret_manager.go @@ -0,0 +1,53 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" +) + +// SecretManager is an autogenerated mock type for the SecretManager type +type SecretManager struct { + mock.Mock +} + +type SecretManager_Get struct { + *mock.Call +} + +func (_m SecretManager_Get) Return(_a0 string, _a1 error) *SecretManager_Get { + return &SecretManager_Get{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *SecretManager) OnGet(ctx context.Context, key string) *SecretManager_Get { + c_call := _m.On("Get", ctx, key) + return &SecretManager_Get{Call: c_call} +} + +func (_m *SecretManager) OnGetMatch(matchers ...interface{}) *SecretManager_Get { + c_call := _m.On("Get", matchers...) + return &SecretManager_Get{Call: c_call} +} + +// Get provides a mock function with given fields: ctx, key +func (_m *SecretManager) Get(ctx context.Context, key string) (string, error) { + ret := _m.Called(ctx, key) + + var r0 string + if rf, ok := ret.Get(0).(func(context.Context, string) string); ok { + r0 = rf(ctx, key) + } else { + r0 = ret.Get(0).(string) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, key) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/flyteplugins/go/tasks/pluginmachinery/core/mocks/setup_context.go b/flyteplugins/go/tasks/pluginmachinery/core/mocks/setup_context.go new file mode 100644 index 0000000000..e08e747be4 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/core/mocks/setup_context.go @@ -0,0 +1,251 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. 
+ +package mocks + +import ( + core "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + mock "github.com/stretchr/testify/mock" + + promutils "github.com/flyteorg/flyte/v2/flytestdlib/promutils" +) + +// SetupContext is an autogenerated mock type for the SetupContext type +type SetupContext struct { + mock.Mock +} + +type SetupContext_EnqueueOwner struct { + *mock.Call +} + +func (_m SetupContext_EnqueueOwner) Return(_a0 core.EnqueueOwner) *SetupContext_EnqueueOwner { + return &SetupContext_EnqueueOwner{Call: _m.Call.Return(_a0)} +} + +func (_m *SetupContext) OnEnqueueOwner() *SetupContext_EnqueueOwner { + c_call := _m.On("EnqueueOwner") + return &SetupContext_EnqueueOwner{Call: c_call} +} + +func (_m *SetupContext) OnEnqueueOwnerMatch(matchers ...interface{}) *SetupContext_EnqueueOwner { + c_call := _m.On("EnqueueOwner", matchers...) + return &SetupContext_EnqueueOwner{Call: c_call} +} + +// EnqueueOwner provides a mock function with given fields: +func (_m *SetupContext) EnqueueOwner() core.EnqueueOwner { + ret := _m.Called() + + var r0 core.EnqueueOwner + if rf, ok := ret.Get(0).(func() core.EnqueueOwner); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(core.EnqueueOwner) + } + } + + return r0 +} + +type SetupContext_IncludeEnqueueLabels struct { + *mock.Call +} + +func (_m SetupContext_IncludeEnqueueLabels) Return(_a0 []string) *SetupContext_IncludeEnqueueLabels { + return &SetupContext_IncludeEnqueueLabels{Call: _m.Call.Return(_a0)} +} + +func (_m *SetupContext) OnIncludeEnqueueLabels() *SetupContext_IncludeEnqueueLabels { + c_call := _m.On("IncludeEnqueueLabels") + return &SetupContext_IncludeEnqueueLabels{Call: c_call} +} + +func (_m *SetupContext) OnIncludeEnqueueLabelsMatch(matchers ...interface{}) *SetupContext_IncludeEnqueueLabels { + c_call := _m.On("IncludeEnqueueLabels", matchers...) 
+ return &SetupContext_IncludeEnqueueLabels{Call: c_call} +} + +// IncludeEnqueueLabels provides a mock function with given fields: +func (_m *SetupContext) IncludeEnqueueLabels() []string { + ret := _m.Called() + + var r0 []string + if rf, ok := ret.Get(0).(func() []string); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]string) + } + } + + return r0 +} + +type SetupContext_KubeClient struct { + *mock.Call +} + +func (_m SetupContext_KubeClient) Return(_a0 core.KubeClient) *SetupContext_KubeClient { + return &SetupContext_KubeClient{Call: _m.Call.Return(_a0)} +} + +func (_m *SetupContext) OnKubeClient() *SetupContext_KubeClient { + c_call := _m.On("KubeClient") + return &SetupContext_KubeClient{Call: c_call} +} + +func (_m *SetupContext) OnKubeClientMatch(matchers ...interface{}) *SetupContext_KubeClient { + c_call := _m.On("KubeClient", matchers...) + return &SetupContext_KubeClient{Call: c_call} +} + +// KubeClient provides a mock function with given fields: +func (_m *SetupContext) KubeClient() core.KubeClient { + ret := _m.Called() + + var r0 core.KubeClient + if rf, ok := ret.Get(0).(func() core.KubeClient); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(core.KubeClient) + } + } + + return r0 +} + +type SetupContext_MetricsScope struct { + *mock.Call +} + +func (_m SetupContext_MetricsScope) Return(_a0 promutils.Scope) *SetupContext_MetricsScope { + return &SetupContext_MetricsScope{Call: _m.Call.Return(_a0)} +} + +func (_m *SetupContext) OnMetricsScope() *SetupContext_MetricsScope { + c_call := _m.On("MetricsScope") + return &SetupContext_MetricsScope{Call: c_call} +} + +func (_m *SetupContext) OnMetricsScopeMatch(matchers ...interface{}) *SetupContext_MetricsScope { + c_call := _m.On("MetricsScope", matchers...) 
+ return &SetupContext_MetricsScope{Call: c_call} +} + +// MetricsScope provides a mock function with given fields: +func (_m *SetupContext) MetricsScope() promutils.Scope { + ret := _m.Called() + + var r0 promutils.Scope + if rf, ok := ret.Get(0).(func() promutils.Scope); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(promutils.Scope) + } + } + + return r0 +} + +type SetupContext_OwnerKind struct { + *mock.Call +} + +func (_m SetupContext_OwnerKind) Return(_a0 string) *SetupContext_OwnerKind { + return &SetupContext_OwnerKind{Call: _m.Call.Return(_a0)} +} + +func (_m *SetupContext) OnOwnerKind() *SetupContext_OwnerKind { + c_call := _m.On("OwnerKind") + return &SetupContext_OwnerKind{Call: c_call} +} + +func (_m *SetupContext) OnOwnerKindMatch(matchers ...interface{}) *SetupContext_OwnerKind { + c_call := _m.On("OwnerKind", matchers...) + return &SetupContext_OwnerKind{Call: c_call} +} + +// OwnerKind provides a mock function with given fields: +func (_m *SetupContext) OwnerKind() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +type SetupContext_ResourceRegistrar struct { + *mock.Call +} + +func (_m SetupContext_ResourceRegistrar) Return(_a0 core.ResourceRegistrar) *SetupContext_ResourceRegistrar { + return &SetupContext_ResourceRegistrar{Call: _m.Call.Return(_a0)} +} + +func (_m *SetupContext) OnResourceRegistrar() *SetupContext_ResourceRegistrar { + c_call := _m.On("ResourceRegistrar") + return &SetupContext_ResourceRegistrar{Call: c_call} +} + +func (_m *SetupContext) OnResourceRegistrarMatch(matchers ...interface{}) *SetupContext_ResourceRegistrar { + c_call := _m.On("ResourceRegistrar", matchers...) 
+ return &SetupContext_ResourceRegistrar{Call: c_call} +} + +// ResourceRegistrar provides a mock function with given fields: +func (_m *SetupContext) ResourceRegistrar() core.ResourceRegistrar { + ret := _m.Called() + + var r0 core.ResourceRegistrar + if rf, ok := ret.Get(0).(func() core.ResourceRegistrar); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(core.ResourceRegistrar) + } + } + + return r0 +} + +type SetupContext_SecretManager struct { + *mock.Call +} + +func (_m SetupContext_SecretManager) Return(_a0 core.SecretManager) *SetupContext_SecretManager { + return &SetupContext_SecretManager{Call: _m.Call.Return(_a0)} +} + +func (_m *SetupContext) OnSecretManager() *SetupContext_SecretManager { + c_call := _m.On("SecretManager") + return &SetupContext_SecretManager{Call: c_call} +} + +func (_m *SetupContext) OnSecretManagerMatch(matchers ...interface{}) *SetupContext_SecretManager { + c_call := _m.On("SecretManager", matchers...) + return &SetupContext_SecretManager{Call: c_call} +} + +// SecretManager provides a mock function with given fields: +func (_m *SetupContext) SecretManager() core.SecretManager { + ret := _m.Called() + + var r0 core.SecretManager + if rf, ok := ret.Get(0).(func() core.SecretManager); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(core.SecretManager) + } + } + + return r0 +} diff --git a/flyteplugins/go/tasks/pluginmachinery/core/mocks/task_execution_context.go b/flyteplugins/go/tasks/pluginmachinery/core/mocks/task_execution_context.go new file mode 100644 index 0000000000..82c29f06b3 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/core/mocks/task_execution_context.go @@ -0,0 +1,427 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. 
+ +package mocks + +import ( + catalog "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/catalog" + core "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + + io "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/io" + + mock "github.com/stretchr/testify/mock" + + storage "github.com/flyteorg/flyte/v2/flytestdlib/storage" +) + +// TaskExecutionContext is an autogenerated mock type for the TaskExecutionContext type +type TaskExecutionContext struct { + mock.Mock +} + +type TaskExecutionContext_Catalog struct { + *mock.Call +} + +func (_m TaskExecutionContext_Catalog) Return(_a0 catalog.AsyncClient) *TaskExecutionContext_Catalog { + return &TaskExecutionContext_Catalog{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskExecutionContext) OnCatalog() *TaskExecutionContext_Catalog { + c_call := _m.On("Catalog") + return &TaskExecutionContext_Catalog{Call: c_call} +} + +func (_m *TaskExecutionContext) OnCatalogMatch(matchers ...interface{}) *TaskExecutionContext_Catalog { + c_call := _m.On("Catalog", matchers...) 
+ return &TaskExecutionContext_Catalog{Call: c_call} +} + +// Catalog provides a mock function with given fields: +func (_m *TaskExecutionContext) Catalog() catalog.AsyncClient { + ret := _m.Called() + + var r0 catalog.AsyncClient + if rf, ok := ret.Get(0).(func() catalog.AsyncClient); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(catalog.AsyncClient) + } + } + + return r0 +} + +type TaskExecutionContext_DataStore struct { + *mock.Call +} + +func (_m TaskExecutionContext_DataStore) Return(_a0 *storage.DataStore) *TaskExecutionContext_DataStore { + return &TaskExecutionContext_DataStore{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskExecutionContext) OnDataStore() *TaskExecutionContext_DataStore { + c_call := _m.On("DataStore") + return &TaskExecutionContext_DataStore{Call: c_call} +} + +func (_m *TaskExecutionContext) OnDataStoreMatch(matchers ...interface{}) *TaskExecutionContext_DataStore { + c_call := _m.On("DataStore", matchers...) + return &TaskExecutionContext_DataStore{Call: c_call} +} + +// DataStore provides a mock function with given fields: +func (_m *TaskExecutionContext) DataStore() *storage.DataStore { + ret := _m.Called() + + var r0 *storage.DataStore + if rf, ok := ret.Get(0).(func() *storage.DataStore); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*storage.DataStore) + } + } + + return r0 +} + +type TaskExecutionContext_EventsRecorder struct { + *mock.Call +} + +func (_m TaskExecutionContext_EventsRecorder) Return(_a0 core.EventsRecorder) *TaskExecutionContext_EventsRecorder { + return &TaskExecutionContext_EventsRecorder{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskExecutionContext) OnEventsRecorder() *TaskExecutionContext_EventsRecorder { + c_call := _m.On("EventsRecorder") + return &TaskExecutionContext_EventsRecorder{Call: c_call} +} + +func (_m *TaskExecutionContext) OnEventsRecorderMatch(matchers ...interface{}) *TaskExecutionContext_EventsRecorder { + c_call := _m.On("EventsRecorder", 
matchers...) + return &TaskExecutionContext_EventsRecorder{Call: c_call} +} + +// EventsRecorder provides a mock function with given fields: +func (_m *TaskExecutionContext) EventsRecorder() core.EventsRecorder { + ret := _m.Called() + + var r0 core.EventsRecorder + if rf, ok := ret.Get(0).(func() core.EventsRecorder); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(core.EventsRecorder) + } + } + + return r0 +} + +type TaskExecutionContext_InputReader struct { + *mock.Call +} + +func (_m TaskExecutionContext_InputReader) Return(_a0 io.InputReader) *TaskExecutionContext_InputReader { + return &TaskExecutionContext_InputReader{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskExecutionContext) OnInputReader() *TaskExecutionContext_InputReader { + c_call := _m.On("InputReader") + return &TaskExecutionContext_InputReader{Call: c_call} +} + +func (_m *TaskExecutionContext) OnInputReaderMatch(matchers ...interface{}) *TaskExecutionContext_InputReader { + c_call := _m.On("InputReader", matchers...) 
+ return &TaskExecutionContext_InputReader{Call: c_call} +} + +// InputReader provides a mock function with given fields: +func (_m *TaskExecutionContext) InputReader() io.InputReader { + ret := _m.Called() + + var r0 io.InputReader + if rf, ok := ret.Get(0).(func() io.InputReader); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(io.InputReader) + } + } + + return r0 +} + +type TaskExecutionContext_OutputWriter struct { + *mock.Call +} + +func (_m TaskExecutionContext_OutputWriter) Return(_a0 io.OutputWriter) *TaskExecutionContext_OutputWriter { + return &TaskExecutionContext_OutputWriter{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskExecutionContext) OnOutputWriter() *TaskExecutionContext_OutputWriter { + c_call := _m.On("OutputWriter") + return &TaskExecutionContext_OutputWriter{Call: c_call} +} + +func (_m *TaskExecutionContext) OnOutputWriterMatch(matchers ...interface{}) *TaskExecutionContext_OutputWriter { + c_call := _m.On("OutputWriter", matchers...) + return &TaskExecutionContext_OutputWriter{Call: c_call} +} + +// OutputWriter provides a mock function with given fields: +func (_m *TaskExecutionContext) OutputWriter() io.OutputWriter { + ret := _m.Called() + + var r0 io.OutputWriter + if rf, ok := ret.Get(0).(func() io.OutputWriter); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(io.OutputWriter) + } + } + + return r0 +} + +type TaskExecutionContext_PluginStateReader struct { + *mock.Call +} + +func (_m TaskExecutionContext_PluginStateReader) Return(_a0 core.PluginStateReader) *TaskExecutionContext_PluginStateReader { + return &TaskExecutionContext_PluginStateReader{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskExecutionContext) OnPluginStateReader() *TaskExecutionContext_PluginStateReader { + c_call := _m.On("PluginStateReader") + return &TaskExecutionContext_PluginStateReader{Call: c_call} +} + +func (_m *TaskExecutionContext) OnPluginStateReaderMatch(matchers ...interface{}) 
*TaskExecutionContext_PluginStateReader { + c_call := _m.On("PluginStateReader", matchers...) + return &TaskExecutionContext_PluginStateReader{Call: c_call} +} + +// PluginStateReader provides a mock function with given fields: +func (_m *TaskExecutionContext) PluginStateReader() core.PluginStateReader { + ret := _m.Called() + + var r0 core.PluginStateReader + if rf, ok := ret.Get(0).(func() core.PluginStateReader); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(core.PluginStateReader) + } + } + + return r0 +} + +type TaskExecutionContext_PluginStateWriter struct { + *mock.Call +} + +func (_m TaskExecutionContext_PluginStateWriter) Return(_a0 core.PluginStateWriter) *TaskExecutionContext_PluginStateWriter { + return &TaskExecutionContext_PluginStateWriter{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskExecutionContext) OnPluginStateWriter() *TaskExecutionContext_PluginStateWriter { + c_call := _m.On("PluginStateWriter") + return &TaskExecutionContext_PluginStateWriter{Call: c_call} +} + +func (_m *TaskExecutionContext) OnPluginStateWriterMatch(matchers ...interface{}) *TaskExecutionContext_PluginStateWriter { + c_call := _m.On("PluginStateWriter", matchers...) 
+ return &TaskExecutionContext_PluginStateWriter{Call: c_call} +} + +// PluginStateWriter provides a mock function with given fields: +func (_m *TaskExecutionContext) PluginStateWriter() core.PluginStateWriter { + ret := _m.Called() + + var r0 core.PluginStateWriter + if rf, ok := ret.Get(0).(func() core.PluginStateWriter); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(core.PluginStateWriter) + } + } + + return r0 +} + +type TaskExecutionContext_ResourceManager struct { + *mock.Call +} + +func (_m TaskExecutionContext_ResourceManager) Return(_a0 core.ResourceManager) *TaskExecutionContext_ResourceManager { + return &TaskExecutionContext_ResourceManager{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskExecutionContext) OnResourceManager() *TaskExecutionContext_ResourceManager { + c_call := _m.On("ResourceManager") + return &TaskExecutionContext_ResourceManager{Call: c_call} +} + +func (_m *TaskExecutionContext) OnResourceManagerMatch(matchers ...interface{}) *TaskExecutionContext_ResourceManager { + c_call := _m.On("ResourceManager", matchers...) 
+ return &TaskExecutionContext_ResourceManager{Call: c_call} +} + +// ResourceManager provides a mock function with given fields: +func (_m *TaskExecutionContext) ResourceManager() core.ResourceManager { + ret := _m.Called() + + var r0 core.ResourceManager + if rf, ok := ret.Get(0).(func() core.ResourceManager); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(core.ResourceManager) + } + } + + return r0 +} + +type TaskExecutionContext_SecretManager struct { + *mock.Call +} + +func (_m TaskExecutionContext_SecretManager) Return(_a0 core.SecretManager) *TaskExecutionContext_SecretManager { + return &TaskExecutionContext_SecretManager{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskExecutionContext) OnSecretManager() *TaskExecutionContext_SecretManager { + c_call := _m.On("SecretManager") + return &TaskExecutionContext_SecretManager{Call: c_call} +} + +func (_m *TaskExecutionContext) OnSecretManagerMatch(matchers ...interface{}) *TaskExecutionContext_SecretManager { + c_call := _m.On("SecretManager", matchers...) 
+ return &TaskExecutionContext_SecretManager{Call: c_call} +} + +// SecretManager provides a mock function with given fields: +func (_m *TaskExecutionContext) SecretManager() core.SecretManager { + ret := _m.Called() + + var r0 core.SecretManager + if rf, ok := ret.Get(0).(func() core.SecretManager); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(core.SecretManager) + } + } + + return r0 +} + +type TaskExecutionContext_TaskExecutionMetadata struct { + *mock.Call +} + +func (_m TaskExecutionContext_TaskExecutionMetadata) Return(_a0 core.TaskExecutionMetadata) *TaskExecutionContext_TaskExecutionMetadata { + return &TaskExecutionContext_TaskExecutionMetadata{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskExecutionContext) OnTaskExecutionMetadata() *TaskExecutionContext_TaskExecutionMetadata { + c_call := _m.On("TaskExecutionMetadata") + return &TaskExecutionContext_TaskExecutionMetadata{Call: c_call} +} + +func (_m *TaskExecutionContext) OnTaskExecutionMetadataMatch(matchers ...interface{}) *TaskExecutionContext_TaskExecutionMetadata { + c_call := _m.On("TaskExecutionMetadata", matchers...) 
+ return &TaskExecutionContext_TaskExecutionMetadata{Call: c_call} +} + +// TaskExecutionMetadata provides a mock function with given fields: +func (_m *TaskExecutionContext) TaskExecutionMetadata() core.TaskExecutionMetadata { + ret := _m.Called() + + var r0 core.TaskExecutionMetadata + if rf, ok := ret.Get(0).(func() core.TaskExecutionMetadata); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(core.TaskExecutionMetadata) + } + } + + return r0 +} + +type TaskExecutionContext_TaskReader struct { + *mock.Call +} + +func (_m TaskExecutionContext_TaskReader) Return(_a0 core.TaskReader) *TaskExecutionContext_TaskReader { + return &TaskExecutionContext_TaskReader{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskExecutionContext) OnTaskReader() *TaskExecutionContext_TaskReader { + c_call := _m.On("TaskReader") + return &TaskExecutionContext_TaskReader{Call: c_call} +} + +func (_m *TaskExecutionContext) OnTaskReaderMatch(matchers ...interface{}) *TaskExecutionContext_TaskReader { + c_call := _m.On("TaskReader", matchers...) 
+ return &TaskExecutionContext_TaskReader{Call: c_call} +} + +// TaskReader provides a mock function with given fields: +func (_m *TaskExecutionContext) TaskReader() core.TaskReader { + ret := _m.Called() + + var r0 core.TaskReader + if rf, ok := ret.Get(0).(func() core.TaskReader); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(core.TaskReader) + } + } + + return r0 +} + +type TaskExecutionContext_TaskRefreshIndicator struct { + *mock.Call +} + +func (_m TaskExecutionContext_TaskRefreshIndicator) Return(_a0 core.SignalAsync) *TaskExecutionContext_TaskRefreshIndicator { + return &TaskExecutionContext_TaskRefreshIndicator{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskExecutionContext) OnTaskRefreshIndicator() *TaskExecutionContext_TaskRefreshIndicator { + c_call := _m.On("TaskRefreshIndicator") + return &TaskExecutionContext_TaskRefreshIndicator{Call: c_call} +} + +func (_m *TaskExecutionContext) OnTaskRefreshIndicatorMatch(matchers ...interface{}) *TaskExecutionContext_TaskRefreshIndicator { + c_call := _m.On("TaskRefreshIndicator", matchers...) + return &TaskExecutionContext_TaskRefreshIndicator{Call: c_call} +} + +// TaskRefreshIndicator provides a mock function with given fields: +func (_m *TaskExecutionContext) TaskRefreshIndicator() core.SignalAsync { + ret := _m.Called() + + var r0 core.SignalAsync + if rf, ok := ret.Get(0).(func() core.SignalAsync); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(core.SignalAsync) + } + } + + return r0 +} diff --git a/flyteplugins/go/tasks/pluginmachinery/core/mocks/task_execution_id.go b/flyteplugins/go/tasks/pluginmachinery/core/mocks/task_execution_id.go new file mode 100644 index 0000000000..8177b5b8d9 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/core/mocks/task_execution_id.go @@ -0,0 +1,148 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. 
+ +package mocks + +import ( + flyteidlcore "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" + mock "github.com/stretchr/testify/mock" +) + +// TaskExecutionID is an autogenerated mock type for the TaskExecutionID type +type TaskExecutionID struct { + mock.Mock +} + +type TaskExecutionID_GetGeneratedName struct { + *mock.Call +} + +func (_m TaskExecutionID_GetGeneratedName) Return(_a0 string) *TaskExecutionID_GetGeneratedName { + return &TaskExecutionID_GetGeneratedName{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskExecutionID) OnGetGeneratedName() *TaskExecutionID_GetGeneratedName { + c_call := _m.On("GetGeneratedName") + return &TaskExecutionID_GetGeneratedName{Call: c_call} +} + +func (_m *TaskExecutionID) OnGetGeneratedNameMatch(matchers ...interface{}) *TaskExecutionID_GetGeneratedName { + c_call := _m.On("GetGeneratedName", matchers...) + return &TaskExecutionID_GetGeneratedName{Call: c_call} +} + +// GetGeneratedName provides a mock function with given fields: +func (_m *TaskExecutionID) GetGeneratedName() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +type TaskExecutionID_GetGeneratedNameWith struct { + *mock.Call +} + +func (_m TaskExecutionID_GetGeneratedNameWith) Return(_a0 string, _a1 error) *TaskExecutionID_GetGeneratedNameWith { + return &TaskExecutionID_GetGeneratedNameWith{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *TaskExecutionID) OnGetGeneratedNameWith(minLength int, maxLength int) *TaskExecutionID_GetGeneratedNameWith { + c_call := _m.On("GetGeneratedNameWith", minLength, maxLength) + return &TaskExecutionID_GetGeneratedNameWith{Call: c_call} +} + +func (_m *TaskExecutionID) OnGetGeneratedNameWithMatch(matchers ...interface{}) *TaskExecutionID_GetGeneratedNameWith { + c_call := _m.On("GetGeneratedNameWith", matchers...) 
+ return &TaskExecutionID_GetGeneratedNameWith{Call: c_call} +} + +// GetGeneratedNameWith provides a mock function with given fields: minLength, maxLength +func (_m *TaskExecutionID) GetGeneratedNameWith(minLength int, maxLength int) (string, error) { + ret := _m.Called(minLength, maxLength) + + var r0 string + if rf, ok := ret.Get(0).(func(int, int) string); ok { + r0 = rf(minLength, maxLength) + } else { + r0 = ret.Get(0).(string) + } + + var r1 error + if rf, ok := ret.Get(1).(func(int, int) error); ok { + r1 = rf(minLength, maxLength) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type TaskExecutionID_GetID struct { + *mock.Call +} + +func (_m TaskExecutionID_GetID) Return(_a0 flyteidlcore.TaskExecutionIdentifier) *TaskExecutionID_GetID { + return &TaskExecutionID_GetID{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskExecutionID) OnGetID() *TaskExecutionID_GetID { + c_call := _m.On("GetID") + return &TaskExecutionID_GetID{Call: c_call} +} + +func (_m *TaskExecutionID) OnGetIDMatch(matchers ...interface{}) *TaskExecutionID_GetID { + c_call := _m.On("GetID", matchers...) 
+ return &TaskExecutionID_GetID{Call: c_call} +} + +// GetID provides a mock function with given fields: +func (_m *TaskExecutionID) GetID() flyteidlcore.TaskExecutionIdentifier { + ret := _m.Called() + + var r0 flyteidlcore.TaskExecutionIdentifier + if rf, ok := ret.Get(0).(func() flyteidlcore.TaskExecutionIdentifier); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(flyteidlcore.TaskExecutionIdentifier) + } + + return r0 +} + +type TaskExecutionID_GetUniqueNodeID struct { + *mock.Call +} + +func (_m TaskExecutionID_GetUniqueNodeID) Return(_a0 string) *TaskExecutionID_GetUniqueNodeID { + return &TaskExecutionID_GetUniqueNodeID{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskExecutionID) OnGetUniqueNodeID() *TaskExecutionID_GetUniqueNodeID { + c_call := _m.On("GetUniqueNodeID") + return &TaskExecutionID_GetUniqueNodeID{Call: c_call} +} + +func (_m *TaskExecutionID) OnGetUniqueNodeIDMatch(matchers ...interface{}) *TaskExecutionID_GetUniqueNodeID { + c_call := _m.On("GetUniqueNodeID", matchers...) + return &TaskExecutionID_GetUniqueNodeID{Call: c_call} +} + +// GetUniqueNodeID provides a mock function with given fields: +func (_m *TaskExecutionID) GetUniqueNodeID() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} diff --git a/flyteplugins/go/tasks/pluginmachinery/core/mocks/task_execution_metadata.go b/flyteplugins/go/tasks/pluginmachinery/core/mocks/task_execution_metadata.go new file mode 100644 index 0000000000..fa334857e0 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/core/mocks/task_execution_metadata.go @@ -0,0 +1,545 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. 
+ +package mocks + +import ( + core "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + corev1 "k8s.io/api/core/v1" + + flyteidlcore "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" + + mock "github.com/stretchr/testify/mock" + + types "k8s.io/apimachinery/pkg/types" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// TaskExecutionMetadata is an autogenerated mock type for the TaskExecutionMetadata type +type TaskExecutionMetadata struct { + mock.Mock +} + +type TaskExecutionMetadata_GetAnnotations struct { + *mock.Call +} + +func (_m TaskExecutionMetadata_GetAnnotations) Return(_a0 map[string]string) *TaskExecutionMetadata_GetAnnotations { + return &TaskExecutionMetadata_GetAnnotations{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskExecutionMetadata) OnGetAnnotations() *TaskExecutionMetadata_GetAnnotations { + c_call := _m.On("GetAnnotations") + return &TaskExecutionMetadata_GetAnnotations{Call: c_call} +} + +func (_m *TaskExecutionMetadata) OnGetAnnotationsMatch(matchers ...interface{}) *TaskExecutionMetadata_GetAnnotations { + c_call := _m.On("GetAnnotations", matchers...) 
+ return &TaskExecutionMetadata_GetAnnotations{Call: c_call} +} + +// GetAnnotations provides a mock function with given fields: +func (_m *TaskExecutionMetadata) GetAnnotations() map[string]string { + ret := _m.Called() + + var r0 map[string]string + if rf, ok := ret.Get(0).(func() map[string]string); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]string) + } + } + + return r0 +} + +type TaskExecutionMetadata_GetConsoleURL struct { + *mock.Call +} + +func (_m TaskExecutionMetadata_GetConsoleURL) Return(_a0 string) *TaskExecutionMetadata_GetConsoleURL { + return &TaskExecutionMetadata_GetConsoleURL{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskExecutionMetadata) OnGetConsoleURL() *TaskExecutionMetadata_GetConsoleURL { + c_call := _m.On("GetConsoleURL") + return &TaskExecutionMetadata_GetConsoleURL{Call: c_call} +} + +func (_m *TaskExecutionMetadata) OnGetConsoleURLMatch(matchers ...interface{}) *TaskExecutionMetadata_GetConsoleURL { + c_call := _m.On("GetConsoleURL", matchers...) 
+ return &TaskExecutionMetadata_GetConsoleURL{Call: c_call} +} + +// GetConsoleURL provides a mock function with given fields: +func (_m *TaskExecutionMetadata) GetConsoleURL() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +type TaskExecutionMetadata_GetEnvironmentVariables struct { + *mock.Call +} + +func (_m TaskExecutionMetadata_GetEnvironmentVariables) Return(_a0 map[string]string) *TaskExecutionMetadata_GetEnvironmentVariables { + return &TaskExecutionMetadata_GetEnvironmentVariables{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskExecutionMetadata) OnGetEnvironmentVariables() *TaskExecutionMetadata_GetEnvironmentVariables { + c_call := _m.On("GetEnvironmentVariables") + return &TaskExecutionMetadata_GetEnvironmentVariables{Call: c_call} +} + +func (_m *TaskExecutionMetadata) OnGetEnvironmentVariablesMatch(matchers ...interface{}) *TaskExecutionMetadata_GetEnvironmentVariables { + c_call := _m.On("GetEnvironmentVariables", matchers...) 
+ return &TaskExecutionMetadata_GetEnvironmentVariables{Call: c_call} +} + +// GetEnvironmentVariables provides a mock function with given fields: +func (_m *TaskExecutionMetadata) GetEnvironmentVariables() map[string]string { + ret := _m.Called() + + var r0 map[string]string + if rf, ok := ret.Get(0).(func() map[string]string); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]string) + } + } + + return r0 +} + +type TaskExecutionMetadata_GetExternalResourceAttributes struct { + *mock.Call +} + +func (_m TaskExecutionMetadata_GetExternalResourceAttributes) Return(_a0 core.ExternalResourceAttributes) *TaskExecutionMetadata_GetExternalResourceAttributes { + return &TaskExecutionMetadata_GetExternalResourceAttributes{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskExecutionMetadata) OnGetExternalResourceAttributes() *TaskExecutionMetadata_GetExternalResourceAttributes { + c_call := _m.On("GetExternalResourceAttributes") + return &TaskExecutionMetadata_GetExternalResourceAttributes{Call: c_call} +} + +func (_m *TaskExecutionMetadata) OnGetExternalResourceAttributesMatch(matchers ...interface{}) *TaskExecutionMetadata_GetExternalResourceAttributes { + c_call := _m.On("GetExternalResourceAttributes", matchers...) 
+ return &TaskExecutionMetadata_GetExternalResourceAttributes{Call: c_call} +} + +// GetExternalResourceAttributes provides a mock function with given fields: +func (_m *TaskExecutionMetadata) GetExternalResourceAttributes() core.ExternalResourceAttributes { + ret := _m.Called() + + var r0 core.ExternalResourceAttributes + if rf, ok := ret.Get(0).(func() core.ExternalResourceAttributes); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(core.ExternalResourceAttributes) + } + + return r0 +} + +type TaskExecutionMetadata_GetInterruptibleFailureThreshold struct { + *mock.Call +} + +func (_m TaskExecutionMetadata_GetInterruptibleFailureThreshold) Return(_a0 int32) *TaskExecutionMetadata_GetInterruptibleFailureThreshold { + return &TaskExecutionMetadata_GetInterruptibleFailureThreshold{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskExecutionMetadata) OnGetInterruptibleFailureThreshold() *TaskExecutionMetadata_GetInterruptibleFailureThreshold { + c_call := _m.On("GetInterruptibleFailureThreshold") + return &TaskExecutionMetadata_GetInterruptibleFailureThreshold{Call: c_call} +} + +func (_m *TaskExecutionMetadata) OnGetInterruptibleFailureThresholdMatch(matchers ...interface{}) *TaskExecutionMetadata_GetInterruptibleFailureThreshold { + c_call := _m.On("GetInterruptibleFailureThreshold", matchers...) 
+ return &TaskExecutionMetadata_GetInterruptibleFailureThreshold{Call: c_call} +} + +// GetInterruptibleFailureThreshold provides a mock function with given fields: +func (_m *TaskExecutionMetadata) GetInterruptibleFailureThreshold() int32 { + ret := _m.Called() + + var r0 int32 + if rf, ok := ret.Get(0).(func() int32); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int32) + } + + return r0 +} + +type TaskExecutionMetadata_GetK8sServiceAccount struct { + *mock.Call +} + +func (_m TaskExecutionMetadata_GetK8sServiceAccount) Return(_a0 string) *TaskExecutionMetadata_GetK8sServiceAccount { + return &TaskExecutionMetadata_GetK8sServiceAccount{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskExecutionMetadata) OnGetK8sServiceAccount() *TaskExecutionMetadata_GetK8sServiceAccount { + c_call := _m.On("GetK8sServiceAccount") + return &TaskExecutionMetadata_GetK8sServiceAccount{Call: c_call} +} + +func (_m *TaskExecutionMetadata) OnGetK8sServiceAccountMatch(matchers ...interface{}) *TaskExecutionMetadata_GetK8sServiceAccount { + c_call := _m.On("GetK8sServiceAccount", matchers...) 
+ return &TaskExecutionMetadata_GetK8sServiceAccount{Call: c_call} +} + +// GetK8sServiceAccount provides a mock function with given fields: +func (_m *TaskExecutionMetadata) GetK8sServiceAccount() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +type TaskExecutionMetadata_GetLabels struct { + *mock.Call +} + +func (_m TaskExecutionMetadata_GetLabels) Return(_a0 map[string]string) *TaskExecutionMetadata_GetLabels { + return &TaskExecutionMetadata_GetLabels{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskExecutionMetadata) OnGetLabels() *TaskExecutionMetadata_GetLabels { + c_call := _m.On("GetLabels") + return &TaskExecutionMetadata_GetLabels{Call: c_call} +} + +func (_m *TaskExecutionMetadata) OnGetLabelsMatch(matchers ...interface{}) *TaskExecutionMetadata_GetLabels { + c_call := _m.On("GetLabels", matchers...) + return &TaskExecutionMetadata_GetLabels{Call: c_call} +} + +// GetLabels provides a mock function with given fields: +func (_m *TaskExecutionMetadata) GetLabels() map[string]string { + ret := _m.Called() + + var r0 map[string]string + if rf, ok := ret.Get(0).(func() map[string]string); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]string) + } + } + + return r0 +} + +type TaskExecutionMetadata_GetMaxAttempts struct { + *mock.Call +} + +func (_m TaskExecutionMetadata_GetMaxAttempts) Return(_a0 uint32) *TaskExecutionMetadata_GetMaxAttempts { + return &TaskExecutionMetadata_GetMaxAttempts{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskExecutionMetadata) OnGetMaxAttempts() *TaskExecutionMetadata_GetMaxAttempts { + c_call := _m.On("GetMaxAttempts") + return &TaskExecutionMetadata_GetMaxAttempts{Call: c_call} +} + +func (_m *TaskExecutionMetadata) OnGetMaxAttemptsMatch(matchers ...interface{}) *TaskExecutionMetadata_GetMaxAttempts { + c_call := _m.On("GetMaxAttempts", matchers...) 
+ return &TaskExecutionMetadata_GetMaxAttempts{Call: c_call} +} + +// GetMaxAttempts provides a mock function with given fields: +func (_m *TaskExecutionMetadata) GetMaxAttempts() uint32 { + ret := _m.Called() + + var r0 uint32 + if rf, ok := ret.Get(0).(func() uint32); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(uint32) + } + + return r0 +} + +type TaskExecutionMetadata_GetNamespace struct { + *mock.Call +} + +func (_m TaskExecutionMetadata_GetNamespace) Return(_a0 string) *TaskExecutionMetadata_GetNamespace { + return &TaskExecutionMetadata_GetNamespace{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskExecutionMetadata) OnGetNamespace() *TaskExecutionMetadata_GetNamespace { + c_call := _m.On("GetNamespace") + return &TaskExecutionMetadata_GetNamespace{Call: c_call} +} + +func (_m *TaskExecutionMetadata) OnGetNamespaceMatch(matchers ...interface{}) *TaskExecutionMetadata_GetNamespace { + c_call := _m.On("GetNamespace", matchers...) + return &TaskExecutionMetadata_GetNamespace{Call: c_call} +} + +// GetNamespace provides a mock function with given fields: +func (_m *TaskExecutionMetadata) GetNamespace() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +type TaskExecutionMetadata_GetOverrides struct { + *mock.Call +} + +func (_m TaskExecutionMetadata_GetOverrides) Return(_a0 core.TaskOverrides) *TaskExecutionMetadata_GetOverrides { + return &TaskExecutionMetadata_GetOverrides{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskExecutionMetadata) OnGetOverrides() *TaskExecutionMetadata_GetOverrides { + c_call := _m.On("GetOverrides") + return &TaskExecutionMetadata_GetOverrides{Call: c_call} +} + +func (_m *TaskExecutionMetadata) OnGetOverridesMatch(matchers ...interface{}) *TaskExecutionMetadata_GetOverrides { + c_call := _m.On("GetOverrides", matchers...) 
+ return &TaskExecutionMetadata_GetOverrides{Call: c_call} +} + +// GetOverrides provides a mock function with given fields: +func (_m *TaskExecutionMetadata) GetOverrides() core.TaskOverrides { + ret := _m.Called() + + var r0 core.TaskOverrides + if rf, ok := ret.Get(0).(func() core.TaskOverrides); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(core.TaskOverrides) + } + } + + return r0 +} + +type TaskExecutionMetadata_GetOwnerID struct { + *mock.Call +} + +func (_m TaskExecutionMetadata_GetOwnerID) Return(_a0 types.NamespacedName) *TaskExecutionMetadata_GetOwnerID { + return &TaskExecutionMetadata_GetOwnerID{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskExecutionMetadata) OnGetOwnerID() *TaskExecutionMetadata_GetOwnerID { + c_call := _m.On("GetOwnerID") + return &TaskExecutionMetadata_GetOwnerID{Call: c_call} +} + +func (_m *TaskExecutionMetadata) OnGetOwnerIDMatch(matchers ...interface{}) *TaskExecutionMetadata_GetOwnerID { + c_call := _m.On("GetOwnerID", matchers...) 
+ return &TaskExecutionMetadata_GetOwnerID{Call: c_call} +} + +// GetOwnerID provides a mock function with given fields: +func (_m *TaskExecutionMetadata) GetOwnerID() types.NamespacedName { + ret := _m.Called() + + var r0 types.NamespacedName + if rf, ok := ret.Get(0).(func() types.NamespacedName); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(types.NamespacedName) + } + + return r0 +} + +type TaskExecutionMetadata_GetOwnerReference struct { + *mock.Call +} + +func (_m TaskExecutionMetadata_GetOwnerReference) Return(_a0 v1.OwnerReference) *TaskExecutionMetadata_GetOwnerReference { + return &TaskExecutionMetadata_GetOwnerReference{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskExecutionMetadata) OnGetOwnerReference() *TaskExecutionMetadata_GetOwnerReference { + c_call := _m.On("GetOwnerReference") + return &TaskExecutionMetadata_GetOwnerReference{Call: c_call} +} + +func (_m *TaskExecutionMetadata) OnGetOwnerReferenceMatch(matchers ...interface{}) *TaskExecutionMetadata_GetOwnerReference { + c_call := _m.On("GetOwnerReference", matchers...) 
+ return &TaskExecutionMetadata_GetOwnerReference{Call: c_call} +} + +// GetOwnerReference provides a mock function with given fields: +func (_m *TaskExecutionMetadata) GetOwnerReference() v1.OwnerReference { + ret := _m.Called() + + var r0 v1.OwnerReference + if rf, ok := ret.Get(0).(func() v1.OwnerReference); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(v1.OwnerReference) + } + + return r0 +} + +type TaskExecutionMetadata_GetPlatformResources struct { + *mock.Call +} + +func (_m TaskExecutionMetadata_GetPlatformResources) Return(_a0 *corev1.ResourceRequirements) *TaskExecutionMetadata_GetPlatformResources { + return &TaskExecutionMetadata_GetPlatformResources{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskExecutionMetadata) OnGetPlatformResources() *TaskExecutionMetadata_GetPlatformResources { + c_call := _m.On("GetPlatformResources") + return &TaskExecutionMetadata_GetPlatformResources{Call: c_call} +} + +func (_m *TaskExecutionMetadata) OnGetPlatformResourcesMatch(matchers ...interface{}) *TaskExecutionMetadata_GetPlatformResources { + c_call := _m.On("GetPlatformResources", matchers...) 
+ return &TaskExecutionMetadata_GetPlatformResources{Call: c_call} +} + +// GetPlatformResources provides a mock function with given fields: +func (_m *TaskExecutionMetadata) GetPlatformResources() *corev1.ResourceRequirements { + ret := _m.Called() + + var r0 *corev1.ResourceRequirements + if rf, ok := ret.Get(0).(func() *corev1.ResourceRequirements); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*corev1.ResourceRequirements) + } + } + + return r0 +} + +type TaskExecutionMetadata_GetSecurityContext struct { + *mock.Call +} + +func (_m TaskExecutionMetadata_GetSecurityContext) Return(_a0 flyteidlcore.SecurityContext) *TaskExecutionMetadata_GetSecurityContext { + return &TaskExecutionMetadata_GetSecurityContext{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskExecutionMetadata) OnGetSecurityContext() *TaskExecutionMetadata_GetSecurityContext { + c_call := _m.On("GetSecurityContext") + return &TaskExecutionMetadata_GetSecurityContext{Call: c_call} +} + +func (_m *TaskExecutionMetadata) OnGetSecurityContextMatch(matchers ...interface{}) *TaskExecutionMetadata_GetSecurityContext { + c_call := _m.On("GetSecurityContext", matchers...) 
+ return &TaskExecutionMetadata_GetSecurityContext{Call: c_call} +} + +// GetSecurityContext provides a mock function with given fields: +func (_m *TaskExecutionMetadata) GetSecurityContext() flyteidlcore.SecurityContext { + ret := _m.Called() + + var r0 flyteidlcore.SecurityContext + if rf, ok := ret.Get(0).(func() flyteidlcore.SecurityContext); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(flyteidlcore.SecurityContext) + } + + return r0 +} + +type TaskExecutionMetadata_GetTaskExecutionID struct { + *mock.Call +} + +func (_m TaskExecutionMetadata_GetTaskExecutionID) Return(_a0 core.TaskExecutionID) *TaskExecutionMetadata_GetTaskExecutionID { + return &TaskExecutionMetadata_GetTaskExecutionID{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskExecutionMetadata) OnGetTaskExecutionID() *TaskExecutionMetadata_GetTaskExecutionID { + c_call := _m.On("GetTaskExecutionID") + return &TaskExecutionMetadata_GetTaskExecutionID{Call: c_call} +} + +func (_m *TaskExecutionMetadata) OnGetTaskExecutionIDMatch(matchers ...interface{}) *TaskExecutionMetadata_GetTaskExecutionID { + c_call := _m.On("GetTaskExecutionID", matchers...) 
+ return &TaskExecutionMetadata_GetTaskExecutionID{Call: c_call} +} + +// GetTaskExecutionID provides a mock function with given fields: +func (_m *TaskExecutionMetadata) GetTaskExecutionID() core.TaskExecutionID { + ret := _m.Called() + + var r0 core.TaskExecutionID + if rf, ok := ret.Get(0).(func() core.TaskExecutionID); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(core.TaskExecutionID) + } + } + + return r0 +} + +type TaskExecutionMetadata_IsInterruptible struct { + *mock.Call +} + +func (_m TaskExecutionMetadata_IsInterruptible) Return(_a0 bool) *TaskExecutionMetadata_IsInterruptible { + return &TaskExecutionMetadata_IsInterruptible{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskExecutionMetadata) OnIsInterruptible() *TaskExecutionMetadata_IsInterruptible { + c_call := _m.On("IsInterruptible") + return &TaskExecutionMetadata_IsInterruptible{Call: c_call} +} + +func (_m *TaskExecutionMetadata) OnIsInterruptibleMatch(matchers ...interface{}) *TaskExecutionMetadata_IsInterruptible { + c_call := _m.On("IsInterruptible", matchers...) + return &TaskExecutionMetadata_IsInterruptible{Call: c_call} +} + +// IsInterruptible provides a mock function with given fields: +func (_m *TaskExecutionMetadata) IsInterruptible() bool { + ret := _m.Called() + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} diff --git a/flyteplugins/go/tasks/pluginmachinery/core/mocks/task_overrides.go b/flyteplugins/go/tasks/pluginmachinery/core/mocks/task_overrides.go new file mode 100644 index 0000000000..a22332c4d8 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/core/mocks/task_overrides.go @@ -0,0 +1,217 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. 
+ +package mocks + +import ( + flyteidlcore "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" + mock "github.com/stretchr/testify/mock" + + v1 "k8s.io/api/core/v1" +) + +// TaskOverrides is an autogenerated mock type for the TaskOverrides type +type TaskOverrides struct { + mock.Mock +} + +type TaskOverrides_GetConfig struct { + *mock.Call +} + +func (_m TaskOverrides_GetConfig) Return(_a0 map[string]string) *TaskOverrides_GetConfig { + return &TaskOverrides_GetConfig{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskOverrides) OnGetConfig() *TaskOverrides_GetConfig { + c_call := _m.On("GetConfig") + return &TaskOverrides_GetConfig{Call: c_call} +} + +func (_m *TaskOverrides) OnGetConfigMatch(matchers ...interface{}) *TaskOverrides_GetConfig { + c_call := _m.On("GetConfig", matchers...) + return &TaskOverrides_GetConfig{Call: c_call} +} + +// GetConfig provides a mock function with given fields: +func (_m *TaskOverrides) GetConfig() map[string]string { + ret := _m.Called() + + var r0 map[string]string + if rf, ok := ret.Get(0).(func() map[string]string); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]string) + } + } + + return r0 +} + +type TaskOverrides_GetConfigMap struct { + *mock.Call +} + +func (_m TaskOverrides_GetConfigMap) Return(_a0 *v1.ConfigMap) *TaskOverrides_GetConfigMap { + return &TaskOverrides_GetConfigMap{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskOverrides) OnGetConfigMap() *TaskOverrides_GetConfigMap { + c_call := _m.On("GetConfigMap") + return &TaskOverrides_GetConfigMap{Call: c_call} +} + +func (_m *TaskOverrides) OnGetConfigMapMatch(matchers ...interface{}) *TaskOverrides_GetConfigMap { + c_call := _m.On("GetConfigMap", matchers...) 
+ return &TaskOverrides_GetConfigMap{Call: c_call} +} + +// GetConfigMap provides a mock function with given fields: +func (_m *TaskOverrides) GetConfigMap() *v1.ConfigMap { + ret := _m.Called() + + var r0 *v1.ConfigMap + if rf, ok := ret.Get(0).(func() *v1.ConfigMap); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*v1.ConfigMap) + } + } + + return r0 +} + +type TaskOverrides_GetContainerImage struct { + *mock.Call +} + +func (_m TaskOverrides_GetContainerImage) Return(_a0 string) *TaskOverrides_GetContainerImage { + return &TaskOverrides_GetContainerImage{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskOverrides) OnGetContainerImage() *TaskOverrides_GetContainerImage { + c_call := _m.On("GetContainerImage") + return &TaskOverrides_GetContainerImage{Call: c_call} +} + +func (_m *TaskOverrides) OnGetContainerImageMatch(matchers ...interface{}) *TaskOverrides_GetContainerImage { + c_call := _m.On("GetContainerImage", matchers...) + return &TaskOverrides_GetContainerImage{Call: c_call} +} + +// GetContainerImage provides a mock function with given fields: +func (_m *TaskOverrides) GetContainerImage() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +type TaskOverrides_GetExtendedResources struct { + *mock.Call +} + +func (_m TaskOverrides_GetExtendedResources) Return(_a0 *flyteidlcore.ExtendedResources) *TaskOverrides_GetExtendedResources { + return &TaskOverrides_GetExtendedResources{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskOverrides) OnGetExtendedResources() *TaskOverrides_GetExtendedResources { + c_call := _m.On("GetExtendedResources") + return &TaskOverrides_GetExtendedResources{Call: c_call} +} + +func (_m *TaskOverrides) OnGetExtendedResourcesMatch(matchers ...interface{}) *TaskOverrides_GetExtendedResources { + c_call := _m.On("GetExtendedResources", matchers...) 
+ return &TaskOverrides_GetExtendedResources{Call: c_call} +} + +// GetExtendedResources provides a mock function with given fields: +func (_m *TaskOverrides) GetExtendedResources() *flyteidlcore.ExtendedResources { + ret := _m.Called() + + var r0 *flyteidlcore.ExtendedResources + if rf, ok := ret.Get(0).(func() *flyteidlcore.ExtendedResources); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flyteidlcore.ExtendedResources) + } + } + + return r0 +} + +type TaskOverrides_GetPodTemplate struct { + *mock.Call +} + +func (_m TaskOverrides_GetPodTemplate) Return(_a0 *flyteidlcore.K8SPod) *TaskOverrides_GetPodTemplate { + return &TaskOverrides_GetPodTemplate{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskOverrides) OnGetPodTemplate() *TaskOverrides_GetPodTemplate { + c_call := _m.On("GetPodTemplate") + return &TaskOverrides_GetPodTemplate{Call: c_call} +} + +func (_m *TaskOverrides) OnGetPodTemplateMatch(matchers ...interface{}) *TaskOverrides_GetPodTemplate { + c_call := _m.On("GetPodTemplate", matchers...) 
+ return &TaskOverrides_GetPodTemplate{Call: c_call} +} + +// GetPodTemplate provides a mock function with given fields: +func (_m *TaskOverrides) GetPodTemplate() *flyteidlcore.K8SPod { + ret := _m.Called() + + var r0 *flyteidlcore.K8SPod + if rf, ok := ret.Get(0).(func() *flyteidlcore.K8SPod); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flyteidlcore.K8SPod) + } + } + + return r0 +} + +type TaskOverrides_GetResources struct { + *mock.Call +} + +func (_m TaskOverrides_GetResources) Return(_a0 *v1.ResourceRequirements) *TaskOverrides_GetResources { + return &TaskOverrides_GetResources{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskOverrides) OnGetResources() *TaskOverrides_GetResources { + c_call := _m.On("GetResources") + return &TaskOverrides_GetResources{Call: c_call} +} + +func (_m *TaskOverrides) OnGetResourcesMatch(matchers ...interface{}) *TaskOverrides_GetResources { + c_call := _m.On("GetResources", matchers...) + return &TaskOverrides_GetResources{Call: c_call} +} + +// GetResources provides a mock function with given fields: +func (_m *TaskOverrides) GetResources() *v1.ResourceRequirements { + ret := _m.Called() + + var r0 *v1.ResourceRequirements + if rf, ok := ret.Get(0).(func() *v1.ResourceRequirements); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*v1.ResourceRequirements) + } + } + + return r0 +} diff --git a/flyteplugins/go/tasks/pluginmachinery/core/mocks/task_reader.go b/flyteplugins/go/tasks/pluginmachinery/core/mocks/task_reader.go new file mode 100644 index 0000000000..47a735771e --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/core/mocks/task_reader.go @@ -0,0 +1,98 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + flyteidlcore "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" + + mock "github.com/stretchr/testify/mock" + + storage "github.com/flyteorg/flyte/v2/flytestdlib/storage" +) + +// TaskReader is an autogenerated mock type for the TaskReader type +type TaskReader struct { + mock.Mock +} + +type TaskReader_Path struct { + *mock.Call +} + +func (_m TaskReader_Path) Return(_a0 storage.DataReference, _a1 error) *TaskReader_Path { + return &TaskReader_Path{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *TaskReader) OnPath(ctx context.Context) *TaskReader_Path { + c_call := _m.On("Path", ctx) + return &TaskReader_Path{Call: c_call} +} + +func (_m *TaskReader) OnPathMatch(matchers ...interface{}) *TaskReader_Path { + c_call := _m.On("Path", matchers...) + return &TaskReader_Path{Call: c_call} +} + +// Path provides a mock function with given fields: ctx +func (_m *TaskReader) Path(ctx context.Context) (storage.DataReference, error) { + ret := _m.Called(ctx) + + var r0 storage.DataReference + if rf, ok := ret.Get(0).(func(context.Context) storage.DataReference); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(storage.DataReference) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type TaskReader_Read struct { + *mock.Call +} + +func (_m TaskReader_Read) Return(_a0 *flyteidlcore.TaskTemplate, _a1 error) *TaskReader_Read { + return &TaskReader_Read{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *TaskReader) OnRead(ctx context.Context) *TaskReader_Read { + c_call := _m.On("Read", ctx) + return &TaskReader_Read{Call: c_call} +} + +func (_m *TaskReader) OnReadMatch(matchers ...interface{}) *TaskReader_Read { + c_call := _m.On("Read", matchers...) 
+ return &TaskReader_Read{Call: c_call} +} + +// Read provides a mock function with given fields: ctx +func (_m *TaskReader) Read(ctx context.Context) (*flyteidlcore.TaskTemplate, error) { + ret := _m.Called(ctx) + + var r0 *flyteidlcore.TaskTemplate + if rf, ok := ret.Get(0).(func(context.Context) *flyteidlcore.TaskTemplate); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*flyteidlcore.TaskTemplate) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/flyteplugins/go/tasks/pluginmachinery/core/mocks/task_template_path.go b/flyteplugins/go/tasks/pluginmachinery/core/mocks/task_template_path.go new file mode 100644 index 0000000000..f7f50be183 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/core/mocks/task_template_path.go @@ -0,0 +1,55 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. + +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + + storage "github.com/flyteorg/flyte/v2/flytestdlib/storage" +) + +// TaskTemplatePath is an autogenerated mock type for the TaskTemplatePath type +type TaskTemplatePath struct { + mock.Mock +} + +type TaskTemplatePath_Path struct { + *mock.Call +} + +func (_m TaskTemplatePath_Path) Return(_a0 storage.DataReference, _a1 error) *TaskTemplatePath_Path { + return &TaskTemplatePath_Path{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *TaskTemplatePath) OnPath(ctx context.Context) *TaskTemplatePath_Path { + c_call := _m.On("Path", ctx) + return &TaskTemplatePath_Path{Call: c_call} +} + +func (_m *TaskTemplatePath) OnPathMatch(matchers ...interface{}) *TaskTemplatePath_Path { + c_call := _m.On("Path", matchers...) 
+ return &TaskTemplatePath_Path{Call: c_call} +} + +// Path provides a mock function with given fields: ctx +func (_m *TaskTemplatePath) Path(ctx context.Context) (storage.DataReference, error) { + ret := _m.Called(ctx) + + var r0 storage.DataReference + if rf, ok := ret.Get(0).(func(context.Context) storage.DataReference); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(storage.DataReference) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/flyteplugins/go/tasks/pluginmachinery/core/phase.go b/flyteplugins/go/tasks/pluginmachinery/core/phase.go new file mode 100644 index 0000000000..7ede39b4c2 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/core/phase.go @@ -0,0 +1,312 @@ +package core + +import ( + "fmt" + "time" + + structpb "github.com/golang/protobuf/ptypes/struct" + + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +const DefaultPhaseVersion = uint32(0) +const SystemErrorCode = "SystemError" + +//go:generate enumer -type=Phase + +type Phase int8 + +const ( + // Does not mean an error, but simply states that we dont know the state in this round, try again later. 
But can be used to signal a system error too + PhaseUndefined Phase = iota + PhaseNotReady + // Indicates plugin is not ready to submit the request as it is waiting for resources + PhaseWaitingForResources + // Indicates plugin has submitted the execution, but it has not started executing yet + PhaseQueued + // The system has started the pre-execution process, like container download, cluster startup etc + PhaseInitializing + // Indicates that the task has started executing + PhaseRunning + // Indicates that the task has completed successfully + PhaseSuccess + // Indicates that the Failure is recoverable, by re-executing the task if retries permit + PhaseRetryableFailure + // Indicate that the failure is non recoverable even if retries exist + PhasePermanentFailure + // Indicates the task is waiting for the cache to be populated so it can reuse results + PhaseWaitingForCache + // Indicate the task has been aborted + PhaseAborted +) + +var Phases = []Phase{ + PhaseUndefined, + PhaseNotReady, + PhaseWaitingForResources, + PhaseQueued, + PhaseInitializing, + PhaseRunning, + PhaseSuccess, + PhaseRetryableFailure, + PhasePermanentFailure, + PhaseWaitingForCache, + PhaseAborted, +} + +// Returns true if the given phase is failure, retryable failure or success +func (p Phase) IsTerminal() bool { + return p.IsFailure() || p.IsSuccess() || p.IsAborted() +} + +func (p Phase) IsFailure() bool { + return p == PhasePermanentFailure || p == PhaseRetryableFailure +} + +func (p Phase) IsSuccess() bool { + return p == PhaseSuccess +} + +func (p Phase) IsAborted() bool { + return p == PhaseAborted +} + +func (p Phase) IsWaitingForResources() bool { + return p == PhaseWaitingForResources +} + +type ExternalResource struct { + // A unique identifier for the external resource + ExternalID string + // Captures the status of caching for this external resource + CacheStatus core.CatalogCacheStatus + // A unique index for the external resource. 
Although the ID may change, this will remain the same + // throughout task event reports and retries. + Index uint32 + // Log information for the external resource + Logs []*core.TaskLog + // Contains metadata required to identify logs related to this task execution + LogContext *core.LogContext + // The number of times this external resource has been attempted + RetryAttempt uint32 + // Phase (if exists) associated with the external resource + Phase Phase + // Extensible field for custom, plugin-specific info + CustomInfo *structpb.Struct +} + +type ReasonInfo struct { + Reason string + OccurredAt *time.Time +} + +type TaskInfo struct { + // log information for the task execution + Logs []*core.TaskLog + // Contains metadata required to identify logs related to this task execution + LogContext *core.LogContext + // This value represents the time the status occurred at. If not provided, it will be defaulted to the time Flyte + // checked the task status. + OccurredAt *time.Time + // This value represents the time the status was reported at. If not provided, will be defaulted to the current time + // when Flyte published the event. + ReportedAt *time.Time + // Custom Event information that the plugin would like to expose to the front-end + CustomInfo *structpb.Struct + // A collection of information about external resources launched by this task + ExternalResources []*ExternalResource + // Additional reasons for this case. Note, these are not included in the phase state. + AdditionalReasons []ReasonInfo +} + +func (t *TaskInfo) String() string { + return fmt.Sprintf("Info<@%s>", t.OccurredAt.String()) +} + +// Additional info that should be sent to the front end. The Information is sent to the front-end if it meets certain +// criterion, for example currently, it is sent only if an event was not already sent for +type PhaseInfo struct { + // Observed Phase of the launched Task execution + phase Phase + // Phase version. by default this can be left as empty => 0. 
This can be used if there is some additional information
	// to be provided to the Control plane. Phase information is immutable in control plane for a given Phase, unless
	// a new version is provided.
	version uint32
	// In case info needs to be provided
	info *TaskInfo
	// If only an error is observed. It is complementary to info
	err *core.ExecutionError
	// reason why the current phase exists.
	reason string
	// cleanupOnFailure indicates that this task should be cleaned up even though the phase indicates a failure. This
	// applies to situations where a task is marked a failure but is still running, for example an ImagePullBackoff in
	// a k8s Pod where the image does not exist will continually reattempt the pull even though it will never succeed.
	cleanupOnFailure bool
}

// Phase returns the observed phase of the task execution.
func (p PhaseInfo) Phase() Phase {
	return p.phase
}

// Version returns the phase version; phase info is immutable in the control plane per phase unless a new version is provided.
func (p PhaseInfo) Version() uint32 {
	return p.version
}

// Reason returns why the current phase exists.
func (p PhaseInfo) Reason() string {
	return p.reason
}

// Info returns the associated TaskInfo, if any.
func (p PhaseInfo) Info() *TaskInfo {
	return p.info
}

// Err returns the observed execution error; complementary to Info.
func (p PhaseInfo) Err() *core.ExecutionError {
	return p.err
}

// CleanupOnFailure reports whether the task should be cleaned up even though the phase indicates a failure.
func (p PhaseInfo) CleanupOnFailure() bool {
	return p.cleanupOnFailure
}

// WithVersion returns a copy of this PhaseInfo with only the version replaced.
func (p PhaseInfo) WithVersion(version uint32) PhaseInfo {
	return PhaseInfo{
		phase:   p.phase,
		version: version,
		info:    p.info,
		err:     p.err,
		reason:  p.reason,
		// Fix: cleanupOnFailure was previously dropped here, silently disabling
		// failure-cleanup for any PhaseInfo that passed through WithVersion.
		cleanupOnFailure: p.cleanupOnFailure,
	}
}

// WithReason appends (comma-separated) or sets the reason for the current phase.
func (p *PhaseInfo) WithReason(reason string) {
	if p.reason != "" {
		p.reason += ", " + reason
	} else {
		p.reason = reason
	}
}

// String renders the phase and version plus either the error or the info/reason.
func (p PhaseInfo) String() string {
	if p.err != nil {
		return fmt.Sprintf("Phase<%s:%d Error:%s>", p.phase, p.version, p.err)
	}
	return fmt.Sprintf("Phase<%s:%d %s Reason:%s>", p.phase, p.version, p.info, p.reason)
}

// PhaseInfoUndefined should be used when the Phase is unknown usually associated with an error
var PhaseInfoUndefined = PhaseInfo{phase: PhaseUndefined}

// phaseInfo is the shared constructor: it defaults a nil info and an unset OccurredAt.
func phaseInfo(p Phase, v uint32, err *core.ExecutionError, info *TaskInfo, cleanupOnFailure bool)
PhaseInfo {
	// Default a missing TaskInfo and an unset timestamp so every PhaseInfo carries both.
	if info == nil {
		info = &TaskInfo{}
	}
	if info.OccurredAt == nil {
		now := time.Now()
		info.OccurredAt = &now
	}
	return PhaseInfo{
		phase:            p,
		version:          v,
		info:             info,
		err:              err,
		cleanupOnFailure: cleanupOnFailure,
	}
}

// PhaseInfoNotReady represents the case the plugin is not ready to start
func PhaseInfoNotReady(t time.Time, version uint32, reason string) PhaseInfo {
	result := phaseInfo(PhaseNotReady, version, nil, &TaskInfo{OccurredAt: &t}, false)
	result.reason = reason
	return result
}

// Deprecated: Please use PhaseInfoWaitingForResourcesInfo instead
func PhaseInfoWaitingForResources(t time.Time, version uint32, reason string) PhaseInfo {
	result := phaseInfo(PhaseWaitingForResources, version, nil, &TaskInfo{OccurredAt: &t}, false)
	result.reason = reason
	return result
}

// PhaseInfoWaitingForResourcesInfo represents the case the plugin is not ready to start
func PhaseInfoWaitingForResourcesInfo(t time.Time, version uint32, reason string, info *TaskInfo) PhaseInfo {
	result := phaseInfo(PhaseWaitingForResources, version, nil, info, false)
	result.reason = reason
	return result
}

// PhaseInfoQueued builds a queued-phase PhaseInfo stamped with the given time and reason.
func PhaseInfoQueued(t time.Time, version uint32, reason string) PhaseInfo {
	result := phaseInfo(PhaseQueued, version, nil, &TaskInfo{OccurredAt: &t}, false)
	result.reason = reason
	return result
}

// PhaseInfoQueuedWithTaskInfo builds a queued-phase PhaseInfo carrying the supplied TaskInfo.
func PhaseInfoQueuedWithTaskInfo(t time.Time, version uint32, reason string, info *TaskInfo) PhaseInfo {
	result := phaseInfo(PhaseQueued, version, nil, info, false)
	result.reason = reason
	return result
}

// PhaseInfoInitializing builds an initializing-phase PhaseInfo carrying the supplied TaskInfo.
func PhaseInfoInitializing(t time.Time, version uint32, reason string, info *TaskInfo) PhaseInfo {
	result := phaseInfo(PhaseInitializing, version, nil, info, false)
	result.reason = reason
	return result
}

// phaseInfoFailed builds a failure PhaseInfo, defaulting a nil err to an "Unknown" execution error.
func phaseInfoFailed(p Phase, err *core.ExecutionError, info *TaskInfo, cleanupOnFailure bool) PhaseInfo {
	if err == nil {
		err = &core.ExecutionError{
			Code:    "Unknown",
			Message: "Unknown error message",
		}
	}
	return phaseInfo(p, DefaultPhaseVersion, err, info,
cleanupOnFailure)
}

// PhaseInfoFailed builds a PhaseInfo for the given failure phase. A nil err is
// defaulted (via phaseInfoFailed) so failure phases always carry a non-nil error.
func PhaseInfoFailed(p Phase, err *core.ExecutionError, info *TaskInfo) PhaseInfo {
	// Fix: route through phaseInfoFailed so its nil-err defaulting applies; previously
	// this called phaseInfo directly and a nil err leaked into the failure PhaseInfo,
	// unlike every other failure constructor below.
	return phaseInfoFailed(p, err, info, false)
}

// PhaseInfoRunning builds a PhaseInfo for a running task.
func PhaseInfoRunning(version uint32, info *TaskInfo) PhaseInfo {
	return phaseInfo(PhaseRunning, version, nil, info, false)
}

// PhaseInfoSuccess builds a PhaseInfo for a successfully completed task.
func PhaseInfoSuccess(info *TaskInfo) PhaseInfo {
	return phaseInfo(PhaseSuccess, DefaultPhaseVersion, nil, info, false)
}

// PhaseInfoSystemFailure builds a permanent SYSTEM-kind failure.
func PhaseInfoSystemFailure(code, reason string, info *TaskInfo) PhaseInfo {
	return phaseInfoFailed(PhasePermanentFailure, &core.ExecutionError{Code: code, Message: reason, Kind: core.ExecutionError_SYSTEM}, info, false)
}

// PhaseInfoSystemFailureWithCleanup is PhaseInfoSystemFailure with cleanup-on-failure set.
func PhaseInfoSystemFailureWithCleanup(code, reason string, info *TaskInfo) PhaseInfo {
	return phaseInfoFailed(PhasePermanentFailure, &core.ExecutionError{Code: code, Message: reason, Kind: core.ExecutionError_SYSTEM}, info, true)
}

// PhaseInfoFailure builds a permanent USER-kind failure.
func PhaseInfoFailure(code, reason string, info *TaskInfo) PhaseInfo {
	return phaseInfoFailed(PhasePermanentFailure, &core.ExecutionError{Code: code, Message: reason, Kind: core.ExecutionError_USER}, info, false)
}

// PhaseInfoFailureWithCleanup is PhaseInfoFailure with cleanup-on-failure set.
func PhaseInfoFailureWithCleanup(code, reason string, info *TaskInfo) PhaseInfo {
	return phaseInfoFailed(PhasePermanentFailure, &core.ExecutionError{Code: code, Message: reason, Kind: core.ExecutionError_USER}, info, true)
}

// PhaseInfoRetryableFailure builds a retryable USER-kind failure.
func PhaseInfoRetryableFailure(code, reason string, info *TaskInfo) PhaseInfo {
	return phaseInfoFailed(PhaseRetryableFailure, &core.ExecutionError{Code: code, Message: reason, Kind: core.ExecutionError_USER}, info, false)
}

// PhaseInfoRetryableFailureWithCleanup is PhaseInfoRetryableFailure with cleanup-on-failure set.
func PhaseInfoRetryableFailureWithCleanup(code, reason string, info *TaskInfo) PhaseInfo {
	return phaseInfoFailed(PhaseRetryableFailure, &core.ExecutionError{Code: code, Message: reason, Kind: core.ExecutionError_USER}, info, true)
}

// PhaseInfoSystemRetryableFailure builds a retryable SYSTEM-kind failure.
func PhaseInfoSystemRetryableFailure(code, reason string, info *TaskInfo) PhaseInfo {
	return phaseInfoFailed(PhaseRetryableFailure, &core.ExecutionError{Code: code,
Message: reason, Kind: core.ExecutionError_SYSTEM}, info, false) +} + +func PhaseInfoSystemRetryableFailureWithCleanup(code, reason string, info *TaskInfo) PhaseInfo { + return phaseInfoFailed(PhaseRetryableFailure, &core.ExecutionError{Code: code, Message: reason, Kind: core.ExecutionError_SYSTEM}, info, true) +} + +// Creates a new PhaseInfo with phase set to PhaseWaitingForCache +func PhaseInfoWaitingForCache(version uint32, info *TaskInfo) PhaseInfo { + return phaseInfo(PhaseWaitingForCache, version, nil, info, false) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/core/phase_enumer.go b/flyteplugins/go/tasks/pluginmachinery/core/phase_enumer.go new file mode 100644 index 0000000000..caa4d86250 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/core/phase_enumer.go @@ -0,0 +1,58 @@ +// Code generated by "enumer -type=Phase"; DO NOT EDIT. + +package core + +import ( + "fmt" +) + +const _PhaseName = "PhaseUndefinedPhaseNotReadyPhaseWaitingForResourcesPhaseQueuedPhaseInitializingPhaseRunningPhaseSuccessPhaseRetryableFailurePhasePermanentFailurePhaseWaitingForCachePhaseAborted" + +var _PhaseIndex = [...]uint8{0, 14, 27, 51, 62, 79, 91, 103, 124, 145, 165, 177} + +func (i Phase) String() string { + if i < 0 || i >= Phase(len(_PhaseIndex)-1) { + return fmt.Sprintf("Phase(%d)", i) + } + return _PhaseName[_PhaseIndex[i]:_PhaseIndex[i+1]] +} + +var _PhaseValues = []Phase{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10} + +var _PhaseNameToValueMap = map[string]Phase{ + _PhaseName[0:14]: 0, + _PhaseName[14:27]: 1, + _PhaseName[27:51]: 2, + _PhaseName[51:62]: 3, + _PhaseName[62:79]: 4, + _PhaseName[79:91]: 5, + _PhaseName[91:103]: 6, + _PhaseName[103:124]: 7, + _PhaseName[124:145]: 8, + _PhaseName[145:165]: 9, + _PhaseName[165:177]: 10, +} + +// PhaseString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. 
+func PhaseString(s string) (Phase, error) { + if val, ok := _PhaseNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to Phase values", s) +} + +// PhaseValues returns all values of the enum +func PhaseValues() []Phase { + return _PhaseValues +} + +// IsAPhase returns "true" if the value is listed in the enum definition. "false" otherwise +func (i Phase) IsAPhase() bool { + for _, v := range _PhaseValues { + if i == v { + return true + } + } + return false +} diff --git a/flyteplugins/go/tasks/pluginmachinery/core/phase_test.go b/flyteplugins/go/tasks/pluginmachinery/core/phase_test.go new file mode 100644 index 0000000000..08dc44fb78 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/core/phase_test.go @@ -0,0 +1,71 @@ +package core + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestPhaseInfo_WithReason(t *testing.T) { + tests := []struct { + name string + initialReason string + newReason string + expectedReason string + }{ + { + name: "empty initial reason", + initialReason: "", + newReason: "new reason", + expectedReason: "new reason", + }, + { + name: "existing reason gets concatenated", + initialReason: "initial reason", + newReason: "additional reason", + expectedReason: "initial reason, additional reason", + }, + { + name: "multiple concatenations", + initialReason: "first reason, second reason", + newReason: "third reason", + expectedReason: "first reason, second reason, third reason", + }, + { + name: "empty new reason", + initialReason: "existing reason", + newReason: "", + expectedReason: "existing reason, ", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + phaseInfo := PhaseInfo{ + phase: PhaseRunning, + reason: tt.initialReason, + } + + phaseInfo.WithReason(tt.newReason) + + assert.Equal(t, tt.expectedReason, phaseInfo.reason) + }) + } +} + +func TestPhaseInfo_WithReason_DoesNotAffectOtherFields(t *testing.T) { + info := &TaskInfo{} + phaseInfo := 
PhaseInfo{ + phase: PhaseRunning, + version: 1, + info: info, + reason: "initial", + } + + phaseInfo.WithReason("additional") + + assert.Equal(t, PhaseRunning, phaseInfo.phase) + assert.Equal(t, uint32(1), phaseInfo.version) + assert.Equal(t, info, phaseInfo.info) + assert.Equal(t, "initial, additional", phaseInfo.reason) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/core/plugin.go b/flyteplugins/go/tasks/pluginmachinery/core/plugin.go new file mode 100644 index 0000000000..2987f084f9 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/core/plugin.go @@ -0,0 +1,71 @@ +package core + +import ( + "context" + "fmt" +) + +//go:generate mockery -all -case=underscore + +// https://github.com/flyteorg/flytepropeller/blob/979fabe1d1b22b01645259a03b8096f227681d08/pkg/utils/encoder.go#L25-L26 +const minGeneratedNameLength = 8 + +type TaskType = string + +// A Lazy loading function, that will load the plugin. Plugins should be initialized in this method. It is guaranteed +// that the plugin loader will be called before any Handle/Abort/Finalize functions are invoked +type PluginLoader func(ctx context.Context, iCtx SetupContext) (Plugin, error) + +// An entry that identifies the CorePlugin +type PluginEntry struct { + // System wide unique identifier for the plugin + ID TaskType + // A list of all the task types for which this plugin is applicable. + RegisteredTaskTypes []TaskType + // A Lazy loading function, that will load the plugin. Plugins should be initialized in this method. It is guaranteed + // that the plugin loader will be called before any Handle/Abort/Finalize functions are invoked + LoadPlugin PluginLoader + // Boolean that indicates if this plugin can be used as the default for unknown task types. There can only be + // one default in the system + IsDefault bool +} + +// System level properties that this Plugin supports +type PluginProperties struct { + // Instructs the execution engine to not attempt to cache lookup or write for the node. 
// Plugin is the interface every core Flyte plugin implements.
type Plugin interface {
	// GetID returns a unique ID for the plugin; it should ideally be the same as
	// the ID in the corresponding PluginEntry.
	GetID() string
	// GetProperties returns the system-level properties desired by the plugin
	// from the available set.
	GetProperties() PluginProperties
	// Handle is the actual method invoked for every single task execution. The
	// method should be non-blocking. It may be invoked multiple times and hence
	// all actions should be idempotent. If idempotency is not possible, look at
	// Transitions to get some system-level guarantees.
	Handle(ctx context.Context, tCtx TaskExecutionContext) (Transition, error)
	// Abort is called when the task is to be killed/aborted, because the top
	// level entity was aborted or some other failure happened. Abort should
	// always be idempotent.
	Abort(ctx context.Context, tCtx TaskExecutionContext) error
	// Finalize is always called, after Handle or Abort. Finalize should be an
	// idempotent operation.
	Finalize(ctx context.Context, tCtx TaskExecutionContext) error
}
+func LoadPlugin(ctx context.Context, iCtx SetupContext, entry PluginEntry) (Plugin, error) { + plugin, err := entry.LoadPlugin(ctx, iCtx) + if err != nil { + return nil, err + } + + length := plugin.GetProperties().GeneratedNameMaxLength + if length != nil && *length < minGeneratedNameLength { + return nil, fmt.Errorf("GeneratedNameMaxLength needs to be greater then %d", minGeneratedNameLength) + } + + return plugin, err +} diff --git a/flyteplugins/go/tasks/pluginmachinery/core/plugin_test.go b/flyteplugins/go/tasks/pluginmachinery/core/plugin_test.go new file mode 100644 index 0000000000..d7099de00d --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/core/plugin_test.go @@ -0,0 +1,111 @@ +package core_test + +import ( + "context" + "testing" + + "gotest.tools/assert" + + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core/mocks" + //"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/plugins/webapi/agent" +) + +func TestLoadPlugin(t *testing.T) { + corePluginType := "core" + + t.Run("valid", func(t *testing.T) { + corePlugin := &mocks.Plugin{} + corePlugin.On("GetID").Return(corePluginType) + corePlugin.OnGetProperties().Return(core.PluginProperties{}) + + corePluginEntry := core.PluginEntry{ + ID: corePluginType, + RegisteredTaskTypes: []core.TaskType{corePluginType}, + LoadPlugin: func(ctx context.Context, iCtx core.SetupContext) (core.Plugin, error) { + return corePlugin, nil + }, + } + setupCtx := mocks.SetupContext{} + p, err := core.LoadPlugin(context.TODO(), &setupCtx, corePluginEntry) + assert.NilError(t, err) + assert.Equal(t, corePluginType, p.GetID()) + }) + + t.Run("valid GeneratedNameMaxLength", func(t *testing.T) { + corePlugin := &mocks.Plugin{} + corePlugin.On("GetID").Return(corePluginType) + length := 10 + corePlugin.OnGetProperties().Return(core.PluginProperties{ + GeneratedNameMaxLength: &length, + }) + + corePluginEntry := core.PluginEntry{ 
+ ID: corePluginType, + RegisteredTaskTypes: []core.TaskType{corePluginType}, + LoadPlugin: func(ctx context.Context, iCtx core.SetupContext) (core.Plugin, error) { + return corePlugin, nil + }, + } + setupCtx := mocks.SetupContext{} + p, err := core.LoadPlugin(context.TODO(), &setupCtx, corePluginEntry) + assert.NilError(t, err) + assert.Equal(t, corePluginType, p.GetID()) + }) + + t.Run("valid GeneratedNameMaxLength", func(t *testing.T) { + corePlugin := &mocks.Plugin{} + corePlugin.On("GetID").Return(corePluginType) + length := 10 + corePlugin.OnGetProperties().Return(core.PluginProperties{ + GeneratedNameMaxLength: &length, + }) + + corePluginEntry := core.PluginEntry{ + ID: corePluginType, + RegisteredTaskTypes: []core.TaskType{corePluginType}, + LoadPlugin: func(ctx context.Context, iCtx core.SetupContext) (core.Plugin, error) { + return corePlugin, nil + }, + } + setupCtx := mocks.SetupContext{} + _, err := core.LoadPlugin(context.TODO(), &setupCtx, corePluginEntry) + assert.NilError(t, err) + }) + + t.Run("invalid GeneratedNameMaxLength", func(t *testing.T) { + corePlugin := &mocks.Plugin{} + corePlugin.On("GetID").Return(corePluginType) + length := 5 + corePlugin.OnGetProperties().Return(core.PluginProperties{ + GeneratedNameMaxLength: &length, + }) + + corePluginEntry := core.PluginEntry{ + ID: corePluginType, + RegisteredTaskTypes: []core.TaskType{corePluginType}, + LoadPlugin: func(ctx context.Context, iCtx core.SetupContext) (core.Plugin, error) { + return corePlugin, nil + }, + } + setupCtx := mocks.SetupContext{} + _, err := core.LoadPlugin(context.TODO(), &setupCtx, corePluginEntry) + assert.Error(t, err, "GeneratedNameMaxLength needs to be greater then 8") + }) + +} + +// TODO @pvditt re-add after re-adding webapi +//func TestAgentService(t *testing.T) { +// agentService := agent.AgentService{} +// taskTypes := []core.TaskType{"sensor", "chatgpt"} +// +// for _, taskType := range taskTypes { +// assert.Equal(t, false, 
// namespaceSeparator joins parent and child resource-namespace segments.
const namespaceSeparator = ":"

// ResourceNamespace identifies a pool of resource tokens. Namespaces may be
// nested by joining segments with namespaceSeparator.
type ResourceNamespace string

// CreateSubNamespace returns a child namespace formed by appending the given
// segment to the receiver, separated by namespaceSeparator.
func (r ResourceNamespace) CreateSubNamespace(namespace ResourceNamespace) ResourceNamespace {
	joined := string(r) + namespaceSeparator + string(namespace)
	return ResourceNamespace(joined)
}
Flyte resource manager +// manages resources by managing the tokens of the resources. +// +// 2. Description +// ResourceManager provides a task-type-specific pooling system for Flyte Tasks. Plugin writers can optionally +// request for resources in their tasks, in single quantity. +// +// 3. Usage +// A Flyte plugin registers the resources and the desired quota of each resource with ResourceRegistrar at the +// setup time of Flyte Propeller. At the end of the setup time, Flyte Propeller builds a ResourceManager based on +// these registration requests. +// +// During runtime, the ResourceManager does two simple things: allocating tokens and releasing tokens. When a Flyte +// task execution wants to send a request to an external service, the plugin should claim a unit of the corresponding +// resource. Specifically, an execution needs to generate a unique token, and register the token with ResourceManager +// by calling ResourceManager's AllocateResource() function. ResourceManager will check its current utilization and +// the allocation policy to decide whether or not to grant the request. Only when receiving the "AllocationGranted" +// status shall this execution move forward and send out the request. The granted token will be recorded in a token +// pool corresponding to the resource and managed by ResourceManager. When the request is done, the plugin will ask +// the resource manager to release the token by calling ResourceManager's ReleaseResource(), and the token will be +// erased from the corresponding pool. +// +// 4. Example +// Flyte has a built-on Qubole plugin that allows Flyte tasks to send out Hive commands to Qubole. +// In the plugin, a single Qubole cluster is a resource, and sending out a single Hive command to a Qubole cluster consumes +// a token of the corresponding resource. 
The resource allocation is achieved by the Qubole plugin calling
// status, err := AllocateResource(ctx, <resource namespace>, <allocation token>, <constraints spec>)
// and the de-allocation is achieved by the plugin calling
// err := ReleaseResource(ctx, <resource namespace>, <allocation token>)
// SecretManager resolves secret values for plugins by key.
type SecretManager interface {
	// Get returns the secret value for key, or an error if it cannot be resolved.
	Get(ctx context.Context, key string) (string, error)
}

// EmbeddedSecretManager is a placeholder SecretManager implementation.
// NOTE(review): the real fetcher wiring is commented out pending re-addition of
// the flytepropeller secret package (see the commented imports above) — confirm
// this stub is not reachable from production paths.
type EmbeddedSecretManager struct {
	//secretFetcher secret.SecretFetcher
}

// Get currently returns the hard-coded placeholder value "hi" for every key.
// The intended implementation (fetching from a secret store, with a UTF-8
// fallback for binary values) is preserved in the comments below.
func (e *EmbeddedSecretManager) Get(ctx context.Context, key string) (string, error) {
	//secretValue, err := e.secretFetcher.GetSecretValue(ctx, key)
	//if err != nil {
	//	return "", err
	//}
	//
	//if secretValue.StringValue != "" {
	//	return secretValue.StringValue, nil
	//}
	//
	//// GCP secrets store values as binary only. We could fail this path for AWS, but for
	//// consistent behaviour between AWS and GCP we will allow this path for AWS as well.
	//if !utf8.Valid(secretValue.BinaryValue) {
	//	return "", fmt.Errorf("secret %q has a binary value that is not a valid UTF-8 string", key)
	//}
	//return string(secretValue.BinaryValue), nil
	return "hi", nil
}

// NewEmbeddedSecretManager constructs an EmbeddedSecretManager. The config
// parameter and fetcher construction are commented out until the secret
// package is re-added; the returned manager is the placeholder stub above.
// func NewEmbeddedSecretManager(ctx context.Context, cfg config.EmbeddedSecretManagerConfig) (SecretManager, error) {
func NewEmbeddedSecretManager(ctx context.Context) (SecretManager, error) {
	//secretFetcher, err := secret.NewSecretFetcher(ctx, cfg)
	//if err != nil {
	//	return nil, err
	//}

	//return &EmbeddedSecretManager{secretFetcher}, nil
	return &EmbeddedSecretManager{}, nil
}
b/flyteplugins/go/tasks/pluginmachinery/core/state.go new file mode 100644 index 0000000000..55d8d63862 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/core/state.go @@ -0,0 +1,19 @@ +package core + +// Write new plugin state for a plugin +type PluginStateWriter interface { + // Only the last call to this method is recorded. All previous calls are overwritten + // This data is also not accessible until the next round. + Put(stateVersion uint8, v interface{}) error + // Resets the state to empty or zero value + Reset() error +} + +// Read previously written plugin state (previous round) +type PluginStateReader interface { + // Retrieve state version that is currently stored + GetStateVersion() uint8 + // Retrieve the typed state in t from the stored value. It also returns the stateversion. + // If there is no state, t will be zero value, stateversion will be 0 + Get(t interface{}) (stateVersion uint8, err error) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/core/template/template.go b/flyteplugins/go/tasks/pluginmachinery/core/template/template.go new file mode 100644 index 0000000000..7549412e33 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/core/template/template.go @@ -0,0 +1,243 @@ +// Package template exports the Render method +// Render Evaluates templates in each command with the equivalent value from passed args. Templates are case-insensitive +// Supported templates are: +// - {{ .InputFile }} to receive the input file path. The protocol used will depend on the underlying system +// configuration. E.g. s3://bucket/key/to/file.pb or /var/run/local.pb are both valid. +// - {{ .OutputPrefix }} to receive the path prefix for where to store the outputs. +// - {{ .Inputs.myInput }} to receive the actual value of the input passed. See docs on LiteralMapToTemplateArgs for how +// what to expect each literal type to be serialized as. +// - {{ .RawOutputDataPrefix }} to receive a path where the raw output data should be ideally written. 
It is guaranteed +// to be unique per retry and finally one will be saved as the output path +// - {{ .PerRetryUniqueKey }} A key/id/str that is generated per retry and is guaranteed to be unique. Useful in query +// manipulations +// - {{ .TaskTemplatePath }} A path in blobstore/metadata store (e.g. s3, gcs etc) to where an offloaded version of the +// task template exists and can be accessed by the container / task execution environment. The template is a +// a serialized protobuf +// - {{ .PrevCheckpointPrefix }} A path to the checkpoint directory for the previous attempt. If this is the first attempt +// then this is replaced by an empty string +// - {{ .CheckpointOutputPrefix }} A Flyte aware path where the current execution should write the checkpoints. +package template + +import ( + "context" + "fmt" + "reflect" + "regexp" + "strings" + + "github.com/golang/protobuf/ptypes" + "github.com/pkg/errors" + "github.com/shamaton/msgpack/v2" + + "github.com/flyteorg/flyte/v2/flyteidl2/clients/go/coreutils" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/io" + "github.com/flyteorg/flyte/v2/flytestdlib/logger" + idlCore "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +var alphaNumericOnly = regexp.MustCompile("[^a-zA-Z0-9_]+") +var startsWithAlpha = regexp.MustCompile("^[^a-zA-Z_]+") + +// Regexes for Supported templates +var inputFileRegex = regexp.MustCompile(`(?i){{\s*[\.$]Input\s*}}`) +var inputPrefixRegex = regexp.MustCompile(`(?i){{\s*[\.$]InputPrefix\s*}}`) +var outputRegex = regexp.MustCompile(`(?i){{\s*[\.$]OutputPrefix\s*}}`) +var inputVarRegex = regexp.MustCompile(`(?i){{\s*[\.$]Inputs\.(?P[^}\s]+)\s*}}`) +var rawOutputDataPrefixRegex = regexp.MustCompile(`(?i){{\s*[\.$]RawOutputDataPrefix\s*}}`) +var perRetryUniqueKey = regexp.MustCompile(`(?i){{\s*[\.$]PerRetryUniqueKey\s*}}`) +var taskTemplateRegex = 
regexp.MustCompile(`(?i){{\s*[\.$]TaskTemplatePath\s*}}`) +var prevCheckpointPrefixRegex = regexp.MustCompile(`(?i){{\s*[\.$]PrevCheckpointPrefix\s*}}`) +var currCheckpointPrefixRegex = regexp.MustCompile(`(?i){{\s*[\.$]CheckpointOutputPrefix\s*}}`) +var namespaceRegex = regexp.MustCompile(`(?i){{\s*[\.$]Namespace\s*}}`) + +type ErrorCollection struct { + Errors []error +} + +func (e ErrorCollection) Error() string { + sb := strings.Builder{} + for idx, err := range e.Errors { + sb.WriteString(fmt.Sprintf("%v: %v\r\n", idx, err)) + } + + return sb.String() +} + +// Parameters struct is used by the Templating Engine to replace the templated parameters +type Parameters struct { + TaskExecMetadata core.TaskExecutionMetadata + Inputs io.InputReader + OutputPath io.OutputFilePaths + Task core.TaskTemplatePath + IncludeConsoleURL bool +} + +// Render Evaluates templates in each command with the equivalent value from passed args. Templates are case-insensitive +// If a command isn't a valid template or failed to evaluate, it'll be returned as is. +// Refer to the package docs for a list of supported templates +// NOTE: I wanted to do in-place replacement, until I realized that in-place replacement will alter the definition of the +// graph. 
This is not desirable, as we may have to retry and in that case the replacement will not work and we want +// to create a new location for outputs +func Render(ctx context.Context, inputTemplate []string, params Parameters) ([]string, error) { + if len(inputTemplate) == 0 { + return []string{}, nil + } + + // TODO: Change GetGeneratedName to follow these conventions + var perRetryUniqueKey = params.TaskExecMetadata.GetTaskExecutionID().GetGeneratedName() + perRetryUniqueKey = startsWithAlpha.ReplaceAllString(perRetryUniqueKey, "a") + perRetryUniqueKey = alphaNumericOnly.ReplaceAllString(perRetryUniqueKey, "_") + + logger.Debugf(ctx, "Using [%s] from [%s]", perRetryUniqueKey, params.TaskExecMetadata.GetTaskExecutionID().GetGeneratedName()) + if params.Inputs == nil || params.OutputPath == nil { + return nil, fmt.Errorf("input reader and output path cannot be nil") + } + res := make([]string, 0, len(inputTemplate)) + for _, t := range inputTemplate { + updated, err := render(ctx, t, params, perRetryUniqueKey) + if err != nil { + return res, err + } + + res = append(res, updated) + } + + return res, nil +} + +func render(ctx context.Context, inputTemplate string, params Parameters, perRetryKey string) (string, error) { + + val := inputFileRegex.ReplaceAllString(inputTemplate, params.Inputs.GetInputPath().String()) + val = outputRegex.ReplaceAllString(val, params.OutputPath.GetOutputPrefixPath().String()) + val = inputPrefixRegex.ReplaceAllString(val, params.Inputs.GetInputPrefixPath().String()) + val = rawOutputDataPrefixRegex.ReplaceAllString(val, params.OutputPath.GetRawOutputPrefix().String()) + prevCheckpoint := params.OutputPath.GetPreviousCheckpointsPrefix().String() + if prevCheckpoint == "" { + prevCheckpoint = "\"\"" + } + val = prevCheckpointPrefixRegex.ReplaceAllString(val, prevCheckpoint) + val = currCheckpointPrefixRegex.ReplaceAllString(val, params.OutputPath.GetCheckpointPrefix().String()) + val = perRetryUniqueKey.ReplaceAllString(val, perRetryKey) + + 
// For Task template, we will replace only if there is a match. This is because, task template replacement + // may be expensive, as we may offload + if taskTemplateRegex.MatchString(val) { + p, err := params.Task.Path(ctx) + if err != nil { + logger.Debugf(ctx, "Failed to substitute Task Template reference - reason %s", err) + return "", err + } + val = taskTemplateRegex.ReplaceAllString(val, p.String()) + } + + // Replace namespace last, in case it was embedded in other templates + val = namespaceRegex.ReplaceAllString(val, params.TaskExecMetadata.GetNamespace()) + + var errs ErrorCollection + if inputVarRegex.MatchString(val) { + inputs, err := params.Inputs.Get(ctx) + if err != nil { + return val, errors.Wrapf(err, "unable to read inputs") + } + if inputs == nil || inputs.Literals == nil { + return val, nil + } + val = inputVarRegex.ReplaceAllStringFunc(val, func(s string) string { + matches := inputVarRegex.FindAllStringSubmatch(s, 1) + varName := matches[0][1] + replaced, err := transformVarNameToStringVal(ctx, varName, inputs) + if err != nil { + errs.Errors = append(errs.Errors, errors.Wrapf(err, "input template [%s]", s)) + return "" + } + return replaced + }) + } + + if len(errs.Errors) > 0 { + return "", errs + } + + return val, nil +} + +func transformVarNameToStringVal(ctx context.Context, varName string, inputs *idlCore.LiteralMap) (string, error) { + inputVal, exists := inputs.Literals[varName] + if !exists { + return "", fmt.Errorf("requested input is not found [%s]", varName) + } + + v, err := serializeLiteral(ctx, inputVal) + if err != nil { + return "", errors.Wrapf(err, "failed to bind a value to inputName [%s]", varName) + } + return v, nil +} + +func serializePrimitive(p *idlCore.Primitive) (string, error) { + switch o := p.Value.(type) { + case *idlCore.Primitive_Integer: + return fmt.Sprintf("%v", o.Integer), nil + case *idlCore.Primitive_Boolean: + return fmt.Sprintf("%v", o.Boolean), nil + case *idlCore.Primitive_Datetime: + return 
ptypes.TimestampString(o.Datetime), nil + case *idlCore.Primitive_Duration: + return o.Duration.String(), nil + case *idlCore.Primitive_FloatValue: + return fmt.Sprintf("%v", o.FloatValue), nil + case *idlCore.Primitive_StringValue: + return o.StringValue, nil + default: + return "", fmt.Errorf("received an unexpected primitive type [%v]", reflect.TypeOf(p.Value)) + } +} + +func serializeLiteralScalar(l *idlCore.Scalar) (string, error) { + switch o := l.Value.(type) { + case *idlCore.Scalar_Primitive: + return serializePrimitive(o.Primitive) + case *idlCore.Scalar_Blob: + return o.Blob.Uri, nil + case *idlCore.Scalar_Schema: + return o.Schema.Uri, nil + case *idlCore.Scalar_Binary: + binaryBytes := o.Binary.Value + var currVal any + if o.Binary.Tag == coreutils.MESSAGEPACK { + err := msgpack.Unmarshal(binaryBytes, &currVal) + if err != nil { + return "", fmt.Errorf("failed to unmarshal messagepack bytes with literal:[%v], err:[%v]", l, err) + } + // TODO: Try to support Primitive_Datetime, Primitive_Duration, Flyte File, and Flyte Directory. 
// serializeLiteral renders a core Literal as a string for template
// substitution. Collections render as "[a,b,...]" — elements serialized
// recursively and joined by commas; scalars are delegated to
// serializeLiteralScalar. Any other literal kind (e.g. maps) is rejected with
// an error.
func serializeLiteral(ctx context.Context, l *idlCore.Literal) (string, error) {
	switch o := l.Value.(type) {
	case *idlCore.Literal_Collection:
		// Pre-size for one rendered string per collection element.
		res := make([]string, 0, len(o.Collection.Literals))
		for _, sub := range o.Collection.Literals {
			s, err := serializeLiteral(ctx, sub)
			if err != nil {
				return "", err
			}

			res = append(res, s)
		}

		return fmt.Sprintf("[%v]", strings.Join(res, ",")), nil
	case *idlCore.Literal_Scalar:
		return serializeLiteralScalar(o.Scalar)
	default:
		// NOTE(review): the message says "primitive type" but this arm handles
		// any non-collection, non-scalar literal — wording likely predates this
		// function; kept verbatim since callers/logs may match on it.
		logger.Debugf(ctx, "received unexpected primitive type")
		return "", fmt.Errorf("received an unexpected primitive type [%v]", reflect.TypeOf(l.Value))
	}
}
dummyInputReader) Get(ctx context.Context) (*core.LiteralMap, error) { + if d.inputErr { + return nil, fmt.Errorf("expected input fetch error") + } + return d.inputs, nil +} + +type dummyOutputPaths struct { + outputPath storage.DataReference + rawOutputDataPrefix storage.DataReference + prevCheckpointPath storage.DataReference + checkpointPath storage.DataReference +} + +func (d dummyOutputPaths) GetDeckPath() storage.DataReference { + panic("should not be called") +} + +func (d dummyOutputPaths) GetPreviousCheckpointsPrefix() storage.DataReference { + return d.prevCheckpointPath +} + +func (d dummyOutputPaths) GetCheckpointPrefix() storage.DataReference { + return d.checkpointPath +} + +func (d dummyOutputPaths) GetRawOutputPrefix() storage.DataReference { + return d.rawOutputDataPrefix +} + +func (d dummyOutputPaths) GetOutputPrefixPath() storage.DataReference { + return d.outputPath +} + +func (d dummyOutputPaths) GetOutputPath() storage.DataReference { + panic("should not be called") +} + +func (d dummyOutputPaths) GetErrorPath() storage.DataReference { + panic("should not be called") +} + +func TestReplaceTemplateCommandArgs(t *testing.T) { + taskExecutionID := &pluginsCoreMocks.TaskExecutionID{} + taskExecutionID.On("GetGeneratedName").Return("per_retry_unique_key") + taskMetadata := &pluginsCoreMocks.TaskExecutionMetadata{} + taskMetadata.On("GetTaskExecutionID").Return(taskExecutionID) + taskMetadata.On("GetNamespace").Return("test-namespace") + + t.Run("empty cmd", func(t *testing.T) { + actual, err := Render(context.TODO(), []string{}, Parameters{}) + assert.NoError(t, err) + assert.Equal(t, []string{}, actual) + }) + + in := dummyInputReader{inputPath: "input/blah"} + out := dummyOutputPaths{ + outputPath: "output/blah", + rawOutputDataPrefix: "s3://custom-bucket", + } + + params := Parameters{ + TaskExecMetadata: taskMetadata, + Inputs: in, + OutputPath: out, + Task: nil, + } + t.Run("nothing to substitute", func(t *testing.T) { + actual, err := 
Render(context.TODO(), []string{ + "hello", + "world", + }, params) + assert.NoError(t, err) + + assert.Equal(t, []string{ + "hello", + "world", + }, actual) + }) + + t.Run("Sub InputFile", func(t *testing.T) { + actual, err := Render(context.TODO(), []string{ + "hello", + "world", + "{{ .Input }}", + }, params) + assert.NoError(t, err) + + assert.Equal(t, []string{ + "hello", + "world", + "input/blah", + }, actual) + }) + + t.Run("Sub Input Prefix", func(t *testing.T) { + in := dummyInputReader{inputPath: "input/prefix"} + params := Parameters{ + TaskExecMetadata: taskMetadata, + Inputs: in, + OutputPath: out, + Task: nil, + } + actual, err := Render(context.TODO(), []string{ + "hello", + "world", + "{{ .Input }}", + }, params) + assert.NoError(t, err) + + assert.Equal(t, []string{ + "hello", + "world", + "input/prefix", + }, actual) + }) + + t.Run("Sub Output Prefix", func(t *testing.T) { + actual, err := Render(context.TODO(), []string{ + "hello", + "world", + "{{ .OutputPrefix }}", + }, params) + assert.NoError(t, err) + assert.Equal(t, []string{ + "hello", + "world", + "output/blah", + }, actual) + }) + + t.Run("Sub Input Output prefix", func(t *testing.T) { + actual, err := Render(context.TODO(), []string{ + "hello", + "world", + "{{ .Input }}", + "{{ .OutputPrefix }}", + }, params) + assert.NoError(t, err) + assert.Equal(t, []string{ + "hello", + "world", + "input/blah", + "output/blah", + }, actual) + }) + + t.Run("Bad input template", func(t *testing.T) { + actual, err := Render(context.TODO(), []string{ + "hello", + "world", + "${{input}}", + "{{ .OutputPrefix }}", + "--switch {{ .rawOutputDataPrefix }}", + }, params) + assert.NoError(t, err) + assert.Equal(t, []string{ + "hello", + "world", + "${{input}}", + "output/blah", + "--switch s3://custom-bucket", + }, actual) + }) + + t.Run("Input arg", func(t *testing.T) { + in := dummyInputReader{inputs: &core.LiteralMap{ + Literals: map[string]*core.Literal{ + "arr": { + Value: &core.Literal_Collection{ + 
Collection: &core.LiteralCollection{ + Literals: []*core.Literal{coreutils.MustMakeLiteral("a"), coreutils.MustMakeLiteral("b")}, + }, + }, + }, + }, + }} + params := Parameters{ + TaskExecMetadata: taskMetadata, + Inputs: in, + OutputPath: out, + Task: nil, + } + actual, err := Render(context.TODO(), []string{ + "hello", + "world", + `--someArg {{ .Inputs.arr }}`, + "{{ .OutputPrefix }}", + "{{ $RawOutputDataPrefix }}", + }, params) + assert.NoError(t, err) + assert.Equal(t, []string{ + "hello", + "world", + "--someArg [a,b]", + "output/blah", + "s3://custom-bucket", + }, actual) + }) + + t.Run("Date", func(t *testing.T) { + in := dummyInputReader{inputs: &core.LiteralMap{ + Literals: map[string]*core.Literal{ + "date": coreutils.MustMakeLiteral(time.Date(1900, 01, 01, 01, 01, 01, 000000001, time.UTC)), + }, + }} + params := Parameters{ + TaskExecMetadata: taskMetadata, + Inputs: in, + OutputPath: out, + Task: nil, + } + actual, err := Render(context.TODO(), []string{ + "hello", + "world", + `--someArg {{ .Inputs.date }}`, + "{{ .OutputPrefix }}", + "{{ .rawOutputDataPrefix }}", + }, params) + assert.NoError(t, err) + assert.Equal(t, []string{ + "hello", + "world", + "--someArg 1900-01-01T01:01:01.000000001Z", + "output/blah", + "s3://custom-bucket", + }, actual) + }) + + t.Run("2d Array arg", func(t *testing.T) { + in := dummyInputReader{inputs: &core.LiteralMap{ + Literals: map[string]*core.Literal{ + "arr": coreutils.MustMakeLiteral([]interface{}{[]interface{}{"a", "b"}, []interface{}{1, 2}}), + }, + }} + params := Parameters{ + TaskExecMetadata: taskMetadata, + Inputs: in, + OutputPath: out, + Task: nil, + } + actual, err := Render(context.TODO(), []string{ + "hello", + "world", + `--someArg {{ .Inputs.arr }}`, + "{{ .OutputPrefix }}", + "{{ .wrongOutputDataPrefix }}", + }, params) + assert.NoError(t, err) + assert.Equal(t, []string{ + "hello", + "world", + "--someArg [[a,b],[1,2]]", + "output/blah", + "{{ .wrongOutputDataPrefix }}", + }, actual) + }) + + 
t.Run("nil input", func(t *testing.T) { + in := dummyInputReader{inputs: &core.LiteralMap{}} + params := Parameters{ + TaskExecMetadata: taskMetadata, + Inputs: in, + OutputPath: out, + Task: nil, + } + + actual, err := Render(context.TODO(), []string{ + "hello", + "world", + `--someArg {{ .Inputs.arr }}`, + "{{ .OutputPrefix }}", + "--raw-data-output-prefix {{ .rawOutputDataPrefix }}", + }, params) + assert.NoError(t, err) + assert.Equal(t, []string{ + "hello", + "world", + `--someArg {{ .Inputs.arr }}`, + "output/blah", + "--raw-data-output-prefix s3://custom-bucket", + }, actual) + }) + + t.Run("multi-input", func(t *testing.T) { + in := dummyInputReader{inputs: &core.LiteralMap{ + Literals: map[string]*core.Literal{ + "ds": coreutils.MustMakeLiteral(time.Date(1900, 01, 01, 01, 01, 01, 000000001, time.UTC)), + "table": coreutils.MustMakeLiteral("my_table"), + "hr": coreutils.MustMakeLiteral("hr"), + "min": coreutils.MustMakeLiteral(15), + }, + }} + params := Parameters{ + TaskExecMetadata: taskMetadata, + Inputs: in, + OutputPath: out, + Task: nil, + } + actual, err := Render(context.TODO(), []string{ + `SELECT + COUNT(*) as total_count + FROM + hive.events.{{ .Inputs.table }} + WHERE + ds = '{{ .Inputs.ds }}' AND hr = '{{ .Inputs.hr }}' AND min = {{ .Inputs.min }} + `}, params) + assert.NoError(t, err) + assert.Equal(t, []string{ + `SELECT + COUNT(*) as total_count + FROM + hive.events.my_table + WHERE + ds = '1900-01-01T01:01:01.000000001Z' AND hr = 'hr' AND min = 15 + `}, actual) + }) + + t.Run("missing input", func(t *testing.T) { + in := dummyInputReader{inputs: &core.LiteralMap{ + Literals: map[string]*core.Literal{ + "arr": coreutils.MustMakeLiteral([]interface{}{[]interface{}{"a", "b"}, []interface{}{1, 2}}), + }, + }} + params := Parameters{ + TaskExecMetadata: taskMetadata, + Inputs: in, + OutputPath: out, + Task: nil, + } + _, err := Render(context.TODO(), []string{ + "hello", + "world", + `--someArg {{ .Inputs.blah }}`, + "{{ .OutputPrefix }}", + }, 
params) + assert.Error(t, err) + }) + + t.Run("bad template", func(t *testing.T) { + in := dummyInputReader{inputs: &core.LiteralMap{ + Literals: map[string]*core.Literal{ + "arr": coreutils.MustMakeLiteral([]interface{}{[]interface{}{"a", "b"}, []interface{}{1, 2}}), + }, + }} + params := Parameters{ + TaskExecMetadata: taskMetadata, + Inputs: in, + OutputPath: out, + Task: nil, + } + actual, err := Render(context.TODO(), []string{ + "hello", + "world", + `--someArg {{ .Inputs.blah blah }} {{ .PerretryuNIqueKey }}`, + "{{ .OutputPrefix }}", + }, params) + assert.NoError(t, err) + assert.Equal(t, []string{ + "hello", + "world", + `--someArg {{ .Inputs.blah blah }} per_retry_unique_key`, + "output/blah", + }, actual) + }) + + t.Run("sub raw output data prefix", func(t *testing.T) { + actual, err := Render(context.TODO(), []string{ + "hello", + "{{ .perRetryUniqueKey }}", + "world", + "{{ .rawOutputDataPrefix }}", + }, params) + assert.NoError(t, err) + assert.Equal(t, []string{ + "hello", + "per_retry_unique_key", + "world", + "s3://custom-bucket", + }, actual) + }) + + t.Run("sub task template happy", func(t *testing.T) { + ctx := context.TODO() + tMock := &pluginsCoreMocks.TaskTemplatePath{} + tMock.OnPath(ctx).Return("s3://task-path", nil) + params := Parameters{ + TaskExecMetadata: taskMetadata, + Inputs: in, + OutputPath: out, + Task: tMock, + } + + actual, err := Render(ctx, []string{ + "hello", + "{{ .perRetryUniqueKey }}", + "world", + "{{ .taskTemplatePath }}", + }, params) + assert.NoError(t, err) + assert.Equal(t, []string{ + "hello", + "per_retry_unique_key", + "world", + "s3://task-path", + }, actual) + }) + + t.Run("sub task template error", func(t *testing.T) { + ctx := context.TODO() + tMock := &pluginsCoreMocks.TaskTemplatePath{} + tMock.OnPath(ctx).Return("", fmt.Errorf("error")) + params := Parameters{ + TaskExecMetadata: taskMetadata, + Inputs: in, + OutputPath: out, + Task: tMock, + } + + _, err := Render(ctx, []string{ + "hello", + "{{ 
.perRetryUniqueKey }}", + "world", + "{{ .taskTemplatePath }}", + }, params) + assert.Error(t, err) + }) + + t.Run("missing checkpoint args", func(t *testing.T) { + params := Parameters{ + TaskExecMetadata: taskMetadata, + Inputs: in, + OutputPath: dummyOutputPaths{ + outputPath: out.outputPath, + rawOutputDataPrefix: out.rawOutputDataPrefix, + prevCheckpointPath: "s3://prev-checkpoint/prefix", + checkpointPath: "s3://new-checkpoint/prefix", + }, + } + actual, err := Render(context.TODO(), []string{ + "hello", + "world", + "{{ .Input }}", + "{{ .OutputPrefix }}", + }, params) + assert.NoError(t, err) + assert.Equal(t, []string{ + "hello", + "world", + "input/blah", + "output/blah", + }, actual) + }) + + t.Run("no prev checkpoint", func(t *testing.T) { + params := Parameters{ + TaskExecMetadata: taskMetadata, + Inputs: in, + OutputPath: dummyOutputPaths{ + outputPath: out.outputPath, + rawOutputDataPrefix: out.rawOutputDataPrefix, + prevCheckpointPath: "", + checkpointPath: "s3://new-checkpoint/prefix", + }, + } + actual, err := Render(context.TODO(), []string{ + "hello", + "world", + "{{ .Input }}", + "{{ .OutputPrefix }}", + "--prev={{ .PrevCheckpointPrefix }}", + "--checkpoint={{ .CheckpointOutputPrefix }}", + }, params) + assert.NoError(t, err) + assert.Equal(t, []string{ + "hello", + "world", + "input/blah", + "output/blah", + "--prev=\"\"", + "--checkpoint=s3://new-checkpoint/prefix", + }, actual) + }) + + t.Run("all checkpoints", func(t *testing.T) { + params := Parameters{ + TaskExecMetadata: taskMetadata, + Inputs: in, + OutputPath: dummyOutputPaths{ + outputPath: out.outputPath, + rawOutputDataPrefix: out.rawOutputDataPrefix, + prevCheckpointPath: "s3://prev-checkpoint/prefix", + checkpointPath: "s3://new-checkpoint/prefix", + }, + } + actual, err := Render(context.TODO(), []string{ + "hello", + "world", + "{{ .Input }}", + "{{ .OutputPrefix }}", + "--prev={{ .PrevCheckpointPrefix }}", + "--checkpoint={{ .CheckpointOutputPrefix }}", + }, params) + 
assert.NoError(t, err) + assert.Equal(t, []string{ + "hello", + "world", + "input/blah", + "output/blah", + "--prev=s3://prev-checkpoint/prefix", + "--checkpoint=s3://new-checkpoint/prefix", + }, actual) + }) + + t.Run("all checkpoints ignore case", func(t *testing.T) { + params := Parameters{ + TaskExecMetadata: taskMetadata, + Inputs: in, + OutputPath: dummyOutputPaths{ + outputPath: out.outputPath, + rawOutputDataPrefix: out.rawOutputDataPrefix, + prevCheckpointPath: "s3://prev-checkpoint/prefix", + checkpointPath: "s3://new-checkpoint/prefix", + }, + } + actual, err := Render(context.TODO(), []string{ + "hello", + "world", + "{{ .Input }}", + "{{ .OutputPrefix }}", + "--prev={{ .prevcheckpointprefix }}", + "--checkpoint={{ .checkpointoutputprefix }}", + }, params) + assert.NoError(t, err) + assert.Equal(t, []string{ + "hello", + "world", + "input/blah", + "output/blah", + "--prev=s3://prev-checkpoint/prefix", + "--checkpoint=s3://new-checkpoint/prefix", + }, actual) + }) + + t.Run("namespace embedded replacement", func(t *testing.T) { + params := Parameters{ + TaskExecMetadata: taskMetadata, + Inputs: in, + OutputPath: dummyOutputPaths{ + outputPath: out.outputPath, + rawOutputDataPrefix: "s3://raw-data/prefix/{{ .Namespace }}", + prevCheckpointPath: "s3://prev-checkpoint/prefix/{{ .Namespace}}", + checkpointPath: "s3://new-checkpoint/prefix/{{.namespace}}", + }, + } + actual, err := Render(context.TODO(), []string{ + "hello", + "world", + "{{ .Input }}", + "{{ .OutputPrefix }}", + "--prev={{ .prevcheckpointprefix }}", + "--checkpoint={{ .checkpointoutputprefix }}", + "--raw-data-output={{ .rawoutputdataprefix }}", + }, params) + assert.NoError(t, err) + assert.Equal(t, []string{ + "hello", + "world", + "input/blah", + "output/blah", + "--prev=s3://prev-checkpoint/prefix/test-namespace", + "--checkpoint=s3://new-checkpoint/prefix/test-namespace", + "--raw-data-output=s3://raw-data/prefix/test-namespace", + }, actual) + }) +} + +func 
TestReplaceTemplateCommandArgsSpecialChars(t *testing.T) { + in := dummyInputReader{inputPath: "input/blah"} + out := dummyOutputPaths{ + outputPath: "output/blah", + rawOutputDataPrefix: "s3://custom-bucket", + } + + params := Parameters{Inputs: in, OutputPath: out} + + t.Run("dashes are replaced", func(t *testing.T) { + taskExecutionID := &pluginsCoreMocks.TaskExecutionID{} + taskExecutionID.On("GetGeneratedName").Return("per-retry-unique-key") + taskMetadata := &pluginsCoreMocks.TaskExecutionMetadata{} + taskMetadata.On("GetTaskExecutionID").Return(taskExecutionID) + taskMetadata.On("GetNamespace").Return("my-namespace") + + params.TaskExecMetadata = taskMetadata + actual, err := Render(context.TODO(), []string{ + "hello", + "{{ .perRetryUniqueKey }}", + "world", + "{{ .rawOutputDataPrefix }}", + }, params) + assert.NoError(t, err) + assert.Equal(t, []string{ + "hello", + "per_retry_unique_key", + "world", + "s3://custom-bucket", + }, actual) + }) + + t.Run("non-alphabet leading characters are stripped", func(t *testing.T) { + var startsWithAlpha = regexp.MustCompile("^[^a-zA-Z_]+") + taskExecutionID := &pluginsCoreMocks.TaskExecutionID{} + taskExecutionID.On("GetGeneratedName").Return("33 per retry-unique-key") + taskMetadata := &pluginsCoreMocks.TaskExecutionMetadata{} + taskMetadata.On("GetTaskExecutionID").Return(taskExecutionID) + taskMetadata.On("GetNamespace").Return("my-namespace") + + params.TaskExecMetadata = taskMetadata + testString := "doesn't start with a number" + testString2 := "1 does start with a number" + testString3 := " 1 3 nd spaces " + assert.Equal(t, testString, startsWithAlpha.ReplaceAllString(testString, "a")) + assert.Equal(t, "adoes start with a number", startsWithAlpha.ReplaceAllString(testString2, "a")) + assert.Equal(t, "and spaces ", startsWithAlpha.ReplaceAllString(testString3, "a")) + + actual, err := Render(context.TODO(), []string{ + "hello", + "{{ .perRetryUniqueKey }}", + "world", + "{{ .rawOutputDataPrefix }}", + }, params) 
+ assert.NoError(t, err) + assert.Equal(t, []string{ + "hello", + "aper_retry_unique_key", + "world", + "s3://custom-bucket", + }, actual) + }) +} + +func BenchmarkRegexCommandArgs(b *testing.B) { + for i := 0; i < b.N; i++ { + inputFileRegex.MatchString("{{ .InputFile }}") + } +} + +func TestInputRegexMatch(t *testing.T) { + assert.True(t, inputFileRegex.MatchString("{{$input}}")) + assert.True(t, inputFileRegex.MatchString("{{ $Input }}")) + assert.True(t, inputFileRegex.MatchString("{{.input}}")) + assert.True(t, inputFileRegex.MatchString("{{ .Input }}")) + assert.True(t, inputFileRegex.MatchString("{{ .Input }}")) + assert.True(t, inputFileRegex.MatchString("{{ .Input }}")) + assert.True(t, inputFileRegex.MatchString("{{ .Input}}")) + assert.True(t, inputFileRegex.MatchString("{{.Input }}")) + assert.True(t, inputFileRegex.MatchString("--something={{.Input}}")) + assert.False(t, inputFileRegex.MatchString("{{input}}"), "Missing $") + assert.False(t, inputFileRegex.MatchString("{$input}}"), "Missing Brace") +} + +func TestOutputRegexMatch(t *testing.T) { + assert.True(t, outputRegex.MatchString("{{.OutputPrefix}}")) + assert.True(t, outputRegex.MatchString("{{ .OutputPrefix }}")) + assert.True(t, outputRegex.MatchString("{{ .OutputPrefix }}")) + assert.True(t, outputRegex.MatchString("{{ .OutputPrefix }}")) + assert.True(t, outputRegex.MatchString("{{ .OutputPrefix}}")) + assert.True(t, outputRegex.MatchString("{{.OutputPrefix }}")) + assert.True(t, outputRegex.MatchString("--something={{.OutputPrefix}}")) + assert.False(t, outputRegex.MatchString("{{output}}"), "Missing $") + assert.False(t, outputRegex.MatchString("{.OutputPrefix}}"), "Missing Brace") +} + +func getBlobLiteral(uri string) *core.Literal { + return &core.Literal{ + Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_Blob{ + Blob: &core.Blob{ + Metadata: nil, + Uri: uri, + }, + }, + }, + }, + } +} + +func getSchemaLiteral(uri string) *core.Literal { + return &core.Literal{ 
+ Value: &core.Literal_Scalar{ + Scalar: &core.Scalar{ + Value: &core.Scalar_Schema{ + Schema: &core.Schema{Type: nil, Uri: uri}, + }, + }, + }, + } +} + +func TestSerializeLiteral(t *testing.T) { + ctx := context.Background() + + t.Run("serialize blob", func(t *testing.T) { + b := getBlobLiteral("asdf fdsa") + interpolated, err := serializeLiteral(ctx, b) + assert.NoError(t, err) + assert.Equal(t, "asdf fdsa", interpolated) + }) + + t.Run("serialize blob", func(t *testing.T) { + s := getSchemaLiteral("s3://some-bucket/fdsa/x.parquet") + interpolated, err := serializeLiteral(ctx, s) + assert.NoError(t, err) + assert.Equal(t, "s3://some-bucket/fdsa/x.parquet", interpolated) + }) +} + +func TestSerializeLiteralScalar_BinaryMessagePack(t *testing.T) { + // Create a simple map to be serialized into MessagePack format + testMap := map[string]interface{}{ + "a": 1, + "b": true, + "c": 1.1, + "d": "string", + } + + // Serialize the map using MessagePack + encodedData, err := msgpack.Marshal(testMap) + assert.NoError(t, err) + + // Create the core.Scalar_Binary with the encoded MessagePack data and MESSAGEPACK tag + binaryScalar := &core.Scalar{ + Value: &core.Scalar_Binary{ + Binary: &core.Binary{ + Value: encodedData, + Tag: coreutils.MESSAGEPACK, + }, + }, + } + + // Call the function we want to test + result, err := serializeLiteralScalar(binaryScalar) + assert.NoError(t, err) + + // Since the map should be decoded back, we expect a simple string representation of the map + expectedResult := "map[a:1 b:true c:1.1 d:string]" + assert.Equal(t, expectedResult, result) +} + +func TestSerializeLiteralScalar_BinaryUnsupportedTag(t *testing.T) { + // Create some binary data for testing + binaryData := []byte{0x01, 0x02, 0x03} + + // Create a core.Scalar_Binary with an unsupported tag + binaryScalar := &core.Scalar{ + Value: &core.Scalar_Binary{ + Binary: &core.Binary{ + Value: binaryData, + Tag: "unsupported-tag", + }, + }, + } + + // Call the function and expect an error 
because the tag is unsupported + _, err := serializeLiteralScalar(binaryScalar) + assert.Error(t, err) + assert.Contains(t, err.Error(), "unsupported binary tag") +} diff --git a/flyteplugins/go/tasks/pluginmachinery/core/transition.go b/flyteplugins/go/tasks/pluginmachinery/core/transition.go new file mode 100644 index 0000000000..f7feccc4fc --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/core/transition.go @@ -0,0 +1,59 @@ +package core + +import "fmt" + +//go:generate enumer --type=TransitionType + +// Type of Transition, refer to Transition to understand what transition means +type TransitionType int + +const ( + // The transition is eventually consistent. For all the state written may not be visible in the next call, but eventually will persist + // Best to use when the plugin logic is completely idempotent. This is also the most performant option. + TransitionTypeEphemeral TransitionType = iota + // @deprecated support for Barrier type transitions has been deprecated + // This transition tries its best to make the latest state visible for every consecutive read. But, it is possible + // to go back in time, i.e. monotonic consistency is violated (in rare cases). + TransitionTypeBarrier +) + +// A Plugin Handle method returns a Transition. This transition indicates to the Flyte framework that if the plugin wants to continue "Handle"ing this task, +// or if wants to move the task to success, attempt a retry or fail. The transition automatically sends an event to Admin service which shows the plugin +// provided information in the Console/cli etc +// The information to be published is in the PhaseInfo structure. Transition Type indicates the type of consistency for subsequent handle calls in case the phase info results in a non terminal state. +// the PhaseInfo structure is very important and is used to record events in Admin. 
Only if the Phase + PhaseVersion was not previously observed, will an event be published to Admin +// there are only a configurable number of phase-versions usable. Usually it is preferred to be a monotonically increasing sequence +type Transition struct { + ttype TransitionType + info PhaseInfo +} + +func (t Transition) Type() TransitionType { + return t.ttype +} + +func (t Transition) Info() PhaseInfo { + return t.info +} + +func (t *Transition) SetInfo(info PhaseInfo) { + t.info = info +} + +func (t Transition) String() string { + return fmt.Sprintf("%s,%s", t.ttype, t.info) +} + +// UnknownTransition is synonymous to UndefinedTransition. To be returned when an error is observed +var UnknownTransition = Transition{TransitionTypeEphemeral, PhaseInfoUndefined} + +// Creates and returns a new Transition based on the PhaseInfo.Phase +// Phases: PhaseNotReady, PhaseQueued, PhaseInitializing, PhaseRunning will cause the system to continue invoking Handle +func DoTransitionType(ttype TransitionType, info PhaseInfo) Transition { + return Transition{ttype: ttype, info: info} +} + +// Same as DoTransition, but TransitionTime is always Ephemeral +func DoTransition(info PhaseInfo) Transition { + return DoTransitionType(TransitionTypeEphemeral, info) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/core/transition_test.go b/flyteplugins/go/tasks/pluginmachinery/core/transition_test.go new file mode 100644 index 0000000000..3455c9a67b --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/core/transition_test.go @@ -0,0 +1,53 @@ +package core + +import ( + "fmt" + "testing" + + "github.com/magiconair/properties/assert" +) + +func TestTransitionType_String(t *testing.T) { + assert.Equal(t, TransitionTypeBarrier.String(), "TransitionTypeBarrier") + assert.Equal(t, TransitionTypeEphemeral.String(), "TransitionTypeEphemeral") +} + +func ExampleTransition_String() { + trns := DoTransitionType(TransitionTypeBarrier, PhaseInfoUndefined) + fmt.Println(trns.String()) + // 
Output: TransitionTypeBarrier,Phase Reason:> +} + +func TestDoTransition(t *testing.T) { + t.Run("unknown", func(t *testing.T) { + trns := DoTransition(PhaseInfoUndefined) + assert.Equal(t, TransitionTypeEphemeral, trns.Type()) + assert.Equal(t, PhaseInfoUndefined, trns.Info()) + assert.Equal(t, PhaseUndefined, trns.Info().Phase()) + }) + + t.Run("someInfo", func(t *testing.T) { + pInfo := PhaseInfoSuccess(nil) + trns := DoTransition(pInfo) + assert.Equal(t, TransitionTypeEphemeral, trns.Type()) + assert.Equal(t, pInfo, trns.Info()) + assert.Equal(t, PhaseSuccess, trns.Info().Phase()) + }) +} + +func TestDoTransitionType(t *testing.T) { + t.Run("unknown", func(t *testing.T) { + trns := DoTransitionType(TransitionTypeBarrier, PhaseInfoUndefined) + assert.Equal(t, TransitionTypeBarrier, trns.Type()) + assert.Equal(t, PhaseInfoUndefined, trns.Info()) + assert.Equal(t, PhaseUndefined, trns.Info().Phase()) + }) + + t.Run("someInfo", func(t *testing.T) { + pInfo := PhaseInfoSuccess(nil) + trns := DoTransitionType(TransitionTypeBarrier, pInfo) + assert.Equal(t, TransitionTypeBarrier, trns.Type()) + assert.Equal(t, pInfo, trns.Info()) + assert.Equal(t, PhaseSuccess, trns.Info().Phase()) + }) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/core/transitiontype_enumer.go b/flyteplugins/go/tasks/pluginmachinery/core/transitiontype_enumer.go new file mode 100644 index 0000000000..41608fb36a --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/core/transitiontype_enumer.go @@ -0,0 +1,49 @@ +// Code generated by "enumer --type=TransitionType"; DO NOT EDIT. 
+ +package core + +import ( + "fmt" +) + +const _TransitionTypeName = "TransitionTypeEphemeralTransitionTypeBarrier" + +var _TransitionTypeIndex = [...]uint8{0, 23, 44} + +func (i TransitionType) String() string { + if i < 0 || i >= TransitionType(len(_TransitionTypeIndex)-1) { + return fmt.Sprintf("TransitionType(%d)", i) + } + return _TransitionTypeName[_TransitionTypeIndex[i]:_TransitionTypeIndex[i+1]] +} + +var _TransitionTypeValues = []TransitionType{0, 1} + +var _TransitionTypeNameToValueMap = map[string]TransitionType{ + _TransitionTypeName[0:23]: 0, + _TransitionTypeName[23:44]: 1, +} + +// TransitionTypeString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func TransitionTypeString(s string) (TransitionType, error) { + if val, ok := _TransitionTypeNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to TransitionType values", s) +} + +// TransitionTypeValues returns all values of the enum +func TransitionTypeValues() []TransitionType { + return _TransitionTypeValues +} + +// IsATransitionType returns "true" if the value is listed in the enum definition. "false" otherwise +func (i TransitionType) IsATransitionType() bool { + for _, v := range _TransitionTypeValues { + if i == v { + return true + } + } + return false +} diff --git a/flyteplugins/go/tasks/pluginmachinery/encoding/encoder.go b/flyteplugins/go/tasks/pluginmachinery/encoding/encoder.go new file mode 100644 index 0000000000..8e31650edc --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/encoding/encoder.go @@ -0,0 +1,105 @@ +package encoding + +import ( + "encoding/base32" + "fmt" + "hash" + "hash/fnv" + "strings" +) + +const specialEncoderKey = "abcdefghijklmnopqrstuvwxyz123456" + +var Base32Encoder = base32.NewEncoding(specialEncoderKey).WithPadding(base32.NoPadding) + +// Algorithm defines an enum for the encoding algorithm to use. 
+type Algorithm uint32 + +const ( + // Algorithm32 uses fnv32 bit encoder. + Algorithm32 Algorithm = iota + + // Algorithm64 uses fnv64 bit encoder. + Algorithm64 + + // Algorithm128 uses fnv128 bit encoder. + Algorithm128 +) + +type Option interface { + option() +} + +// AlgorithmOption defines a wrapper to pass the algorithm to encoding functions. +type AlgorithmOption struct { + Option + algo Algorithm +} + +// NewAlgorithmOption wraps the Algorithm into an AlgorithmOption to pass to the encoding functions. +func NewAlgorithmOption(algo Algorithm) AlgorithmOption { + return AlgorithmOption{ + algo: algo, + } +} + +// FixedLengthUniqueID creates a new UniqueID that is based on the inputID and of a specified length, if the given id is +// longer than the maxLength. +func FixedLengthUniqueID(inputID string, maxLength int, options ...Option) (string, error) { + if len(inputID) <= maxLength { + return inputID, nil + } + + var hasher hash.Hash + for _, option := range options { + if algoOption, casted := option.(AlgorithmOption); casted { + switch algoOption.algo { + case Algorithm32: + hasher = fnv.New32a() + case Algorithm64: + hasher = fnv.New64a() + case Algorithm128: + hasher = fnv.New128a() + } + } + } + + if hasher == nil { + hasher = fnv.New32a() + } + + // Using 32a/64a an error can never happen, so this will always remain not covered by a unit test + _, _ = hasher.Write([]byte(inputID)) // #nosec + b := hasher.Sum(nil) + + // Encoding Length Calculation: + // Base32 Encoder will encode every 5 bits into an output character (2 ^ 5 = 32) + // output length = ciel( / 5) + // for 32a hashing = ceil(32 / 5) = 7 + // for 64a hashing = ceil(64 / 5) = 13 + // We prefix with character `f` so the final output is 8 or 14 + + finalStr := "f" + Base32Encoder.EncodeToString(b) + if len(finalStr) > maxLength { + return finalStr, fmt.Errorf("max Length is too small, cannot create an encoded string that is so small") + } + return finalStr, nil +} + +// 
FixedLengthUniqueIDForParts creates a new uniqueID using the parts concatenated using `-` and ensures that the +// uniqueID is not longer than the maxLength. In case a simple concatenation yields a longer string, a new hashed ID is +// created which is always around 8 characters in length. +func FixedLengthUniqueIDForParts(maxLength int, parts []string, options ...Option) (string, error) { + b := strings.Builder{} + for i, p := range parts { + if i > 0 && b.Len() > 0 { + // Ignoring the error as it always returns a nil error + _, _ = b.WriteRune('-') // #nosec + } + + // Ignoring the error as this is always nil + _, _ = b.WriteString(p) // #nosec + } + + return FixedLengthUniqueID(b.String(), maxLength, options...) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/encoding/encoder_test.go b/flyteplugins/go/tasks/pluginmachinery/encoding/encoder_test.go new file mode 100644 index 0000000000..9dc807f154 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/encoding/encoder_test.go @@ -0,0 +1,104 @@ +package encoding + +import ( + "hash" + "hash/fnv" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestFixedLengthUniqueID(t *testing.T) { + tests := []struct { + name string + input string + maxLength int + output string + expectError bool + }{ + {"smallerThanMax", "x", 5, "x", false}, + {"veryLowLimit", "xx", 1, "flfryc2i", true}, + {"highLimit", "xxxxxx", 5, "fufiti6i", true}, + {"higherLimit", "xxxxx", 10, "xxxxx", false}, + {"largeID", "xxxxxxxxxxxxxxxxxxxxxxxx", 20, "fuaa3aji", false}, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + i, err := FixedLengthUniqueID(test.input, test.maxLength) + if test.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + + assert.Equal(t, test.output, i) + }) + } +} + +func TestFixedLengthUniqueIDForParts(t *testing.T) { + tests := []struct { + name string + parts []string + maxLength int + algorithm Algorithm + output string + expectError bool + }{ + 
{"smallerThanMax", []string{"x", "y", "z"}, 10, Algorithm32, "x-y-z", false}, + {"veryLowLimit", []string{"x", "y"}, 1, Algorithm32, "fz2jizji", true}, + {"fittingID", []string{"x"}, 2, Algorithm32, "x", false}, + {"highLimit", []string{"x", "y", "z"}, 4, Algorithm32, "fxzsoqrq", true}, + {"largeID", []string{"x", "y", "z", "m", "n", "y", "z", "m", "n", "y", "z", "m", "n"}, 15, Algorithm32, "fe63sz6y", false}, + {"largeID", []string{"x", "y", "z", "m", "n", "y", "z", "m", "n", "y", "z", "m", "n"}, 15, Algorithm64, "fwp4bky2kucex5", false}, + {"largeID", []string{"x", "y", "z", "m", "n", "y", "z", "m", "n", "y", "z", "m", "n", "z", "m", "n", "y", "z", "m", "n"}, 30, Algorithm128, "fbmesl15enghpjepzjm5cp1zfqe", false}, + } + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + i, err := FixedLengthUniqueIDForParts(test.maxLength, test.parts, NewAlgorithmOption(test.algorithm)) + if test.expectError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + assert.Equal(t, test.output, i) + }) + } +} + +func benchmarkKB(b *testing.B, h hash.Hash) { + b.SetBytes(1024) + data := make([]byte, 1024) + for i := range data { + data[i] = byte(i) + } + + in := make([]byte, 0, h.Size()) + + b.ResetTimer() + for i := 0; i < b.N; i++ { + h.Reset() + h.Write(data) + h.Sum(in) + } +} + +// Documented Results: +// goos: darwin +// goarch: amd64 +// pkg: github.com/flyteorg/flyteplugins/go/tasks/pluginmachinery/encoding +// cpu: Intel(R) Core(TM) i9-9980HK CPU @ 2.40GHz +// BenchmarkFixedLengthUniqueID +// BenchmarkFixedLengthUniqueID/New32a +// BenchmarkFixedLengthUniqueID/New32a-16 1000000 1088 ns/op 941.25 MB/s +// BenchmarkFixedLengthUniqueID/New64a +// BenchmarkFixedLengthUniqueID/New64a-16 1239402 951.3 ns/op 1076.39 MB/s +func BenchmarkFixedLengthUniqueID(b *testing.B) { + b.Run("New32a", func(b *testing.B) { + benchmarkKB(b, fnv.New32a()) + }) + + b.Run("New64a", func(b *testing.B) { + benchmarkKB(b, fnv.New64a()) + }) +} diff --git 
// Package config contains configuration for the flytek8s module - which is global configuration for all Flyte K8s interactions.
// This config is under the subsection `k8s` and registered under the Plugin config
// All K8s based plugins can optionally use the flytek8s module and this configuration allows controlling the defaults
// For example if for every container execution if some default Environment Variables or Annotations should be used, then they can be configured here
// An important configuration is ResourceTolerations that are applied to every container execution that needs some resource on the cluster
package config

import (
	"strings"
	"time"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"

	"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/config"
	config2 "github.com/flyteorg/flyte/v2/flytestdlib/config"
	"github.com/flyteorg/flyte/v2/flytestdlib/storage"
	"github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core"
)

//go:generate pflags K8sPluginConfig --default-var=defaultK8sConfig

// k8sPluginConfigSectionKey is the subsection key under which this config is registered in the plugin config.
const k8sPluginConfigSectionKey = "k8s"

// ResourceNvidiaGPU is the name of the Nvidia GPU resource.
// Copied from: k8s.io/autoscaler/cluster-autoscaler/utils/gpu/gpu.go
const ResourceNvidiaGPU v1.ResourceName = "nvidia.com/gpu"

// Platform-wide fallback resource requests applied to containers that do not specify their own.
var defaultCPURequest = resource.MustParse("1000m")
var defaultMemoryRequest = resource.MustParse("1024Mi")

var (
	// defaultK8sConfig holds the compiled-in defaults for the `k8s` plugin config section.
	// Any of these values can be overridden via the config file / flags.
	defaultK8sConfig = K8sPluginConfig{
		DefaultAnnotations: map[string]string{
			// By default, prevent the cluster autoscaler from evicting Flyte pods.
			"cluster-autoscaler.kubernetes.io/safe-to-evict": "false",
		},
		CoPilot: FlyteCoPilotConfig{
			NamePrefix:           "flyte-copilot-",
			Image:                "cr.flyte.org/flyteorg/flytecopilot:v0.0.15",
			DefaultInputDataPath: "/var/flyte/inputs",
			InputVolumeName:      "flyte-inputs",
			DefaultOutputPath:    "/var/flyte/outputs",
			OutputVolumeName:     "flyte-outputs",
			CPU:                  "500m",
			Memory:               "128Mi",
			Timeout: config2.Duration{
				Duration: time.Hour * 1,
			},
		},
		DefaultCPURequest:    defaultCPURequest,
		DefaultMemoryRequest: defaultMemoryRequest,
		CreateContainerErrorGracePeriod: config2.Duration{
			Duration: time.Minute * 3,
		},
		// Zero duration: CreateContainerConfigError is treated as permanent immediately by default.
		CreateContainerConfigErrorGracePeriod: config2.Duration{
			Duration: time.Minute * 0,
		},
		ImagePullBackoffGracePeriod: config2.Duration{
			Duration: time.Minute * 3,
		},
		// Zero duration disables the pod-pending timeout by default.
		PodPendingTimeout: config2.Duration{
			Duration: 0,
		},
		GpuDeviceNodeLabel:        "k8s.amazonaws.com/accelerator",
		GpuPartitionSizeNodeLabel: "k8s.amazonaws.com/gpu-partition-size",
		GpuResourceName:           ResourceNvidiaGPU,
		AcceleratorDevices: map[string]string{
			// NVIDIA GPUs
			"A10":           "nvidia-a10",
			"A10G":          "nvidia-a10g",
			"A100":          "nvidia-tesla-a100",
			"A100 80G":      "nvidia-a100-80gb",
			"A100G":         "nvidia-a100g",
			"B200":          "nvidia-b200",
			"GB200":         "nvidia-gb200",
			"H100":          "nvidia-h100",
			"H100 80G":      "nvidia-h100-80gb",
			"H100 MEGA 80G": "nvidia-h100-mega-80gb",
			"H200":          "nvidia-h200",
			"K80":           "nvidia-tesla-k80",
			"L4":            "nvidia-l4",
			"L40s":          "nvidia-l40s",
			"L4_VWS":        "nvidia-l4-vws",
			"M60":           "nvidia-tesla-m60",
			"P4":            "nvidia-tesla-p4",
			"P100":          "nvidia-tesla-p100",
			"RTX PRO 6000":  "nvidia-rtx-pro-6000",
			"T4":            "nvidia-tesla-t4",
			"V100":          "nvidia-tesla-v100",

			// Google Cloud TPUs
			"V5E": "tpu-v5-lite-podslice",
			"V5P": "tpu-v5p-slice",
			"V6E": "tpu-v6e-slice",

			// AWS Neuron
			"INF1":  "aws-neuron-inf1",
			"INF2":  "aws-neuron-inf2",
			"TRN1":  "aws-neuron-trn1",
			"TRN1N": "aws-neuron-trn1n",
			"TRN2":  "aws-neuron-trn2",
			"TRN2U": "aws-neuron-trn2u",

			// AMD GPUs
			"MI100":  "amd-mi100",
			"MI210":  "amd-mi210",
			"MI250":  "amd-mi250",
			"MI250X": "amd-mi250x",
			"MI300A": "amd-mi300a",
			"MI300X": "amd-mi300x",
			"MI325X": "amd-mi325x",
			"MI350X": "amd-mi350x",
			"MI355X": "amd-mi355x",

			// Habana Gaudi (Intel)
			"GAUDI1": "habana-gaudi-dl1",
		},
		// Per-device-class resource names and (where applicable) node labels.
		// Keys are the string form of core.GPUAccelerator enum values.
		AcceleratorDeviceClasses: map[string]AcceleratorDeviceClassConfig{
			core.GPUAccelerator_NVIDIA_GPU.String(): {
				ResourceName: "nvidia.com/gpu",
			},
			core.GPUAccelerator_GOOGLE_TPU.String(): {
				ResourceName:           "google.com/tpu",
				DeviceNodeLabel:        "cloud.google.com/gke-tpu-accelerator",
				PartitionSizeNodeLabel: "cloud.google.com/gke-tpu-topology",
			},
			core.GPUAccelerator_AMAZON_NEURON.String(): {
				ResourceName: "aws.amazon.com/neuron",
			},
			core.GPUAccelerator_AMD_GPU.String(): {
				ResourceName: "amd.com/gpu",
			},
			core.GPUAccelerator_HABANA_GAUDI.String(): {
				ResourceName: "habana.ai/gaudi",
			},
		},
		DefaultPodTemplateResync: config2.Duration{
			Duration: 30 * time.Second,
		},
		// NOTE(review): per the field docs this value is in milliseconds (plain int, not a Duration).
		UpdateBaseBackoffDuration:          10,
		UpdateBackoffRetries:               5,
		AddTolerationsForExtendedResources: []string{},
	}

	// K8sPluginConfigSection provides a singular top level config section for all plugins.
	// If you are a plugin developer writing a k8s plugin, register your config section as a subsection to this.
	K8sPluginConfigSection = config.MustRegisterSubSection(k8sPluginConfigSectionKey, &defaultK8sConfig)
)

// K8sPluginConfig should be used to configure per-pod defaults for the entire platform. This allows adding global defaults
// for pods that are being launched.
For example, default annotations, labels, if a finalizer should be injected, +// if taints/tolerations should be used for certain resource types etc. +type K8sPluginConfig struct { + // InjectFinalizer is a boolean flag that indicates if a finalizer should be injected into every K8s resource launched + InjectFinalizer bool `json:"inject-finalizer" pflag:",Instructs the plugin to inject a finalizer on startTask and remove it on task termination."` + + // ------------------------------------------------------------------------------------------------------------- + // Default Configurations to be applied to all Pods launched by Flyte. These are always applied to every Pod. + // Thus if a Pod is interruptible, it will have the default + interruptible tolerations + + // Provide default annotations that should be added to K8s resource + DefaultAnnotations map[string]string `json:"default-annotations" pflag:"-,Defines a set of default annotations to add to the produced pods."` + // Provide default labels that should be added to K8s resource + DefaultLabels map[string]string `json:"default-labels" pflag:"-,Defines a set of default labels to add to the produced pods."` + // Provide additional environment variable pairs that plugin authors will provide to containers + DefaultEnvVars map[string]string `json:"default-env-vars" pflag:"-,Additional environment variable that should be injected into every resource"` + // Provide additional environment variable pairs whose values resolve from the plugin's execution environment. 
+ DefaultEnvVarsFromEnv map[string]string `json:"default-env-vars-from-env" pflag:"-,Additional environment variable that should be injected into every resource"` + // Provide additional environment variable parts from configMaps + DefaultEnvFromConfigMaps []string `json:"default-env-from-configmaps" pflag:"-,Additional environment variable sets that should be injected into each pod from these configMaps"` + // Provide additional environment variable parts from secrets + DefaultEnvFromSecrets []string `json:"default-env-from-secrets" pflag:"-,Additional environment variable sets that should be injected into each pod from these secret"` + + // default cpu requests for a container + DefaultCPURequest resource.Quantity `json:"default-cpus" pflag:",Defines a default value for cpu for containers if not specified."` + // default memory requests for a container + DefaultMemoryRequest resource.Quantity `json:"default-memory" pflag:",Defines a default value for memory for containers if not specified."` + + // Default Tolerations that will be added to every Pod that is created by Flyte. These can be used in heterogeneous clusters, where one wishes to keep all pods created by Flyte on a separate + // set of nodes. + DefaultTolerations []v1.Toleration `json:"default-tolerations" pflag:"-,Tolerations to be applied for every node that is launched by Flyte. Useful in non dedicated flyte clusters"` + // Default Node Selector Labels for pods. These NodeSelector labels are added to all pods, created by Flyte, unless they are marked as interruptible (default of interruptible are different). + DefaultNodeSelector map[string]string `json:"default-node-selector" pflag:"-,Defines a set of node selector labels to add to the all pods launched by Flyte. 
Useful in non dedicated Flyte clusters"` + // Default Affinity that is applied to every pod that Flyte launches + DefaultAffinity *v1.Affinity `json:"default-affinity,omitempty" pflag:"-,Defines default Affinity to be added for every Pod launched by Flyte. Useful in non dedicated Flyte clusters"` + + // Default scheduler that should be used for all pods or CRD that accept Scheduler name. + SchedulerName string `json:"scheduler-name" pflag:",Defines scheduler name."` + + // ----------------------------------------------------------------- + // Special tolerations and node selector for Interruptible tasks. This allows scheduling interruptible tasks onto specific hardware + + // Tolerations for interruptible k8s pods: These tolerations are added to the pods that can tolerate getting evicted from a node. We + // can leverage this for better bin-packing and using low-reliability cheaper machines. + InterruptibleTolerations []v1.Toleration `json:"interruptible-tolerations" pflag:"-,Tolerations to be applied for interruptible pods"` + // Node Selector Labels for interruptible pods: Similar to InterruptibleTolerations, these node selector labels are added for pods that can tolerate + // eviction. 
+ // Deprecated: Please use InterruptibleNodeSelectorRequirement/NonInterruptibleNodeSelectorRequirement + InterruptibleNodeSelector map[string]string `json:"interruptible-node-selector" pflag:"-,Defines a set of node selector labels to add to the interruptible pods."` + // Node Selector Requirements to be added to interruptible and non-interruptible + // pods respectively + InterruptibleNodeSelectorRequirement *v1.NodeSelectorRequirement `json:"interruptible-node-selector-requirement" pflag:"-,Node selector requirement to add to interruptible pods"` + NonInterruptibleNodeSelectorRequirement *v1.NodeSelectorRequirement `json:"non-interruptible-node-selector-requirement" pflag:"-,Node selector requirement to add to non-interruptible pods"` + + // ---------------------------------------------------------------------- + // Specific tolerations that are added for certain resources. Useful for maintaining gpu resources separate in the cluster + + // Tolerations in the cluster that should be applied for a specific resource + // Currently we support simple resource based tolerations only + ResourceTolerations map[v1.ResourceName][]v1.Toleration `json:"resource-tolerations" pflag:"-,Default tolerations to be applied for resource of type 'key'"` + + // Flyte CoPilot Configuration + CoPilot FlyteCoPilotConfig `json:"co-pilot" pflag:",Co-Pilot Configuration"` + + // DeleteResourceOnFinalize instructs the system to delete the resource on finalize. This ensures that no resources + // are kept around (potentially consuming cluster resources). This, however, will cause k8s log links to expire as + // soon as the resource is finalized. + DeleteResourceOnFinalize bool `json:"delete-resource-on-finalize" pflag:",Instructs the system to delete the resource upon successful execution of a k8s pod rather than have the k8s garbage collector clean it up. This ensures that no resources are kept around (potentially consuming cluster resources). 
This, however, will cause k8s log links to expire as soon as the resource is finalized."` + + // Time to wait for transient CreateContainerError errors to be resolved. If the + // error persists past this grace period, it will be inferred to be a permanent + // one, and the corresponding task marked as failed + CreateContainerErrorGracePeriod config2.Duration `json:"create-container-error-grace-period" pflag:"-,Time to wait for transient CreateContainerError errors to be resolved."` + + // Time to wait for transient CreateContainerConfigError errors to be resolved. If the + // error persists past this grace period, it will be inferred to be a permanent error. + // The pod will be deleted, and the corresponding task marked as failed. + CreateContainerConfigErrorGracePeriod config2.Duration `json:"create-container-config-error-grace-period" pflag:"-,Time to wait for transient CreateContainerConfigError errors to be resolved."` + + // Time to wait for transient ImagePullBackoff errors to be resolved. If the + // error persists past this grace period, it will be inferred to be a permanent + // one, and the corresponding task marked as failed + ImagePullBackoffGracePeriod config2.Duration `json:"image-pull-backoff-grace-period" pflag:"-,Time to wait for transient ImagePullBackoff errors to be resolved."` + + // ImagePullPolicy for the submitted pod. + ImagePullPolicy v1.PullPolicy `json:"image-pull-policy" pflag:"-,Image pull policy for all k8s pods created by FlytePropeller."` + + // Time to wait while pod is in pending phase. If the pod is stuck in + // pending phase past this timeout, it will be inferred to be a permanent + // issue, and the corresponding task marked as failed + PodPendingTimeout config2.Duration `json:"pod-pending-timeout" pflag:"-,Time to wait while pod is stuck in pending."` + + // The node label that specifies the attached GPU device. 
+ GpuDeviceNodeLabel string `json:"gpu-device-node-label" pflag:"-,The node label that specifies the attached GPU device."` + + // The node label that specifies the attached GPU partition size. + GpuPartitionSizeNodeLabel string `json:"gpu-partition-size-node-label" pflag:"-,The node label that specifies the attached GPU partition size."` + + // Override for node selector requirement added to pods intended for unpartitioned GPU nodes. + GpuUnpartitionedNodeSelectorRequirement *v1.NodeSelectorRequirement `json:"gpu-unpartitioned-node-selector-requirement" pflag:"-,Override for node selector requirement added to pods intended for unpartitioned GPU nodes."` + + // Toleration added to pods intended for unpartitioned GPU nodes. + GpuUnpartitionedToleration *v1.Toleration `json:"gpu-unpartitioned-toleration" pflag:"-,Toleration added to pods intended for unpartitioned GPU nodes."` + + // Deprecated: Use AcceleratorDeviceClasses instead. The name of the GPU resource to use when the task resource requests GPUs. + GpuResourceName v1.ResourceName `json:"gpu-resource-name" pflag:"-,The name of the GPU resource to use when the task resource requests GPUs."` + + // AcceleratorDevices maps accelerator devices to provisioned Kubernetes node labels. + AcceleratorDevices map[string]string `json:"accelerator-devices" pflag:"-,Maps accelerator devices to provisionedKubernetes node labels."` + + // AcceleratorDeviceClasses maps accelerator device classes to their configuration overrides. + // This allows configuring resource names, node labels, and tolerations for different accelerator types (NVIDIA GPU, Google TPU, Amazon Neuron, AMD GPU). + AcceleratorDeviceClasses map[string]AcceleratorDeviceClassConfig `json:"accelerator-device-classes" pflag:"-,Maps accelerator device classes to their configuration overrides."` + + // DefaultPodSecurityContext provides a default pod security context that should be applied for every pod that is launched by FlytePropeller. 
This may not be applicable to all plugins. For + // downstream plugins - i.e. TensorflowOperators may not support setting this, but Spark does. + DefaultPodSecurityContext *v1.PodSecurityContext `json:"default-pod-security-context" pflag:"-,Optionally specify any default pod security context that should be applied to every Pod launched by FlytePropeller."` + + // DefaultSecurityContext provides a default container security context that should be applied for the primary container launched and created by FlytePropeller. This may not be applicable to all plugins. For + // // downstream plugins - i.e. TensorflowOperators may not support setting this, but Spark does. + DefaultSecurityContext *v1.SecurityContext `json:"default-security-context" pflag:"-,Optionally specify a default security context that should be applied to every container launched/created by FlytePropeller. This will not be applied to plugins that do not support it or to user supplied containers in pod tasks."` + + // EnableHostNetworkingPod is a binary switch to enable `hostNetwork: true` for all pods launched by Flyte. + // Refer to - https://kubernetes.io/docs/concepts/policy/pod-security-policy/#host-namespaces. + // As a follow up, the default pod configurations will now be adjusted using podTemplates per namespace + EnableHostNetworkingPod *bool `json:"enable-host-networking-pod" pflag:"-,If true, will schedule all pods with hostNetwork: true."` + + // DefaultPodDNSConfig provides a default pod DNS config that that should be applied for the primary container launched and created by FlytePropeller. This may not be applicable to all plugins. For + // // downstream plugins - i.e. TensorflowOperators may not support setting this. 
+ DefaultPodDNSConfig *v1.PodDNSConfig `json:"default-pod-dns-config" pflag:"-,Optionally specify a default DNS config that should be applied to every Pod launched by FlytePropeller."` + + // DefaultPodTemplateName that serves as the base PodTemplate for all k8s pods (including + // individual containers) that are creating by FlytePropeller. + DefaultPodTemplateName string `json:"default-pod-template-name" pflag:",Name of the PodTemplate to use as the base for all k8s pods created by FlytePropeller."` + + // DefaultPodTemplateResync defines the frequency at which the k8s informer resyncs the default + // pod template resources. + DefaultPodTemplateResync config2.Duration `json:"default-pod-template-resync" pflag:",Frequency of resyncing default pod templates"` + + // SendObjectEvents indicates whether to send k8s object events in TaskExecutionEvent updates (similar to kubectl get events). + SendObjectEvents bool `json:"send-object-events" pflag:",If true, will send k8s object events in TaskExecutionEvent updates."` + + // Initial delay in exponential backoff when updating a resource in milliseconds. + UpdateBaseBackoffDuration int `json:"update-base-backoff-duration" pflag:",Initial delay in exponential backoff when updating a resource in milliseconds."` + + // Number of retries for exponential backoff when updating a resource. + UpdateBackoffRetries int `json:"update-backoff-retries" pflag:",Number of retries for exponential backoff when updating a resource."` + + // Extended resources that should be added to the tolerations automatically. + AddTolerationsForExtendedResources []string `json:"add-tolerations-for-extended-resources" pflag:",Name of the extended resources for which tolerations should be added."` + + // DisableInjectOwnerReferences is a boolean flag that indicates if owner references should be injected into the k8s resources. 
+ DisableInjectOwnerReferences bool `json:"disable-inject-owner-references" pflag:",Override to not set owner references on k8s resources. This is useful for V2 node execution"` +} + +// FlyteCoPilotConfig specifies configuration for the Flyte CoPilot system. FlyteCoPilot, allows running flytekit-less containers +// in K8s, where the IO is managed by the FlyteCoPilot sidecar process. +type FlyteCoPilotConfig struct { + // Co-pilot sidecar container name + NamePrefix string `json:"name" pflag:",Flyte co-pilot sidecar container name prefix. (additional bits will be added after this)"` + // Docker image FQN where co-pilot binary is installed + Image string `json:"image" pflag:",Flyte co-pilot Docker Image FQN"` + // Default Input Path for every task execution that uses co-pilot. This is used only if a input path is not provided by the user and inputs are required for the task + DefaultInputDataPath string `json:"default-input-path" pflag:",Default path where the volume should be mounted"` + // Default Output Path for every task execution that uses co-pilot. This is used only if a output path is not provided by the user and outputs are required for the task + DefaultOutputPath string `json:"default-output-path" pflag:",Default path where the volume should be mounted"` + // Name of the input volume + InputVolumeName string `json:"input-vol-name" pflag:",Name of the data volume that is created for storing inputs"` + // Name of the output volume + OutputVolumeName string `json:"output-vol-name" pflag:",Name of the data volume that is created for storing outputs"` + // Time for which the sidecar container should wait after starting up, for the primary process to appear. If it does not show up in this time + // the process will be assumed to be dead or in a terminal condition and will trigger an abort. 
+ StartTimeout config2.Duration `json:"start-timeout" pflag:"-,Time for which the sidecar should wait on startup before assuming the primary container to have failed startup."` + // Timeout for upload + Timeout config2.Duration `json:"timeout" pflag:"-,Max time to allow for uploads to complete."` + // Resources for CoPilot Containers + CPU string `json:"cpu" pflag:",Used to set cpu for co-pilot containers"` + Memory string `json:"memory" pflag:",Used to set memory for co-pilot containers"` + Storage string `json:"storage" pflag:",Default storage limit for individual inputs / outputs"` + StorageConfigOverride *storage.Config `json:"storage-config-override" pflag:"-,Override for the storage config to use for co-pilot"` +} + +type AcceleratorDeviceClassConfig struct { + // Kubernetes resource name for the accelerator device class. + ResourceName v1.ResourceName `json:"resource-name" pflag:",Kubernetes resource name for the accelerator device class."` + + // The node label that specifies the attached accelerator device. + DeviceNodeLabel string `json:"device-node-label" pflag:"-,The node label that specifies the attached device."` + + // The node label that specifies the attached accelerator partition size. + PartitionSizeNodeLabel string `json:"partition-size-node-label" pflag:"-,The node label that specifies the attached partition size."` + + // Override for node selector requirement added to pods intended for unpartitioned nodes. + UnpartitionedNodeSelectorRequirement *v1.NodeSelectorRequirement `json:"unpartitioned-node-selector-requirement" pflag:"-,Override for node selector requirement added to pods intended for unpartitioned nodes."` + + // Toleration added to pods intended for unpartitioned nodes. + UnpartitionedToleration *v1.Toleration `json:"unpartitioned-toleration" pflag:"-,Toleration added to pods intended for unpartitioned nodes."` + + // PodTemplate provides device-class-specific defaults for pods using this accelerator. 
+ // Platform operators can define pod-level and container-level configuration that serves + // as a base template for tasks using this device class. Task-specific configurations + // can override these defaults. + // + // Precedence (lowest to highest): + // 1. Base PodTemplate (cluster/namespace defaults) + // 2. Device Class PodTemplate (this config) - device-specific defaults + // 3. Task PodSpec - task-specific values override device class for scalars + // + // Merge behavior (BASE semantics): + // - Scalar fields: Task values WIN (device class provides defaults only) + // Examples: schedulerName, dnsPolicy, hostNetwork + // - Slice fields: Appended (device class values + task values) + // Examples: tolerations, volumes, env vars + // - Map fields: Merged (union with task values winning on conflicts) + // Examples: nodeSelector, labels, annotations + // + // Container (and init container) template support: + // - Containers named "default" provide defaults for ALL containers + // - Containers named "primary" provide defaults for the primary container only + // - Primary template is applied after default (primary wins for conflicts) + // - Containers with other names are merged into containers in the base PodSpec with the same name + // - Uses the same container template merging as base PodTemplates + PodTemplate *v1.PodTemplate `json:"pod-template" pflag:"-,PodTemplate providing defaults for this accelerator device class."` +} + +// GetK8sPluginConfig retrieves the current k8s plugin config or default. +func GetK8sPluginConfig() *K8sPluginConfig { + cfg := K8sPluginConfigSection.GetConfig().(*K8sPluginConfig) + + // Viper casts all keys in YAML configs to lowercase, but all the accelerator device classes should be uppercase. 
+ // See: https://github.com/spf13/viper/issues/260 + acceleratorDeviceClasses := make(map[string]AcceleratorDeviceClassConfig) + for key, value := range cfg.AcceleratorDeviceClasses { + acceleratorDeviceClasses[strings.ToUpper(key)] = value + } + cfg.AcceleratorDeviceClasses = acceleratorDeviceClasses + + return cfg +} + +// SetK8sPluginConfig should be used for TESTING ONLY, It Sets current value for the config. +func SetK8sPluginConfig(cfg *K8sPluginConfig) error { + return K8sPluginConfigSection.SetConfig(cfg) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/config/config_test.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/config/config_test.go new file mode 100644 index 0000000000..eb3429774d --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/config/config_test.go @@ -0,0 +1,12 @@ +package config + +import ( + "testing" + + "gotest.tools/assert" +) + +func TestGetK8sPluginConfig(t *testing.T) { + assert.Equal(t, GetK8sPluginConfig().DefaultCPURequest, defaultCPURequest) + assert.Equal(t, GetK8sPluginConfig().DefaultMemoryRequest, defaultMemoryRequest) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/config/k8spluginconfig_flags.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/config/k8spluginconfig_flags.go new file mode 100755 index 0000000000..6d46d83f92 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/config/k8spluginconfig_flags.go @@ -0,0 +1,75 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package config + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. 
// NOTE(review): generated code ("DO NOT EDIT" above) — any fix here must be made in the pflags generator.
// NOTE(review): reflect.TypeOf(v) returns nil when v is a nil interface, so t.Kind() below would panic
// for a nil input; presumably callers never pass nil — confirm in the generator if this matters.
func (K8sPluginConfig) elemValueOrNil(v interface{}) interface{} {
	if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr {
		if reflect.ValueOf(v).IsNil() {
			return reflect.Zero(t.Elem()).Interface()
		} else {
			return reflect.ValueOf(v).Interface()
		}
	} else if v == nil {
		return reflect.Zero(t).Interface()
	}

	return v
}

// mustJsonMarshal marshals v to JSON, panicking on failure (used for flag default rendering).
func (K8sPluginConfig) mustJsonMarshal(v interface{}) string {
	raw, err := json.Marshal(v)
	if err != nil {
		panic(err)
	}

	return string(raw)
}

// mustMarshalJSON marshals a json.Marshaler to its JSON string, panicking on failure.
func (K8sPluginConfig) mustMarshalJSON(v json.Marshaler) string {
	raw, err := v.MarshalJSON()
	if err != nil {
		panic(err)
	}

	return string(raw)
}

// GetPFlagSet will return strongly types pflags for all fields in K8sPluginConfig and its nested types. The format of the
// flags is json-name.json-sub-name... etc.
func (cfg K8sPluginConfig) GetPFlagSet(prefix string) *pflag.FlagSet {
	cmdFlags := pflag.NewFlagSet("K8sPluginConfig", pflag.ExitOnError)
	cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "inject-finalizer"), defaultK8sConfig.InjectFinalizer, "Instructs the plugin to inject a finalizer on startTask and remove it on task termination.")
	cmdFlags.String(fmt.Sprintf("%v%v", prefix, "default-cpus"), defaultK8sConfig.DefaultCPURequest.String(), "Defines a default value for cpu for containers if not specified.")
	cmdFlags.String(fmt.Sprintf("%v%v", prefix, "default-memory"), defaultK8sConfig.DefaultMemoryRequest.String(), "Defines a default value for memory for containers if not specified.")
	cmdFlags.String(fmt.Sprintf("%v%v", prefix, "scheduler-name"), defaultK8sConfig.SchedulerName, "Defines scheduler name.")
	cmdFlags.String(fmt.Sprintf("%v%v", prefix, "co-pilot.name"), defaultK8sConfig.CoPilot.NamePrefix, "Flyte co-pilot sidecar container name prefix. (additional bits will be added after this)")
	cmdFlags.String(fmt.Sprintf("%v%v", prefix, "co-pilot.image"), defaultK8sConfig.CoPilot.Image, "Flyte co-pilot Docker Image FQN")
	cmdFlags.String(fmt.Sprintf("%v%v", prefix, "co-pilot.default-input-path"), defaultK8sConfig.CoPilot.DefaultInputDataPath, "Default path where the volume should be mounted")
	cmdFlags.String(fmt.Sprintf("%v%v", prefix, "co-pilot.default-output-path"), defaultK8sConfig.CoPilot.DefaultOutputPath, "Default path where the volume should be mounted")
	cmdFlags.String(fmt.Sprintf("%v%v", prefix, "co-pilot.input-vol-name"), defaultK8sConfig.CoPilot.InputVolumeName, "Name of the data volume that is created for storing inputs")
	cmdFlags.String(fmt.Sprintf("%v%v", prefix, "co-pilot.output-vol-name"), defaultK8sConfig.CoPilot.OutputVolumeName, "Name of the data volume that is created for storing outputs")
	cmdFlags.String(fmt.Sprintf("%v%v", prefix, "co-pilot.cpu"), defaultK8sConfig.CoPilot.CPU, "Used to set cpu for co-pilot containers")
	cmdFlags.String(fmt.Sprintf("%v%v", prefix, "co-pilot.memory"), defaultK8sConfig.CoPilot.Memory, "Used to set memory for co-pilot containers")
	cmdFlags.String(fmt.Sprintf("%v%v", prefix, "co-pilot.storage"), defaultK8sConfig.CoPilot.Storage, "Default storage limit for individual inputs / outputs")
	cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "delete-resource-on-finalize"), defaultK8sConfig.DeleteResourceOnFinalize, "Instructs the system to delete the resource upon successful execution of a k8s pod rather than have the k8s garbage collector clean it up. This ensures that no resources are kept around (potentially consuming cluster resources). This, however, will cause k8s log links to expire as soon as the resource is finalized.")
	cmdFlags.String(fmt.Sprintf("%v%v", prefix, "default-pod-template-name"), defaultK8sConfig.DefaultPodTemplateName, "Name of the PodTemplate to use as the base for all k8s pods created by FlytePropeller.")
	cmdFlags.String(fmt.Sprintf("%v%v", prefix, "default-pod-template-resync"), defaultK8sConfig.DefaultPodTemplateResync.String(), "Frequency of resyncing default pod templates")
	cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "send-object-events"), defaultK8sConfig.SendObjectEvents, "If true, will send k8s object events in TaskExecutionEvent updates.")
	cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "update-base-backoff-duration"), defaultK8sConfig.UpdateBaseBackoffDuration, "Initial delay in exponential backoff when updating a resource in milliseconds.")
	cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "update-backoff-retries"), defaultK8sConfig.UpdateBackoffRetries, "Number of retries for exponential backoff when updating a resource.")
	cmdFlags.StringSlice(fmt.Sprintf("%v%v", prefix, "add-tolerations-for-extended-resources"), defaultK8sConfig.AddTolerationsForExtendedResources, "Name of the extended resources for which tolerations should be added.")
	cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "disable-inject-owner-references"), defaultK8sConfig.DisableInjectOwnerReferences, "Override to not set owner references on k8s resources. This is useful for V2 node execution")
	return cmdFlags
}

// ---- file: flyteplugins/go/tasks/pluginmachinery/flytek8s/config/k8spluginconfig_flags_test.go ----

// Code generated by go generate; DO NOT EDIT.
// This file was generated by robots.

package config

import (
	"encoding/json"
	"fmt"
	"reflect"
	"strings"
	"testing"

	"github.com/mitchellh/mapstructure"
	"github.com/stretchr/testify/assert"
)

// NOTE(review): generated test code ("DO NOT EDIT" above) — changes belong in the pflags generator.
var dereferencableKindsK8sPluginConfig = map[reflect.Kind]struct{}{
	reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {},
}

// Checks if t is a kind that can be dereferenced to get its underlying type.
func canGetElementK8sPluginConfig(t reflect.Kind) bool {
	_, exists := dereferencableKindsK8sPluginConfig[t]
	return exists
}

// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the
// object. Otherwise, it'll just pass on the original data.
func jsonUnmarshalerHookK8sPluginConfig(_, to reflect.Type, data interface{}) (interface{}, error) {
	unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem()
	if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) ||
		(canGetElementK8sPluginConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) {

		raw, err := json.Marshal(data)
		if err != nil {
			fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err)
			return data, nil
		}

		res := reflect.New(to).Interface()
		err = json.Unmarshal(raw, &res)
		if err != nil {
			fmt.Printf("Failed to umarshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err)
			return data, nil
		}

		return res, nil
	}

	return data, nil
}

// decode_K8sPluginConfig decodes input into result using json tags, weak typing, and the hooks above.
func decode_K8sPluginConfig(input, result interface{}) error {
	config := &mapstructure.DecoderConfig{
		TagName:          "json",
		WeaklyTypedInput: true,
		Result:           result,
		DecodeHook: mapstructure.ComposeDecodeHookFunc(
			mapstructure.StringToTimeDurationHookFunc(),
			mapstructure.StringToSliceHookFunc(","),
			jsonUnmarshalerHookK8sPluginConfig,
		),
	}

	decoder, err := mapstructure.NewDecoder(config)
	if err != nil {
		return err
	}

	return decoder.Decode(input)
}

// join_K8sPluginConfig joins the elements of a slice-like value into a single string with sep.
func join_K8sPluginConfig(arr interface{}, sep string) string {
	listValue := reflect.ValueOf(arr)
	strs := make([]string, 0, listValue.Len())
	for i := 0; i < listValue.Len(); i++ {
		strs = append(strs, fmt.Sprintf("%v", listValue.Index(i)))
	}

	return strings.Join(strs, sep)
}

func testDecodeJson_K8sPluginConfig(t *testing.T, val, result interface{}) {
	assert.NoError(t, decode_K8sPluginConfig(val, result))
}

func testDecodeRaw_K8sPluginConfig(t *testing.T, vStringSlice, result interface{}) {
	assert.NoError(t, decode_K8sPluginConfig(vStringSlice, result))
}

// Smoke test: the generated flag set is non-empty.
func TestK8sPluginConfig_GetPFlagSet(t *testing.T) {
	val := K8sPluginConfig{}
	cmdFlags := val.GetPFlagSet("")
	assert.True(t, cmdFlags.HasFlags())
}

// NOTE(review): this function continues beyond the visible chunk; the trailing assignment below is
// completed on the following lines of the file.
func TestK8sPluginConfig_SetFlags(t *testing.T) {
	actual := K8sPluginConfig{}
	cmdFlags := actual.GetPFlagSet("")
	assert.True(t, cmdFlags.HasFlags())

	t.Run("Test_inject-finalizer", func(t *testing.T) {

		t.Run("Override", func(t *testing.T) {
			testValue := "1"

			cmdFlags.Set("inject-finalizer", testValue)
			if vBool, err := cmdFlags.GetBool("inject-finalizer"); err == nil {
				testDecodeJson_K8sPluginConfig(t, fmt.Sprintf("%v", vBool), &actual.InjectFinalizer)

			} else {
				assert.FailNow(t, err.Error())
			}
		})
	})
	t.Run("Test_default-cpus", func(t *testing.T) {

		t.Run("Override", func(t *testing.T) {
			testValue :=
defaultK8sConfig.DefaultCPURequest.String() + + cmdFlags.Set("default-cpus", testValue) + if vString, err := cmdFlags.GetString("default-cpus"); err == nil { + testDecodeJson_K8sPluginConfig(t, fmt.Sprintf("%v", vString), &actual.DefaultCPURequest) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_default-memory", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := defaultK8sConfig.DefaultMemoryRequest.String() + + cmdFlags.Set("default-memory", testValue) + if vString, err := cmdFlags.GetString("default-memory"); err == nil { + testDecodeJson_K8sPluginConfig(t, fmt.Sprintf("%v", vString), &actual.DefaultMemoryRequest) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_scheduler-name", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("scheduler-name", testValue) + if vString, err := cmdFlags.GetString("scheduler-name"); err == nil { + testDecodeJson_K8sPluginConfig(t, fmt.Sprintf("%v", vString), &actual.SchedulerName) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_co-pilot.name", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("co-pilot.name", testValue) + if vString, err := cmdFlags.GetString("co-pilot.name"); err == nil { + testDecodeJson_K8sPluginConfig(t, fmt.Sprintf("%v", vString), &actual.CoPilot.NamePrefix) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_co-pilot.image", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("co-pilot.image", testValue) + if vString, err := cmdFlags.GetString("co-pilot.image"); err == nil { + testDecodeJson_K8sPluginConfig(t, fmt.Sprintf("%v", vString), &actual.CoPilot.Image) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_co-pilot.default-input-path", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := 
"1" + + cmdFlags.Set("co-pilot.default-input-path", testValue) + if vString, err := cmdFlags.GetString("co-pilot.default-input-path"); err == nil { + testDecodeJson_K8sPluginConfig(t, fmt.Sprintf("%v", vString), &actual.CoPilot.DefaultInputDataPath) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_co-pilot.default-output-path", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("co-pilot.default-output-path", testValue) + if vString, err := cmdFlags.GetString("co-pilot.default-output-path"); err == nil { + testDecodeJson_K8sPluginConfig(t, fmt.Sprintf("%v", vString), &actual.CoPilot.DefaultOutputPath) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_co-pilot.input-vol-name", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("co-pilot.input-vol-name", testValue) + if vString, err := cmdFlags.GetString("co-pilot.input-vol-name"); err == nil { + testDecodeJson_K8sPluginConfig(t, fmt.Sprintf("%v", vString), &actual.CoPilot.InputVolumeName) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_co-pilot.output-vol-name", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("co-pilot.output-vol-name", testValue) + if vString, err := cmdFlags.GetString("co-pilot.output-vol-name"); err == nil { + testDecodeJson_K8sPluginConfig(t, fmt.Sprintf("%v", vString), &actual.CoPilot.OutputVolumeName) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_co-pilot.cpu", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("co-pilot.cpu", testValue) + if vString, err := cmdFlags.GetString("co-pilot.cpu"); err == nil { + testDecodeJson_K8sPluginConfig(t, fmt.Sprintf("%v", vString), &actual.CoPilot.CPU) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_co-pilot.memory", func(t 
*testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("co-pilot.memory", testValue) + if vString, err := cmdFlags.GetString("co-pilot.memory"); err == nil { + testDecodeJson_K8sPluginConfig(t, fmt.Sprintf("%v", vString), &actual.CoPilot.Memory) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_co-pilot.storage", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("co-pilot.storage", testValue) + if vString, err := cmdFlags.GetString("co-pilot.storage"); err == nil { + testDecodeJson_K8sPluginConfig(t, fmt.Sprintf("%v", vString), &actual.CoPilot.Storage) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_delete-resource-on-finalize", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("delete-resource-on-finalize", testValue) + if vBool, err := cmdFlags.GetBool("delete-resource-on-finalize"); err == nil { + testDecodeJson_K8sPluginConfig(t, fmt.Sprintf("%v", vBool), &actual.DeleteResourceOnFinalize) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_default-pod-template-name", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("default-pod-template-name", testValue) + if vString, err := cmdFlags.GetString("default-pod-template-name"); err == nil { + testDecodeJson_K8sPluginConfig(t, fmt.Sprintf("%v", vString), &actual.DefaultPodTemplateName) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_default-pod-template-resync", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := defaultK8sConfig.DefaultPodTemplateResync.String() + + cmdFlags.Set("default-pod-template-resync", testValue) + if vString, err := cmdFlags.GetString("default-pod-template-resync"); err == nil { + testDecodeJson_K8sPluginConfig(t, fmt.Sprintf("%v", vString), &actual.DefaultPodTemplateResync) + + } 
else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_send-object-events", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("send-object-events", testValue) + if vBool, err := cmdFlags.GetBool("send-object-events"); err == nil { + testDecodeJson_K8sPluginConfig(t, fmt.Sprintf("%v", vBool), &actual.SendObjectEvents) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_update-base-backoff-duration", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("update-base-backoff-duration", testValue) + if vInt, err := cmdFlags.GetInt("update-base-backoff-duration"); err == nil { + testDecodeJson_K8sPluginConfig(t, fmt.Sprintf("%v", vInt), &actual.UpdateBaseBackoffDuration) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_update-backoff-retries", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("update-backoff-retries", testValue) + if vInt, err := cmdFlags.GetInt("update-backoff-retries"); err == nil { + testDecodeJson_K8sPluginConfig(t, fmt.Sprintf("%v", vInt), &actual.UpdateBackoffRetries) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_add-tolerations-for-extended-resources", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := join_K8sPluginConfig(defaultK8sConfig.AddTolerationsForExtendedResources, ",") + + cmdFlags.Set("add-tolerations-for-extended-resources", testValue) + if vStringSlice, err := cmdFlags.GetStringSlice("add-tolerations-for-extended-resources"); err == nil { + testDecodeRaw_K8sPluginConfig(t, join_K8sPluginConfig(vStringSlice, ","), &actual.AddTolerationsForExtendedResources) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_disable-inject-owner-references", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + 
cmdFlags.Set("disable-inject-owner-references", testValue) + if vBool, err := cmdFlags.GetBool("disable-inject-owner-references"); err == nil { + testDecodeJson_K8sPluginConfig(t, fmt.Sprintf("%v", vBool), &actual.DisableInjectOwnerReferences) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/container_helper.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/container_helper.go new file mode 100644 index 0000000000..a7f8d1adff --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/container_helper.go @@ -0,0 +1,358 @@ +package flytek8s + +import ( + "context" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/apimachinery/pkg/util/validation" + + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/errors" + pluginscore "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core/template" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/flytek8s/config" + "github.com/flyteorg/flyte/v2/flytestdlib/logger" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +const resourceGPU = "gpu" + +// ResourceNvidiaGPU is the name of the Nvidia GPU resource. +// Copied from: k8s.io/autoscaler/cluster-autoscaler/utils/gpu/gpu.go +const ResourceNvidiaGPU = "nvidia.com/gpu" + +// Specifies whether resource resolution should assign unset resource requests or limits from platform defaults +// or existing container values. 
+const assignIfUnset = true + +func MergeResources(in v1.ResourceRequirements, out *v1.ResourceRequirements) { + if out.Limits == nil { + out.Limits = in.Limits + } else if in.Limits != nil { + for key, val := range in.Limits { + out.Limits[key] = val + } + } + if out.Requests == nil { + out.Requests = in.Requests + } else if in.Requests != nil { + for key, val := range in.Requests { + out.Requests[key] = val + } + } +} + +type ResourceRequirement struct { + Request resource.Quantity + Limit resource.Quantity +} + +func resolvePlatformDefaults(platformResources v1.ResourceRequirements, configCPU, configMemory resource.Quantity) v1.ResourceRequirements { + if len(platformResources.Requests) == 0 { + platformResources.Requests = make(v1.ResourceList) + } + + if _, ok := platformResources.Requests[v1.ResourceCPU]; !ok { + platformResources.Requests[v1.ResourceCPU] = configCPU + } + + if _, ok := platformResources.Requests[v1.ResourceMemory]; !ok { + platformResources.Requests[v1.ResourceMemory] = configMemory + } + + if len(platformResources.Limits) == 0 { + platformResources.Limits = make(v1.ResourceList) + } + + return platformResources +} + +// AdjustOrDefaultResource validates resources conform to platform limits and assigns defaults for Request and Limit values by +// using the Request when the Limit is unset, and vice versa. 
+func AdjustOrDefaultResource(request, limit, platformDefault, platformLimit resource.Quantity) ResourceRequirement { + if request.IsZero() { + if !limit.IsZero() { + request = limit + } else { + request = platformDefault + } + } + + if limit.IsZero() { + limit = request + } + + return ensureResourceRange(request, limit, platformLimit) +} + +func ensureResourceLimit(value, limit resource.Quantity) resource.Quantity { + if value.IsZero() || limit.IsZero() { + return value + } + + if value.Cmp(limit) == 1 { + return limit + } + + return value +} + +// ensureResourceRange doesn't assign resources unless they need to be adjusted downwards +func ensureResourceRange(request, limit, platformLimit resource.Quantity) ResourceRequirement { + // Ensure request is < platformLimit + request = ensureResourceLimit(request, platformLimit) + // Ensure limit is < platformLimit + limit = ensureResourceLimit(limit, platformLimit) + // Ensure request is < limit + request = ensureResourceLimit(request, limit) + + return ResourceRequirement{ + Request: request, + Limit: limit, + } +} + +func adjustResourceRequirement(resourceName v1.ResourceName, resourceRequirements, + platformResources v1.ResourceRequirements, assignIfUnset bool) { + + var resourceValue ResourceRequirement + if assignIfUnset { + resourceValue = AdjustOrDefaultResource(resourceRequirements.Requests[resourceName], + resourceRequirements.Limits[resourceName], platformResources.Requests[resourceName], + platformResources.Limits[resourceName]) + } else { + resourceValue = ensureResourceRange(resourceRequirements.Requests[resourceName], + resourceRequirements.Limits[resourceName], platformResources.Limits[resourceName]) + } + + resourceRequirements.Requests[resourceName] = resourceValue.Request + resourceRequirements.Limits[resourceName] = resourceValue.Limit +} + +// SanitizeGPUResourceRequirements converts generic 'gpu' resource requirements to the desired accelerator resource name. 
+func SanitizeGPUResourceRequirements(resources *v1.ResourceRequirements, accelerator *core.GPUAccelerator) { + resourceName := config.GetK8sPluginConfig().GpuResourceName + if accelerator != nil { + resourceName = getAcceleratorResourceName(accelerator) + } + + if res, found := resources.Requests[resourceGPU]; found { + resources.Requests[resourceName] = res + delete(resources.Requests, resourceGPU) + } + + if res, found := resources.Limits[resourceGPU]; found { + resources.Limits[resourceName] = res + delete(resources.Limits, resourceGPU) + } +} + +// ApplyResourceOverrides handles resource resolution, allocation and validation. Primarily, it ensures that container +// resources do not exceed defined platformResource limits and in the case of assignIfUnset, ensures that limits and +// requests are sensibly set for resources of all types. +func ApplyResourceOverrides(resources, platformResources v1.ResourceRequirements, assignIfUnset bool) v1.ResourceRequirements { + if len(resources.Requests) == 0 { + resources.Requests = make(v1.ResourceList) + } + + if len(resources.Limits) == 0 { + resources.Limits = make(v1.ResourceList) + } + + // As a fallback, in the case the Flyte workflow object does not have platformResource defaults set, the defaults + // come from the plugin config. 
+ platformResources = resolvePlatformDefaults(platformResources, config.GetK8sPluginConfig().DefaultCPURequest, + config.GetK8sPluginConfig().DefaultMemoryRequest) + + adjustResourceRequirement(v1.ResourceCPU, resources, platformResources, assignIfUnset) + adjustResourceRequirement(v1.ResourceMemory, resources, platformResources, assignIfUnset) + + _, ephemeralStorageRequested := resources.Requests[v1.ResourceEphemeralStorage] + _, ephemeralStorageLimited := resources.Limits[v1.ResourceEphemeralStorage] + + if ephemeralStorageRequested || ephemeralStorageLimited { + adjustResourceRequirement(v1.ResourceEphemeralStorage, resources, platformResources, assignIfUnset) + } + + // TODO: Make configurable. 1/15/2019 Flyte Cluster doesn't support setting storage requests/limits. + // https://github.com/kubernetes/enhancements/issues/362 + + // Check for accelerator resources (GPU, TPU, Neuron, etc.) + acceleratorResourceNames := getAllAcceleratorResourceNames() + for acceleratorResourceName := range acceleratorResourceNames { + _, requested := resources.Requests[acceleratorResourceName] + _, limited := resources.Limits[acceleratorResourceName] + if requested || limited { + adjustResourceRequirement(acceleratorResourceName, resources, platformResources, assignIfUnset) + } + } + + return resources +} + +// BuildRawContainer constructs a Container based on the definition passed by the TaskExecutionContext. 
+func BuildRawContainer(ctx context.Context, tCtx pluginscore.TaskExecutionContext) (*v1.Container, error) { + taskTemplate, err := tCtx.TaskReader().Read(ctx) + if err != nil { + logger.Warnf(ctx, "failed to read task information when trying to construct container, err: %s", err.Error()) + return nil, err + } + + // validate arguments + taskContainer := taskTemplate.GetContainer() + if taskContainer == nil { + return nil, errors.Errorf(errors.BadTaskSpecification, "unable to create container with no definition in TaskTemplate") + } + if tCtx.TaskExecutionMetadata().GetOverrides() == nil || tCtx.TaskExecutionMetadata().GetOverrides().GetResources() == nil { + return nil, errors.Errorf(errors.BadTaskSpecification, "resource requirements not found for container task, required!") + } + + // Make the container name the same as the pod name, unless it violates K8s naming conventions + // Container names are subject to the DNS-1123 standard + containerName := tCtx.TaskExecutionMetadata().GetTaskExecutionID().GetGeneratedName() + if errs := validation.IsDNS1123Label(containerName); len(errs) > 0 { + containerName = rand.String(4) + } + + res, err := ToK8sResourceRequirements(taskContainer.Resources) + if err != nil { + return nil, err + } + + container := &v1.Container{ + Name: containerName, + Image: taskContainer.GetImage(), + Args: taskContainer.GetArgs(), + Command: taskContainer.GetCommand(), + Env: ToK8sEnvVar(taskContainer.GetEnv()), + TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError, + Resources: *res, + ImagePullPolicy: config.GetK8sPluginConfig().ImagePullPolicy, + } + + return container, nil +} + +// ToK8sContainer builds a Container based on the definition passed by the TaskExecutionContext. This involves applying +// all Flyte configuration including k8s plugins and resource requests. 
func ToK8sContainer(ctx context.Context, tCtx pluginscore.TaskExecutionContext) (*v1.Container, error) {
	// build raw container
	container, err := BuildRawContainer(ctx, tCtx)
	if err != nil {
		return nil, err
	}

	// extract task template and extended resources
	taskTemplate, err := tCtx.TaskReader().Read(ctx)
	if err != nil {
		return nil, err
	}

	// Task-template extended resources merged with execution-time overrides.
	extendedResources := ApplyExtendedResourcesOverrides(
		taskTemplate.GetExtendedResources(),
		tCtx.TaskExecutionMetadata().GetOverrides().GetExtendedResources(),
	)

	// Only apply the plugin-config default security context when the container has none of its own.
	if container.SecurityContext == nil && config.GetK8sPluginConfig().DefaultSecurityContext != nil {
		container.SecurityContext = config.GetK8sPluginConfig().DefaultSecurityContext.DeepCopy()
	}

	// add flyte resource customizations to the container
	templateParameters := template.Parameters{
		TaskExecMetadata: tCtx.TaskExecutionMetadata(),
		Inputs:           tCtx.InputReader(),
		OutputPath:       tCtx.OutputWriter(),
		Task:             tCtx.TaskReader(),
	}

	if err := AddFlyteCustomizationsToContainer(ctx, templateParameters, ResourceCustomizationModeMergeExistingResources, container, extendedResources); err != nil {
		return nil, err
	}

	return container, nil
}

//go:generate enumer -type=ResourceCustomizationMode -trimprefix=ResourceCustomizationMode

// ResourceCustomizationMode selects how AddFlyteCustomizationsToContainer reconciles
// container resources against overrides and platform defaults.
type ResourceCustomizationMode int

const (
	// ResourceCustomizationModeAssignResources is used for container tasks where resources are validated and assigned if necessary.
	ResourceCustomizationModeAssignResources ResourceCustomizationMode = iota
	// ResourceCustomizationModeMergeExistingResources is used for primary containers in pod tasks where container requests and limits are
	// merged, validated and assigned if necessary.
	ResourceCustomizationModeMergeExistingResources
	// ResourceCustomizationModeEnsureExistingResourcesInRange is used for secondary containers in pod tasks where requests and limits are only
	// adjusted if needed (downwards).
	ResourceCustomizationModeEnsureExistingResourcesInRange
)

// AddFlyteCustomizationsToContainer takes a container definition which specifies how to run a Flyte task and fills in
// templated command and argument values, updates resources and decorates environment variables with platform and
// task-specific customizations.
func AddFlyteCustomizationsToContainer(ctx context.Context, parameters template.Parameters,
	mode ResourceCustomizationMode, container *v1.Container, extendedResources *core.ExtendedResources) error {
	// Render templated command and args (e.g. input/output path placeholders) in place.
	modifiedCommand, err := template.Render(ctx, container.Command, parameters)
	if err != nil {
		return err
	}
	container.Command = modifiedCommand

	modifiedArgs, err := template.Render(ctx, container.Args, parameters)
	if err != nil {
		return err
	}
	container.Args = modifiedArgs

	// The flyteconsole url is added based on the `IncludeConsoleURL` bit set via the task template
	consoleURL := ""
	if parameters.IncludeConsoleURL {
		consoleURL = parameters.TaskExecMetadata.GetConsoleURL()
	}
	container.Env, container.EnvFrom = DecorateEnvVars(ctx, container.Env, container.EnvFrom, parameters.TaskExecMetadata.GetEnvironmentVariables(), parameters.TaskExecMetadata.GetTaskExecutionID(), consoleURL)

	// Sanitize base container GPU resource requirements
	// Overrides for extendedResources have already been applied at this point
	SanitizeGPUResourceRequirements(&container.Resources, extendedResources.GetGpuAccelerator())

	// retrieve platformResources and overrideResources to use when aggregating container resources;
	// DeepCopy so the adjustments below never mutate the shared platform spec.
	platformResources := parameters.TaskExecMetadata.GetPlatformResources().DeepCopy()
	if platformResources == nil {
		platformResources = &v1.ResourceRequirements{}
	}

	var overrideResources *v1.ResourceRequirements
	if parameters.TaskExecMetadata.GetOverrides() != nil && parameters.TaskExecMetadata.GetOverrides().GetResources() != nil {
		overrideResources = parameters.TaskExecMetadata.GetOverrides().GetResources().DeepCopy()
	}

	if overrideResources == nil {
		overrideResources = &v1.ResourceRequirements{}
	} else {
		// Sanitize override resource requirements
		SanitizeGPUResourceRequirements(overrideResources, extendedResources.GetGpuAccelerator())
	}

	logger.Infof(ctx, "ApplyResourceOverrides with Resources [%v], Platform Resources [%v] and Container"+
		" Resources [%v] with mode [%v]", overrideResources, platformResources, container.Resources, mode)

	switch mode {
	case ResourceCustomizationModeAssignResources:
		// this will use overrideResources to set container resources and fallback to the platformResource values.
		// it is important to note that this ignores the existing container.Resources values.
		container.Resources = ApplyResourceOverrides(*overrideResources, *platformResources, assignIfUnset)
	case ResourceCustomizationModeMergeExistingResources:
		// this merges the overrideResources on top of the existing container.Resources to apply the overrides, then it
		// uses the platformResource values to set defaults for any missing resource.
		MergeResources(*overrideResources, &container.Resources)
		container.Resources = ApplyResourceOverrides(container.Resources, *platformResources, assignIfUnset)
	case ResourceCustomizationModeEnsureExistingResourcesInRange:
		// this use the platformResources defaults to ensure that the container.Resources values are within the
		// platformResources limits. it will not override any existing container.Resources values.
		container.Resources = ApplyResourceOverrides(container.Resources, *platformResources, !assignIfUnset)
	}

	logger.Infof(ctx, "Adjusted container resources [%v]", container.Resources)
	return nil
}
diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/container_helper_test.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/container_helper_test.go
new file mode 100644
index 0000000000..f18ec63c93
--- /dev/null
+++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/container_helper_test.go
@@ -0,0 +1,1103 @@
package flytek8s

import (
	"context"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/util/validation"

	"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core/mocks"
	"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core/template"
	"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/flytek8s/config"
	mocks2 "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/io/mocks"
	"github.com/flyteorg/flyte/v2/flytestdlib/storage"
	"github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core"
)

// zeroQuantity stands in for an "unset" request or limit in the cases below.
var zeroQuantity = resource.MustParse("0")

// TestAssignResource exercises AdjustOrDefaultResource: request/limit defaulting from
// each other, falling back to the platform default, and capping at the platform limit.
func TestAssignResource(t *testing.T) {
	t.Run("Leave valid requests and limits unchanged", func(t *testing.T) {
		res := AdjustOrDefaultResource(
			resource.MustParse("1"), resource.MustParse("2"),
			resource.MustParse("10"), resource.MustParse("20"))
		assert.True(t, res.Request.Equal(resource.MustParse("1")))
		assert.True(t, res.Limit.Equal(resource.MustParse("2")))
	})
	t.Run("Assign unset Request from Limit", func(t *testing.T) {
		res := AdjustOrDefaultResource(
			zeroQuantity, resource.MustParse("2"),
			resource.MustParse("10"), resource.MustParse("20"))
		assert.True(t, res.Request.Equal(resource.MustParse("2")))
		assert.True(t, res.Limit.Equal(resource.MustParse("2")))
	})
	t.Run("Assign unset Limit from Request", func(t *testing.T) {
		res := AdjustOrDefaultResource(
			resource.MustParse("2"), zeroQuantity,
			resource.MustParse("10"), resource.MustParse("20"))
		assert.Equal(t, resource.MustParse("2"), res.Request)
		assert.Equal(t, resource.MustParse("2"), res.Limit)
	})
	t.Run("Assign from platform defaults", func(t *testing.T) {
		res := AdjustOrDefaultResource(
			zeroQuantity, zeroQuantity,
			resource.MustParse("10"), resource.MustParse("20"))
		assert.Equal(t, resource.MustParse("10"), res.Request)
		assert.Equal(t, resource.MustParse("10"), res.Limit)
	})
	t.Run("Adjust Limit when Request > Limit", func(t *testing.T) {
		res := AdjustOrDefaultResource(
			resource.MustParse("10"), resource.MustParse("2"),
			resource.MustParse("10"), resource.MustParse("20"))
		assert.Equal(t, resource.MustParse("2"), res.Request)
		assert.Equal(t, resource.MustParse("2"), res.Limit)
	})
	t.Run("Adjust Limit > platformLimit", func(t *testing.T) {
		res := AdjustOrDefaultResource(
			resource.MustParse("1"), resource.MustParse("40"),
			resource.MustParse("10"), resource.MustParse("20"))
		assert.True(t, res.Request.Equal(resource.MustParse("1")))
		assert.True(t, res.Limit.Equal(resource.MustParse("20")))
	})
	t.Run("Adjust Request, Limit > platformLimit", func(t *testing.T) {
		res := AdjustOrDefaultResource(
			resource.MustParse("40"), resource.MustParse("50"),
			resource.MustParse("10"), resource.MustParse("20"))
		assert.True(t, res.Request.Equal(resource.MustParse("20")))
		assert.True(t, res.Limit.Equal(resource.MustParse("20")))
	})
}

// TestValidateResource exercises ensureResourceRange's downward-only clamping.
func TestValidateResource(t *testing.T) {
	platformLimit := resource.MustParse("5")
	t.Run("adjust when Request > Limit", func(t *testing.T) {
		res := ensureResourceRange(resource.MustParse("4"), resource.MustParse("3"), platformLimit)
		assert.True(t, res.Request.Equal(resource.MustParse("3")))
		assert.True(t, res.Limit.Equal(resource.MustParse("3")))
	})
	t.Run("adjust when Request > platformLimit", func(t *testing.T) {
		res := ensureResourceRange(resource.MustParse("6"), platformLimit, platformLimit)
		assert.True(t, res.Request.Equal(platformLimit))
		assert.True(t, res.Limit.Equal(platformLimit))
	})
	t.Run("adjust when Limit > platformLimit", func(t *testing.T) {
		res := ensureResourceRange(resource.MustParse("4"), resource.MustParse("6"), platformLimit)
		assert.True(t, res.Request.Equal(resource.MustParse("4")))
		assert.True(t, res.Limit.Equal(platformLimit))
	})
	t.Run("nothing to do", func(t *testing.T) {
		res := ensureResourceRange(resource.MustParse("1"), resource.MustParse("2"), platformLimit)
		assert.True(t, res.Request.Equal(resource.MustParse("1")))
		assert.True(t, res.Limit.Equal(resource.MustParse("2")))
	})
}

// TestApplyResourceOverrides_OverrideCpu checks CPU request/limit resolution against platform defaults.
func TestApplyResourceOverrides_OverrideCpu(t *testing.T) {
	platformRequirements := v1.ResourceRequirements{
		Requests: v1.ResourceList{
			v1.ResourceCPU: resource.MustParse("3"),
		},
		Limits: v1.ResourceList{
			v1.ResourceCPU: resource.MustParse("10"),
		},
	}
	cpuRequest := resource.MustParse("1")
	overrides := ApplyResourceOverrides(v1.ResourceRequirements{
		Requests: v1.ResourceList{
			v1.ResourceCPU: cpuRequest,
		},
	}, platformRequirements, assignIfUnset)
	assert.EqualValues(t, cpuRequest, overrides.Requests[v1.ResourceCPU])
	assert.EqualValues(t, cpuRequest, overrides.Limits[v1.ResourceCPU])

	cpuLimit := resource.MustParse("2")
	overrides = ApplyResourceOverrides(v1.ResourceRequirements{
		Requests: v1.ResourceList{
			v1.ResourceCPU: cpuRequest,
		},
		Limits: v1.ResourceList{
			v1.ResourceCPU: cpuLimit,
		},
	}, platformRequirements, assignIfUnset)
	assert.EqualValues(t, cpuRequest, overrides.Requests[v1.ResourceCPU])
	assert.EqualValues(t, cpuLimit, overrides.Limits[v1.ResourceCPU])

	// Request equals Limit if not set
	overrides = ApplyResourceOverrides(v1.ResourceRequirements{
		Limits: v1.ResourceList{
			v1.ResourceCPU: cpuLimit,
		},
	}, platformRequirements, assignIfUnset)
	assert.EqualValues(t, cpuLimit, overrides.Requests[v1.ResourceCPU])
	assert.EqualValues(t, cpuLimit, overrides.Limits[v1.ResourceCPU])
}

// TestApplyResourceOverrides_OverrideMemory mirrors the CPU cases for memory.
func TestApplyResourceOverrides_OverrideMemory(t *testing.T) {
	memoryRequest := resource.MustParse("1")
	platformRequirements := v1.ResourceRequirements{
		Requests: v1.ResourceList{
			v1.ResourceMemory: resource.MustParse("3"),
		},
		Limits: v1.ResourceList{
			v1.ResourceMemory: resource.MustParse("10"),
		},
	}
	overrides := ApplyResourceOverrides(v1.ResourceRequirements{
		Requests: v1.ResourceList{
			v1.ResourceMemory: memoryRequest,
		},
	}, platformRequirements, assignIfUnset)
	assert.EqualValues(t, memoryRequest, overrides.Requests[v1.ResourceMemory])
	assert.EqualValues(t, memoryRequest, overrides.Limits[v1.ResourceMemory])

	memoryLimit := resource.MustParse("2")
	overrides = ApplyResourceOverrides(v1.ResourceRequirements{
		Requests: v1.ResourceList{
			v1.ResourceMemory: memoryRequest,
		},
		Limits: v1.ResourceList{
			v1.ResourceMemory: memoryLimit,
		},
	}, platformRequirements, assignIfUnset)
	assert.EqualValues(t, memoryRequest, overrides.Requests[v1.ResourceMemory])
	assert.EqualValues(t, memoryLimit, overrides.Limits[v1.ResourceMemory])

	// Request equals Limit if not set
	overrides = ApplyResourceOverrides(v1.ResourceRequirements{
		Limits: v1.ResourceList{
			v1.ResourceMemory: memoryLimit,
		},
	}, platformRequirements, assignIfUnset)
	assert.EqualValues(t, memoryLimit, overrides.Requests[v1.ResourceMemory])
	assert.EqualValues(t, memoryLimit, overrides.Limits[v1.ResourceMemory])
}

// TestApplyResourceOverrides_OverrideEphemeralStorage covers ephemeral storage, which is
// only adjusted when the caller mentions it (no platform defaults supplied here).
func TestApplyResourceOverrides_OverrideEphemeralStorage(t *testing.T) {
	ephemeralStorageRequest := resource.MustParse("1")
	overrides := ApplyResourceOverrides(v1.ResourceRequirements{
		Requests: v1.ResourceList{
			v1.ResourceEphemeralStorage: ephemeralStorageRequest,
		},
	}, v1.ResourceRequirements{}, assignIfUnset)
	assert.EqualValues(t, ephemeralStorageRequest, overrides.Requests[v1.ResourceEphemeralStorage])
	assert.EqualValues(t, ephemeralStorageRequest, overrides.Limits[v1.ResourceEphemeralStorage])

	ephemeralStorageLimit := resource.MustParse("2")
	overrides = ApplyResourceOverrides(v1.ResourceRequirements{
		Requests: v1.ResourceList{
			v1.ResourceEphemeralStorage: ephemeralStorageRequest,
		},
		Limits: v1.ResourceList{
			v1.ResourceEphemeralStorage: ephemeralStorageLimit,
		},
	}, v1.ResourceRequirements{}, assignIfUnset)
	assert.EqualValues(t, ephemeralStorageRequest, overrides.Requests[v1.ResourceEphemeralStorage])
	assert.EqualValues(t, ephemeralStorageLimit, overrides.Limits[v1.ResourceEphemeralStorage])

	// Request equals Limit if not set
	overrides = ApplyResourceOverrides(v1.ResourceRequirements{
		Limits: v1.ResourceList{
			v1.ResourceEphemeralStorage: ephemeralStorageLimit,
		},
	}, v1.ResourceRequirements{}, assignIfUnset)
	assert.EqualValues(t, ephemeralStorageLimit, overrides.Requests[v1.ResourceEphemeralStorage])
}

// TestApplyResourceOverrides_RemoveStorage verifies CPU/memory/ephemeral-storage entries all
// survive resolution (requests mirrored into limits where missing).
func TestApplyResourceOverrides_RemoveStorage(t *testing.T) {
	requestedResourceQuantity := resource.MustParse("1")
	overrides := ApplyResourceOverrides(v1.ResourceRequirements{
		Requests: v1.ResourceList{
			v1.ResourceMemory:           requestedResourceQuantity,
			v1.ResourceCPU:              requestedResourceQuantity,
			v1.ResourceEphemeralStorage: requestedResourceQuantity,
		},
		Limits: v1.ResourceList{
			v1.ResourceMemory:           requestedResourceQuantity,
			v1.ResourceEphemeralStorage: requestedResourceQuantity,
		},
	}, v1.ResourceRequirements{}, assignIfUnset)
	assert.EqualValues(t, v1.ResourceList{
		v1.ResourceMemory:           requestedResourceQuantity,
		v1.ResourceCPU:              requestedResourceQuantity,
		v1.ResourceEphemeralStorage: requestedResourceQuantity,
	}, overrides.Requests)

	assert.EqualValues(t, v1.ResourceList{
		v1.ResourceMemory:           requestedResourceQuantity,
		v1.ResourceCPU:              requestedResourceQuantity,
		v1.ResourceEphemeralStorage: requestedResourceQuantity,
	}, overrides.Limits)
}

// TestApplyResourceOverrides_OverrideGpu verifies accelerator resources are passed through
// when explicitly requested or limited.
func TestApplyResourceOverrides_OverrideGpu(t *testing.T) {
	gpuRequest := resource.MustParse("1")
	overrides := ApplyResourceOverrides(v1.ResourceRequirements{
		Requests: v1.ResourceList{
			ResourceNvidiaGPU: gpuRequest,
		},
	}, v1.ResourceRequirements{}, assignIfUnset)
	assert.EqualValues(t, gpuRequest, overrides.Requests[ResourceNvidiaGPU])

	overrides = ApplyResourceOverrides(v1.ResourceRequirements{
		Limits: v1.ResourceList{
			ResourceNvidiaGPU: gpuRequest,
		},
	}, v1.ResourceRequirements{}, assignIfUnset)
	assert.EqualValues(t, gpuRequest, overrides.Limits[ResourceNvidiaGPU])
}

// TestSanitizeGPUResourceRequirements checks the generic "gpu" entry is renamed to the
// device-class-specific resource name for each supported accelerator class.
func TestSanitizeGPUResourceRequirements(t *testing.T) {
	t.Run("nil accelerator defaults to NVIDIA GPU", func(t *testing.T) {
		gpuRequest := resource.MustParse("4")
		requirements := v1.ResourceRequirements{
			Requests: v1.ResourceList{
				resourceGPU: gpuRequest,
			},
		}

		expectedRequirements := v1.ResourceRequirements{
			Requests: v1.ResourceList{
				ResourceNvidiaGPU: gpuRequest,
			},
		}

		SanitizeGPUResourceRequirements(&requirements, nil)
		assert.EqualValues(t, expectedRequirements, requirements)
	})

	t.Run("NVIDIA_GPU device class", func(t *testing.T) {
		gpuRequest := resource.MustParse("2")
		requirements := v1.ResourceRequirements{
			Requests: v1.ResourceList{
				resourceGPU: gpuRequest,
			},
			Limits: v1.ResourceList{
				resourceGPU: gpuRequest,
			},
		}

		accelerator := &core.GPUAccelerator{
			Device:      "nvidia-tesla-a100",
			DeviceClass: core.GPUAccelerator_NVIDIA_GPU,
		}

		expectedRequirements := v1.ResourceRequirements{
			Requests: v1.ResourceList{
				v1.ResourceName("nvidia.com/gpu"): gpuRequest,
			},
			Limits: v1.ResourceList{
				v1.ResourceName("nvidia.com/gpu"): gpuRequest,
			},
		}

		SanitizeGPUResourceRequirements(&requirements, accelerator)
		assert.EqualValues(t, expectedRequirements, requirements)
	})

	t.Run("GOOGLE_TPU device class", func(t *testing.T) {
		tpuRequest := resource.MustParse("4")
		requirements := v1.ResourceRequirements{
			Requests: v1.ResourceList{
				resourceGPU: tpuRequest,
			},
			Limits: v1.ResourceList{
				resourceGPU: tpuRequest,
			},
		}

		accelerator := &core.GPUAccelerator{
			Device:      "tpu-v4",
			DeviceClass: core.GPUAccelerator_GOOGLE_TPU,
		}

		expectedRequirements := v1.ResourceRequirements{
			Requests: v1.ResourceList{
				v1.ResourceName("google.com/tpu"): tpuRequest,
			},
			Limits: v1.ResourceList{
				v1.ResourceName("google.com/tpu"): tpuRequest,
			},
		}

		SanitizeGPUResourceRequirements(&requirements, accelerator)
		assert.EqualValues(t, expectedRequirements, requirements)
	})

	t.Run("AMAZON_NEURON device class", func(t *testing.T) {
		neuronRequest := resource.MustParse("1")
		requirements := v1.ResourceRequirements{
			Requests: v1.ResourceList{
				resourceGPU: neuronRequest,
			},
		}

		accelerator := &core.GPUAccelerator{
			Device:      "inferentia2",
			DeviceClass: core.GPUAccelerator_AMAZON_NEURON,
		}

		expectedRequirements := v1.ResourceRequirements{
			Requests: v1.ResourceList{
				v1.ResourceName("aws.amazon.com/neuron"): neuronRequest,
			},
		}

		SanitizeGPUResourceRequirements(&requirements, accelerator)
		assert.EqualValues(t, expectedRequirements, requirements)
	})

	t.Run("AMD_GPU device class", func(t *testing.T) {
		gpuRequest := resource.MustParse("1")
		requirements := v1.ResourceRequirements{
			Requests: v1.ResourceList{
				resourceGPU: gpuRequest,
			},
		}

		accelerator := &core.GPUAccelerator{
			Device:      "amd-mi250",
			DeviceClass: core.GPUAccelerator_AMD_GPU,
		}

		expectedRequirements := v1.ResourceRequirements{
			Requests: v1.ResourceList{
				v1.ResourceName("amd.com/gpu"): gpuRequest,
			},
		}

		SanitizeGPUResourceRequirements(&requirements, accelerator)
		assert.EqualValues(t, expectedRequirements, requirements)
	})

	t.Run("HABANA_GAUDI device class", func(t *testing.T) {
		gpuRequest := resource.MustParse("1")
		requirements := v1.ResourceRequirements{
			Requests: v1.ResourceList{
				resourceGPU: gpuRequest,
			},
		}

		accelerator := &core.GPUAccelerator{
+ Device: "habana-gaudi-dl1", + DeviceClass: core.GPUAccelerator_HABANA_GAUDI, + } + + expectedRequirements := v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceName("habana.ai/gaudi"): gpuRequest, + }, + } + + SanitizeGPUResourceRequirements(&requirements, accelerator) + assert.EqualValues(t, expectedRequirements, requirements) + }) +} + +func TestMergeResources_EmptyIn(t *testing.T) { + requestedResourceQuantity := resource.MustParse("1") + expectedResources := v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceMemory: requestedResourceQuantity, + v1.ResourceCPU: requestedResourceQuantity, + v1.ResourceEphemeralStorage: requestedResourceQuantity, + }, + Limits: v1.ResourceList{ + v1.ResourceMemory: requestedResourceQuantity, + v1.ResourceEphemeralStorage: requestedResourceQuantity, + }, + } + outResources := expectedResources.DeepCopy() + MergeResources(v1.ResourceRequirements{}, outResources) + assert.EqualValues(t, *outResources, expectedResources) +} + +func TestMergeResources_EmptyOut(t *testing.T) { + requestedResourceQuantity := resource.MustParse("1") + expectedResources := v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceMemory: requestedResourceQuantity, + v1.ResourceCPU: requestedResourceQuantity, + v1.ResourceEphemeralStorage: requestedResourceQuantity, + }, + Limits: v1.ResourceList{ + v1.ResourceMemory: requestedResourceQuantity, + v1.ResourceEphemeralStorage: requestedResourceQuantity, + }, + } + outResources := v1.ResourceRequirements{} + MergeResources(expectedResources, &outResources) + assert.EqualValues(t, outResources, expectedResources) +} + +func TestMergeResources_PartialRequirements(t *testing.T) { + requestedResourceQuantity := resource.MustParse("1") + resourceList := v1.ResourceList{ + v1.ResourceMemory: requestedResourceQuantity, + v1.ResourceCPU: requestedResourceQuantity, + v1.ResourceEphemeralStorage: requestedResourceQuantity, + } + inResources := v1.ResourceRequirements{Requests: 
resourceList} + outResources := v1.ResourceRequirements{Limits: resourceList} + MergeResources(inResources, &outResources) + assert.EqualValues(t, outResources, v1.ResourceRequirements{ + Requests: resourceList, + Limits: resourceList, + }) +} + +func TestMergeResources_PartialResourceKeys(t *testing.T) { + requestedResourceQuantity := resource.MustParse("1") + resourceList1 := v1.ResourceList{ + v1.ResourceMemory: requestedResourceQuantity, + v1.ResourceEphemeralStorage: requestedResourceQuantity, + } + resourceList2 := v1.ResourceList{v1.ResourceCPU: requestedResourceQuantity} + expectedResourceList := v1.ResourceList{ + v1.ResourceCPU: requestedResourceQuantity, + v1.ResourceMemory: requestedResourceQuantity, + v1.ResourceEphemeralStorage: requestedResourceQuantity, + } + inResources := v1.ResourceRequirements{ + Requests: resourceList1, + Limits: resourceList2, + } + outResources := v1.ResourceRequirements{ + Requests: resourceList2, + Limits: resourceList1, + } + MergeResources(inResources, &outResources) + assert.EqualValues(t, outResources, v1.ResourceRequirements{ + Requests: expectedResourceList, + Limits: expectedResourceList, + }) +} + +func TestToK8sContainer(t *testing.T) { + taskTemplate := &core.TaskTemplate{ + Type: "test", + Target: &core.TaskTemplate_Container{ + Container: &core.Container{ + Image: "myimage", + Args: []string{ + "arg1", + "arg2", + "arg3", + }, + Command: []string{ + "com1", + "com2", + "com3", + }, + Env: []*core.KeyValuePair{ + { + Key: "k", + Value: "v", + }, + }, + }, + }, + } + + taskReader := &mocks.TaskReader{} + taskReader.On("Read", mock.Anything).Return(taskTemplate, nil) + + inputReader := &mocks2.InputReader{} + inputReader.OnGetInputPath().Return(storage.DataReference("test-data-reference")) + inputReader.OnGetInputPrefixPath().Return(storage.DataReference("test-data-reference-prefix")) + inputReader.OnGetMatch(mock.Anything).Return(&core.LiteralMap{}, nil) + + outputWriter := &mocks2.OutputWriter{} + 
outputWriter.OnGetOutputPrefixPath().Return("") + outputWriter.OnGetRawOutputPrefix().Return("") + outputWriter.OnGetCheckpointPrefix().Return("/checkpoint") + outputWriter.OnGetPreviousCheckpointsPrefix().Return("/prev") + + mockTaskExecMetadata := mocks.TaskExecutionMetadata{} + mockTaskOverrides := mocks.TaskOverrides{} + mockTaskOverrides.OnGetResources().Return(&v1.ResourceRequirements{ + Limits: v1.ResourceList{ + v1.ResourceEphemeralStorage: resource.MustParse("1024Mi"), + }, + }) + mockTaskOverrides.OnGetExtendedResources().Return(nil) + mockTaskExecMetadata.OnGetOverrides().Return(&mockTaskOverrides) + mockTaskExecutionID := mocks.TaskExecutionID{} + mockTaskExecutionID.OnGetID().Return(core.TaskExecutionIdentifier{}) + mockTaskExecutionID.OnGetGeneratedName().Return("gen_name") + mockTaskExecMetadata.OnGetTaskExecutionID().Return(&mockTaskExecutionID) + mockTaskExecMetadata.OnGetPlatformResources().Return(&v1.ResourceRequirements{}) + mockTaskExecMetadata.OnGetEnvironmentVariables().Return(map[string]string{ + "foo": "bar", + }) + mockTaskExecMetadata.OnGetNamespace().Return("my-namespace") + mockTaskExecMetadata.OnGetConsoleURL().Return("") + + tCtx := &mocks.TaskExecutionContext{} + tCtx.OnTaskExecutionMetadata().Return(&mockTaskExecMetadata) + tCtx.OnInputReader().Return(inputReader) + tCtx.OnTaskReader().Return(taskReader) + tCtx.OnOutputWriter().Return(outputWriter) + + cfg := config.GetK8sPluginConfig() + allow := false + cfg.DefaultSecurityContext = &v1.SecurityContext{ + AllowPrivilegeEscalation: &allow, + } + assert.NoError(t, config.SetK8sPluginConfig(cfg)) + + container, err := ToK8sContainer(context.TODO(), tCtx) + assert.NoError(t, err) + assert.Equal(t, container.Image, "myimage") + assert.EqualValues(t, []string{ + "arg1", + "arg2", + "arg3", + }, container.Args) + assert.EqualValues(t, []string{ + "com1", + "com2", + "com3", + }, container.Command) + assert.EqualValues(t, []v1.EnvVar{ + { + Name: "k", + Value: "v", + }, + { + Name: "foo", 
+ Value: "bar", + }, + }, container.Env) + errs := validation.IsDNS1123Label(container.Name) + assert.Nil(t, errs) + assert.NotNil(t, container.SecurityContext) + assert.False(t, *container.SecurityContext.AllowPrivilegeEscalation) +} + +func getTemplateParametersForTest(resourceRequirements, platformResources *v1.ResourceRequirements, includeConsoleURL bool, consoleURL string) template.Parameters { + mockTaskExecMetadata := mocks.TaskExecutionMetadata{} + mockTaskExecutionID := mocks.TaskExecutionID{} + mockTaskExecutionID.OnGetUniqueNodeID().Return("unique_node_id") + mockTaskExecutionID.OnGetGeneratedName().Return("gen_name") + mockTaskExecutionID.OnGetID().Return(core.TaskExecutionIdentifier{ + TaskId: &core.Identifier{ + ResourceType: core.ResourceType_TASK, + Project: "p1", + Domain: "d1", + Name: "task_name", + Version: "v1", + }, + NodeExecutionId: &core.NodeExecutionIdentifier{ + NodeId: "node_id", + ExecutionId: &core.WorkflowExecutionIdentifier{ + Project: "p2", + Domain: "d2", + Name: "n2", + }, + }, + RetryAttempt: 1, + }) + mockTaskExecMetadata.OnGetTaskExecutionID().Return(&mockTaskExecutionID) + + mockOverrides := mocks.TaskOverrides{} + mockOverrides.OnGetResources().Return(resourceRequirements) + mockTaskExecMetadata.OnGetOverrides().Return(&mockOverrides) + mockTaskExecMetadata.OnGetPlatformResources().Return(platformResources) + mockTaskExecMetadata.OnGetEnvironmentVariables().Return(nil) + mockTaskExecMetadata.OnGetNamespace().Return("my-namespace") + mockTaskExecMetadata.OnGetConsoleURL().Return(consoleURL) + + mockInputReader := mocks2.InputReader{} + mockInputPath := storage.DataReference("s3://input/path") + mockInputReader.OnGetInputPath().Return(mockInputPath) + mockInputReader.OnGetInputPrefixPath().Return(mockInputPath) + mockInputReader.On("Get", mock.Anything).Return(nil, nil) + + mockOutputPath := mocks2.OutputFilePaths{} + mockOutputPathPrefix := storage.DataReference("s3://output/path") + 
mockOutputPath.OnGetRawOutputPrefix().Return(mockOutputPathPrefix) + mockOutputPath.OnGetOutputPrefixPath().Return(mockOutputPathPrefix) + mockOutputPath.OnGetCheckpointPrefix().Return("/checkpoint") + mockOutputPath.OnGetPreviousCheckpointsPrefix().Return("/prev") + + return template.Parameters{ + TaskExecMetadata: &mockTaskExecMetadata, + Inputs: &mockInputReader, + OutputPath: &mockOutputPath, + IncludeConsoleURL: includeConsoleURL, + } +} + +func TestAddFlyteCustomizationsToContainer(t *testing.T) { + templateParameters := getTemplateParametersForTest(&v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceEphemeralStorage: resource.MustParse("1024Mi"), + }, + Limits: v1.ResourceList{ + v1.ResourceEphemeralStorage: resource.MustParse("2048Mi"), + }, + }, nil, false, "") + container := &v1.Container{ + Command: []string{ + "{{ .Input }}", + }, + Args: []string{ + "{{ .OutputPrefix }}", + }, + } + err := AddFlyteCustomizationsToContainer(context.TODO(), templateParameters, ResourceCustomizationModeAssignResources, container, nil) + assert.NoError(t, err) + assert.EqualValues(t, container.Args, []string{"s3://output/path"}) + assert.EqualValues(t, container.Command, []string{"s3://input/path"}) + assert.Len(t, container.Resources.Limits, 3) + assert.Len(t, container.Resources.Requests, 3) + assert.Len(t, container.Env, 13) +} + +func TestAddFlyteCustomizationsToContainer_Resources(t *testing.T) { + container := &v1.Container{ + Command: []string{ + "{{ .Input }}", + }, + Args: []string{ + "{{ .OutputPrefix }}", + }, + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("1"), + }, + Limits: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("10"), + }, + }, + } + + t.Run("merge requests/limits for pod tasks - primary container", func(t *testing.T) { + templateParameters := getTemplateParametersForTest(&v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("2"), 
+ }, + }, &v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("2"), + }, + Limits: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("20"), + }, + }, false, "") + err := AddFlyteCustomizationsToContainer(context.TODO(), templateParameters, ResourceCustomizationModeMergeExistingResources, container, nil) + assert.NoError(t, err) + assert.True(t, container.Resources.Requests.Cpu().Equal(resource.MustParse("1"))) + assert.True(t, container.Resources.Limits.Cpu().Equal(resource.MustParse("10"))) + assert.True(t, container.Resources.Requests.Memory().Equal(resource.MustParse("2"))) + assert.True(t, container.Resources.Limits.Memory().Equal(resource.MustParse("2"))) + }) + t.Run("enforce merge requests/limits for pod tasks - values from task overrides", func(t *testing.T) { + templateParameters := getTemplateParametersForTest(&v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("2"), + }, + Limits: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("200"), + }, + }, &v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("2"), + }, + Limits: v1.ResourceList{ + v1.ResourceMemory: resource.MustParse("20"), + }, + }, false, "") + err := AddFlyteCustomizationsToContainer(context.TODO(), templateParameters, ResourceCustomizationModeMergeExistingResources, container, nil) + assert.NoError(t, err) + assert.True(t, container.Resources.Requests.Cpu().Equal(resource.MustParse("1"))) + assert.True(t, container.Resources.Limits.Cpu().Equal(resource.MustParse("10"))) + assert.True(t, container.Resources.Requests.Memory().Equal(resource.MustParse("2"))) + assert.True(t, container.Resources.Limits.Memory().Equal(resource.MustParse("20"))) + }) + t.Run("enforce requests/limits for pod tasks - values from container", func(t *testing.T) { + container := &v1.Container{ + Command: []string{ + "{{ .Input }}", + }, + Args: []string{ + "{{ .OutputPrefix }}", 
+ }, + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("100"), + }, + Limits: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("100"), + }, + }, + } + + templateParameters := getTemplateParametersForTest(&v1.ResourceRequirements{}, &v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("1"), + v1.ResourceMemory: resource.MustParse("2"), + }, + Limits: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("10"), + v1.ResourceMemory: resource.MustParse("20"), + }, + }, false, "") + err := AddFlyteCustomizationsToContainer(context.TODO(), templateParameters, ResourceCustomizationModeMergeExistingResources, container, nil) + assert.NoError(t, err) + assert.True(t, container.Resources.Requests.Cpu().Equal(resource.MustParse("10"))) + assert.True(t, container.Resources.Limits.Cpu().Equal(resource.MustParse("10"))) + assert.True(t, container.Resources.Requests.Memory().Equal(resource.MustParse("2"))) + assert.True(t, container.Resources.Limits.Memory().Equal(resource.MustParse("2"))) + }) + t.Run("ensure gpu resource overriding works for tasks with pod templates", func(t *testing.T) { + container := &v1.Container{ + Command: []string{ + "{{ .Input }}", + }, + Args: []string{ + "{{ .OutputPrefix }}", + }, + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + resourceGPU: resource.MustParse("2"), // Tasks with pod templates request resource via the "gpu" key + }, + Limits: v1.ResourceList{ + resourceGPU: resource.MustParse("2"), + }, + }, + } + + overrideRequests := v1.ResourceList{ + ResourceNvidiaGPU: resource.MustParse("4"), // Resource overrides specify the "nvidia.com/gpu" key + } + + overrideLimits := v1.ResourceList{ + ResourceNvidiaGPU: resource.MustParse("4"), + } + + templateParameters := getTemplateParametersForTest(&v1.ResourceRequirements{ + Requests: overrideRequests, + Limits: overrideLimits, + }, &v1.ResourceRequirements{}, false, "") + + err := 
AddFlyteCustomizationsToContainer(context.TODO(), templateParameters, ResourceCustomizationModeMergeExistingResources, container, nil) + assert.NoError(t, err) + assert.Equal(t, container.Resources.Requests[ResourceNvidiaGPU], overrideRequests[ResourceNvidiaGPU]) + assert.Equal(t, container.Resources.Limits[ResourceNvidiaGPU], overrideLimits[ResourceNvidiaGPU]) + }) + t.Run("ensure ExtendedResources.gpu_accelerator.device_class is respected when setting gpu resources", func(t *testing.T) { + container := &v1.Container{ + Command: []string{ + "{{ .Input }}", + }, + Args: []string{ + "{{ .OutputPrefix }}", + }, + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + resourceGPU: resource.MustParse("2"), + }, + Limits: v1.ResourceList{ + resourceGPU: resource.MustParse("2"), + }, + }, + } + + tpuExtendedResources := &core.ExtendedResources{ + GpuAccelerator: &core.GPUAccelerator{ + Device: "tpu-v4", + DeviceClass: core.GPUAccelerator_GOOGLE_TPU, + }, + } + + templateParameters := getTemplateParametersForTest(&v1.ResourceRequirements{}, &v1.ResourceRequirements{}, false, "") + + err := AddFlyteCustomizationsToContainer(context.TODO(), templateParameters, ResourceCustomizationModeMergeExistingResources, container, tpuExtendedResources) + assert.NoError(t, err) + + // Verify generic "gpu" key is removed + _, hasGenericGPU := container.Resources.Requests[resourceGPU] + assert.False(t, hasGenericGPU) + + // Verify TPU resource is set correctly + expectedTPU := resource.MustParse("2") + assert.Equal(t, expectedTPU, container.Resources.Requests[v1.ResourceName("google.com/tpu")]) + assert.Equal(t, expectedTPU, container.Resources.Limits[v1.ResourceName("google.com/tpu")]) + }) +} + +func TestAddFlyteCustomizationsToContainer_ValidateExistingResources(t *testing.T) { + container := &v1.Container{ + Command: []string{ + "{{ .Input }}", + }, + Args: []string{ + "{{ .OutputPrefix }}", + }, + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + 
v1.ResourceCPU: resource.MustParse("100"), + }, + Limits: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("200"), + }, + }, + } + templateParameters := getTemplateParametersForTest(&v1.ResourceRequirements{}, &v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("1"), + v1.ResourceMemory: resource.MustParse("2"), + }, + Limits: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("10"), + v1.ResourceMemory: resource.MustParse("20"), + }, + }, false, "") + err := AddFlyteCustomizationsToContainer(context.TODO(), templateParameters, ResourceCustomizationModeEnsureExistingResourcesInRange, container, nil) + assert.NoError(t, err) + + assert.True(t, container.Resources.Requests.Cpu().Equal(resource.MustParse("10"))) + assert.True(t, container.Resources.Limits.Cpu().Equal(resource.MustParse("10"))) +} + +func TestAddFlyteCustomizationsToContainer_GPUResourceOverride(t *testing.T) { + type testCase struct { + name string + initialResources v1.ResourceRequirements + overrideResources v1.ResourceRequirements + extendedResources *core.ExtendedResources + customizationMode ResourceCustomizationMode + expectedRequests v1.ResourceList + expectedLimits v1.ResourceList + } + + tests := []testCase{ + { + name: "override gpu: 1 translates to nvidia.com/gpu", + initialResources: v1.ResourceRequirements{}, + overrideResources: v1.ResourceRequirements{ + Requests: v1.ResourceList{resourceGPU: resource.MustParse("1")}, + Limits: v1.ResourceList{resourceGPU: resource.MustParse("1")}, + }, + extendedResources: nil, + customizationMode: ResourceCustomizationModeAssignResources, + expectedRequests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("1"), + v1.ResourceMemory: resource.MustParse("1Gi"), + ResourceNvidiaGPU: resource.MustParse("1"), + }, + expectedLimits: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("1"), + v1.ResourceMemory: resource.MustParse("1Gi"), + ResourceNvidiaGPU: resource.MustParse("1"), + }, + }, + { + name: 
"override gpu: 1 with extended resources for TPU", + initialResources: v1.ResourceRequirements{}, + overrideResources: v1.ResourceRequirements{ + Requests: v1.ResourceList{resourceGPU: resource.MustParse("1")}, + }, + extendedResources: &core.ExtendedResources{ + GpuAccelerator: &core.GPUAccelerator{ + Device: "tpu-v4", + DeviceClass: core.GPUAccelerator_GOOGLE_TPU, + }, + }, + customizationMode: ResourceCustomizationModeAssignResources, + expectedRequests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("1"), + v1.ResourceMemory: resource.MustParse("1Gi"), + v1.ResourceName("google.com/tpu"): resource.MustParse("1"), + }, + expectedLimits: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("1"), + v1.ResourceMemory: resource.MustParse("1Gi"), + v1.ResourceName("google.com/tpu"): resource.MustParse("1"), + }, + }, + { + name: "merge mode - override gpu on container with existing cpu/memory resources", + initialResources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("2"), + v1.ResourceMemory: resource.MustParse("4Gi"), + }, + Limits: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("4"), + v1.ResourceMemory: resource.MustParse("8Gi"), + }, + }, + overrideResources: v1.ResourceRequirements{ + Requests: v1.ResourceList{resourceGPU: resource.MustParse("2")}, + Limits: v1.ResourceList{resourceGPU: resource.MustParse("2")}, + }, + extendedResources: nil, + customizationMode: ResourceCustomizationModeMergeExistingResources, + expectedRequests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("2"), + v1.ResourceMemory: resource.MustParse("4Gi"), + ResourceNvidiaGPU: resource.MustParse("2"), + }, + expectedLimits: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("4"), + v1.ResourceMemory: resource.MustParse("8Gi"), + ResourceNvidiaGPU: resource.MustParse("2"), + }, + }, + { + name: "merge mode - override gpu replaces existing gpu in container", + initialResources: v1.ResourceRequirements{ + Requests: 
v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("2"), + v1.ResourceMemory: resource.MustParse("4Gi"), + resourceGPU: resource.MustParse("1"), + }, + Limits: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("4"), + v1.ResourceMemory: resource.MustParse("8Gi"), + resourceGPU: resource.MustParse("1"), + }, + }, + overrideResources: v1.ResourceRequirements{ + Requests: v1.ResourceList{resourceGPU: resource.MustParse("4")}, + Limits: v1.ResourceList{resourceGPU: resource.MustParse("4")}, + }, + extendedResources: nil, + customizationMode: ResourceCustomizationModeMergeExistingResources, + expectedRequests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("2"), + v1.ResourceMemory: resource.MustParse("4Gi"), + ResourceNvidiaGPU: resource.MustParse("4"), + }, + expectedLimits: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("4"), + v1.ResourceMemory: resource.MustParse("8Gi"), + ResourceNvidiaGPU: resource.MustParse("4"), + }, + }, + { + name: "merge mode - override gpu with TPU on container with existing resources", + initialResources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("8"), + v1.ResourceMemory: resource.MustParse("16Gi"), + resourceGPU: resource.MustParse("1"), + }, + Limits: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("16"), + v1.ResourceMemory: resource.MustParse("32Gi"), + resourceGPU: resource.MustParse("1"), + }, + }, + overrideResources: v1.ResourceRequirements{ + Requests: v1.ResourceList{resourceGPU: resource.MustParse("8")}, + Limits: v1.ResourceList{resourceGPU: resource.MustParse("8")}, + }, + extendedResources: &core.ExtendedResources{ + GpuAccelerator: &core.GPUAccelerator{ + Device: "tpu-v5e", + DeviceClass: core.GPUAccelerator_GOOGLE_TPU, + }, + }, + customizationMode: ResourceCustomizationModeMergeExistingResources, + expectedRequests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("8"), + v1.ResourceMemory: resource.MustParse("16Gi"), + 
v1.ResourceName("google.com/tpu"): resource.MustParse("8"), + }, + expectedLimits: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("16"), + v1.ResourceMemory: resource.MustParse("32Gi"), + v1.ResourceName("google.com/tpu"): resource.MustParse("8"), + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + container := &v1.Container{ + Command: []string{"{{ .Input }}"}, + Args: []string{"{{ .OutputPrefix }}"}, + Resources: tc.initialResources, + } + + overrideResources := tc.overrideResources + templateParameters := getTemplateParametersForTest( + &overrideResources, + &v1.ResourceRequirements{}, + false, + "", + ) + + err := AddFlyteCustomizationsToContainer( + context.TODO(), + templateParameters, + tc.customizationMode, + container, + tc.extendedResources, + ) + assert.NoError(t, err) + + // Verify requests match exactly + assert.Equal(t, len(tc.expectedRequests), len(container.Resources.Requests), + "requests should have exactly %d resources", len(tc.expectedRequests)) + for resourceName, expectedQuantity := range tc.expectedRequests { + actualQuantity := container.Resources.Requests[resourceName] + assert.True(t, expectedQuantity.Equal(actualQuantity), + "expected %s=%s in requests, got %s", resourceName, expectedQuantity.String(), actualQuantity.String()) + } + + // Verify limits match exactly + assert.Equal(t, len(tc.expectedLimits), len(container.Resources.Limits), + "limits should have exactly %d resources", len(tc.expectedLimits)) + for resourceName, expectedQuantity := range tc.expectedLimits { + actualQuantity := container.Resources.Limits[resourceName] + assert.True(t, expectedQuantity.Equal(actualQuantity), + "expected %s=%s in limits, got %s", resourceName, expectedQuantity.String(), actualQuantity.String()) + } + }) + } +} + +func TestAddFlyteCustomizationsToContainer_ValidateEnvFrom(t *testing.T) { + configMapSource := v1.EnvFromSource{ + ConfigMapRef: &v1.ConfigMapEnvSource{ + LocalObjectReference: 
v1.LocalObjectReference{ + Name: "my-configmap", + }, + }, + } + secretSource := v1.EnvFromSource{ + SecretRef: &v1.SecretEnvSource{ + LocalObjectReference: v1.LocalObjectReference{ + Name: "my-secret", + }, + }, + } + + container := &v1.Container{ + Command: []string{ + "{{ .Input }}", + }, + Args: []string{ + "{{ .OutputPrefix }}", + }, + EnvFrom: []v1.EnvFromSource{ + configMapSource, + secretSource, + }, + } + + err := AddFlyteCustomizationsToContainer(context.TODO(), getTemplateParametersForTest(nil, nil, false, ""), ResourceCustomizationModeEnsureExistingResourcesInRange, container, nil) + assert.NoError(t, err) + + assert.Len(t, container.EnvFrom, 2) + assert.Equal(t, container.EnvFrom[0], configMapSource) + assert.Equal(t, container.EnvFrom[1], secretSource) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/copilot.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/copilot.go new file mode 100644 index 0000000000..67010a1e50 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/copilot.go @@ -0,0 +1,287 @@ +package flytek8s + +import ( + "context" + "encoding/base64" + "fmt" + "strconv" + "time" + + "github.com/golang/protobuf/proto" + "github.com/pkg/errors" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + + core2 "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/flytek8s/config" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/io" + "github.com/flyteorg/flyte/v2/flytestdlib/logger" + "github.com/flyteorg/flyte/v2/flytestdlib/storage" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +const ( + flyteSidecarContainerName = "uploader" + flyteDownloaderContainerName = "downloader" +) + +func FlyteCoPilotContainer(name string, cfg config.FlyteCoPilotConfig, args []string, volumeMounts ...v1.VolumeMount) (v1.Container, error) { + cpu, err := resource.ParseQuantity(cfg.CPU) + if err != nil { + 
return v1.Container{}, err + } + + mem, err := resource.ParseQuantity(cfg.Memory) + if err != nil { + return v1.Container{}, err + } + + var storageCfg *storage.Config + if cfg.StorageConfigOverride != nil { + storageCfg = cfg.StorageConfigOverride + } else { + storageCfg = storage.GetConfig() + } + + return v1.Container{ + Name: cfg.NamePrefix + name, + Image: cfg.Image, + Command: CopilotCommandArgs(storageCfg), + Args: args, + WorkingDir: "/", + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + v1.ResourceCPU: cpu, + v1.ResourceMemory: mem, + }, + Requests: v1.ResourceList{ + v1.ResourceCPU: cpu, + v1.ResourceMemory: mem, + }, + }, + VolumeMounts: volumeMounts, + TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError, + ImagePullPolicy: v1.PullIfNotPresent, + }, nil +} + +func CopilotCommandArgs(storageConfig *storage.Config) []string { + var commands = []string{ + "/bin/flyte-copilot", + "--storage.limits.maxDownloadMBs=0", + "--logger.level=" + strconv.Itoa(logger.GetConfig().Level), + } + if storageConfig.MultiContainerEnabled { + commands = append(commands, "--storage.enable-multicontainer") + } + if len(storageConfig.InitContainer) > 0 { + commands = append(commands, fmt.Sprintf("--storage.container=%s", storageConfig.InitContainer)) + + } + commands = append(commands, fmt.Sprintf("--storage.type=%s", storageConfig.Type)) + + if len(storageConfig.Stow.Config) > 0 && len(storageConfig.Stow.Kind) > 0 { + for key, val := range storageConfig.Stow.Config { + commands = append(commands, "--storage.stow.config") + commands = append(commands, fmt.Sprintf("%s=%s", key, val)) + } + return append(commands, fmt.Sprintf("--storage.stow.kind=%s", storageConfig.Stow.Kind)) + } + return append(commands, []string{ + fmt.Sprintf("--storage.connection.secret-key=%s", storageConfig.Connection.SecretKey), + fmt.Sprintf("--storage.connection.access-key=%s", storageConfig.Connection.AccessKey), + fmt.Sprintf("--storage.connection.auth-type=%s", 
storageConfig.Connection.AuthType), + fmt.Sprintf("--storage.connection.region=%s", storageConfig.Connection.Region), + fmt.Sprintf("--storage.connection.endpoint=%s", storageConfig.Connection.Endpoint.String()), + }...) +} + +func SidecarCommandArgs(fromLocalPath string, outputPrefix, rawOutputPath storage.DataReference, uploadTimeout time.Duration, iface *core.TypedInterface) ([]string, error) { + if iface == nil { + return nil, fmt.Errorf("interface is required for CoPilot Sidecar") + } + b, err := proto.Marshal(iface) + if err != nil { + return nil, errors.Wrap(err, "failed to marshal given core.TypedInterface") + } + return []string{ + "sidecar", + "--timeout", + uploadTimeout.String(), + "--to-raw-output", + rawOutputPath.String(), + "--to-output-prefix", + outputPrefix.String(), + "--from-local-dir", + fromLocalPath, + "--interface", + base64.StdEncoding.EncodeToString(b), + }, nil +} + +func DownloadCommandArgs(fromInputsPath, outputPrefix storage.DataReference, toLocalPath string, format core.DataLoadingConfig_LiteralMapFormat, inputInterface *core.VariableMap) ([]string, error) { + if inputInterface == nil { + return nil, fmt.Errorf("input Interface is required for CoPilot Downloader") + } + b, err := proto.Marshal(inputInterface) + if err != nil { + return nil, errors.Wrap(err, "failed to marshal given input interface") + } + return []string{ + "download", + "--from-remote", + fromInputsPath.String(), + "--to-output-prefix", + outputPrefix.String(), + "--to-local-dir", + toLocalPath, + "--format", + format.String(), + "--input-interface", + base64.StdEncoding.EncodeToString(b), + }, nil +} + +func DataVolume(name string, size *resource.Quantity) v1.Volume { + return v1.Volume{ + Name: name, + VolumeSource: v1.VolumeSource{ + EmptyDir: &v1.EmptyDirVolumeSource{ + Medium: v1.StorageMediumDefault, + SizeLimit: size, + }, + }, + } +} + +func CalculateStorageSize(requirements *v1.ResourceRequirements) *resource.Quantity { + if requirements == nil { + return 
nil + } + s, ok := requirements.Limits[v1.ResourceStorage] + if ok { + return &s + } + s, ok = requirements.Requests[v1.ResourceStorage] + if ok { + return &s + } + return nil +} + +func AddCoPilotToContainer(ctx context.Context, cfg config.FlyteCoPilotConfig, c *v1.Container, iFace *core.TypedInterface, pilot *core.DataLoadingConfig) error { + if pilot == nil || !pilot.Enabled { + return nil + } + logger.Infof(ctx, "Enabling CoPilot on main container [%s]", c.Name) + if c.SecurityContext == nil { + c.SecurityContext = &v1.SecurityContext{} + } + if c.SecurityContext.Capabilities == nil { + c.SecurityContext.Capabilities = &v1.Capabilities{} + } + + if iFace != nil { + if iFace.Inputs != nil && len(iFace.Inputs.Variables) > 0 { + inPath := cfg.DefaultInputDataPath + if pilot.GetInputPath() != "" { + inPath = pilot.GetInputPath() + } + + c.VolumeMounts = append(c.VolumeMounts, v1.VolumeMount{ + Name: cfg.InputVolumeName, + MountPath: inPath, + }) + } + + if iFace.Outputs != nil && len(iFace.Outputs.Variables) > 0 { + outPath := cfg.DefaultOutputPath + if pilot.GetOutputPath() != "" { + outPath = pilot.GetOutputPath() + } + c.VolumeMounts = append(c.VolumeMounts, v1.VolumeMount{ + Name: cfg.OutputVolumeName, + MountPath: outPath, + }) + } + } + return nil +} + +func AddCoPilotToPod(ctx context.Context, cfg config.FlyteCoPilotConfig, coPilotPod *v1.PodSpec, iFace *core.TypedInterface, taskExecMetadata core2.TaskExecutionMetadata, inputPaths io.InputFilePaths, outputPaths io.OutputFilePaths, pilot *core.DataLoadingConfig) error { + if pilot == nil || !pilot.Enabled { + return nil + } + + //nolint:protogetter + logger.Infof(ctx, "CoPilot Enabled for task [%s]", taskExecMetadata.GetTaskExecutionID().GetID().TaskId.GetName()) + if iFace != nil { + if iFace.Inputs != nil && len(iFace.Inputs.Variables) > 0 { + inPath := cfg.DefaultInputDataPath + if pilot.GetInputPath() != "" { + inPath = pilot.GetInputPath() + } + + // TODO we should calculate input volume size based on 
the size of the inputs which is known ahead of time. We should store that as part of the metadata + size := CalculateStorageSize(taskExecMetadata.GetOverrides().GetResources()) + logger.Infof(ctx, "Adding Input path [%s] of Size [%d] for Task [%s]", inPath, size, taskExecMetadata.GetTaskExecutionID().GetID().TaskId.Name) + inputsVolumeMount := v1.VolumeMount{ + Name: cfg.InputVolumeName, + MountPath: inPath, + } + + format := pilot.Format + // Lets add the InputsVolume + coPilotPod.Volumes = append(coPilotPod.Volumes, DataVolume(cfg.InputVolumeName, size)) + + // Lets add the Inputs init container + args, err := DownloadCommandArgs(inputPaths.GetInputPath(), outputPaths.GetOutputPrefixPath(), inPath, format, iFace.Inputs) + if err != nil { + return err + } + downloader, err := FlyteCoPilotContainer(flyteDownloaderContainerName, cfg, args, inputsVolumeMount) + if err != nil { + return err + } + coPilotPod.InitContainers = append(coPilotPod.InitContainers, downloader) + } + + if iFace.Outputs != nil && len(iFace.Outputs.Variables) > 0 { + outPath := cfg.DefaultOutputPath + if pilot.GetOutputPath() != "" { + outPath = pilot.GetOutputPath() + } + + size := CalculateStorageSize(taskExecMetadata.GetOverrides().GetResources()) + logger.Infof(ctx, "Adding Output path [%s] of size [%d] for Task [%s]", size, outPath, taskExecMetadata.GetTaskExecutionID().GetID().TaskId.Name) + + outputsVolumeMount := v1.VolumeMount{ + Name: cfg.OutputVolumeName, + MountPath: outPath, + } + + // Lets add the InputsVolume + coPilotPod.Volumes = append(coPilotPod.Volumes, DataVolume(cfg.OutputVolumeName, size)) + + // Lets add the Inputs init container + args, err := SidecarCommandArgs(outPath, outputPaths.GetOutputPrefixPath(), outputPaths.GetRawOutputPrefix(), cfg.Timeout.Duration, iFace) + if err != nil { + return err + } + sidecar, err := FlyteCoPilotContainer(flyteSidecarContainerName, cfg, args, outputsVolumeMount) + // Make it into sidecar container + restartPolicy := 
v1.ContainerRestartPolicyAlways + sidecar.RestartPolicy = &restartPolicy + if err != nil { + return err + } + // Let the sidecar container start before the downloader; it will ensure the signal watcher is started before the main container finishes. + coPilotPod.InitContainers = append([]v1.Container{sidecar}, coPilotPod.InitContainers...) + + timeoutSeconds := int64(cfg.Timeout.Duration.Seconds()) + coPilotPod.TerminationGracePeriodSeconds = &timeoutSeconds + } + } + + return nil +} diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/copilot_test.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/copilot_test.go new file mode 100644 index 0000000000..384b5cd26d --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/copilot_test.go @@ -0,0 +1,599 @@ +package flytek8s + +import ( + "context" + "encoding/base64" + "reflect" + "testing" + "time" + + "github.com/golang/protobuf/proto" + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + + pluginsCoreMock "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core/mocks" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/flytek8s/config" + pluginsIOMock "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/io/mocks" + config2 "github.com/flyteorg/flyte/v2/flytestdlib/config" + "github.com/flyteorg/flyte/v2/flytestdlib/storage" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +var resourceRequirements = &v1.ResourceRequirements{ + Limits: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("1024m"), + v1.ResourceStorage: resource.MustParse("100M"), + }, +} + +func TestFlyteCoPilotContainer(t *testing.T) { + cfg := config.FlyteCoPilotConfig{ + NamePrefix: "test-", + Image: "test", + DefaultInputDataPath: "/in", + DefaultOutputPath: "/out", + InputVolumeName: "inp", + OutputVolumeName: "out", + StartTimeout: 
config2.Duration{ + Duration: time.Second * 1, + }, + CPU: "1024m", + Memory: "1024Mi", + } + + t.Run("happy", func(t *testing.T) { + c, err := FlyteCoPilotContainer("x", cfg, []string{"hello"}) + assert.NoError(t, err) + assert.Equal(t, "test-x", c.Name) + assert.Equal(t, "test", c.Image) + assert.Equal(t, CopilotCommandArgs(storage.GetConfig()), c.Command) + assert.Equal(t, []string{"hello"}, c.Args) + assert.Equal(t, 0, len(c.VolumeMounts)) + assert.Equal(t, "/", c.WorkingDir) + assert.Equal(t, 2, len(c.Resources.Limits)) + assert.Equal(t, 2, len(c.Resources.Requests)) + }) + + t.Run("happy stow backend", func(t *testing.T) { + storage.GetConfig().Stow.Kind = "S3" + storage.GetConfig().Stow.Config = map[string]string{ + "path": "config.yaml", + } + c, err := FlyteCoPilotContainer("x", cfg, []string{"hello"}) + assert.NoError(t, err) + assert.Equal(t, "test-x", c.Name) + assert.Equal(t, "test", c.Image) + assert.Equal(t, CopilotCommandArgs(storage.GetConfig()), c.Command) + assert.Equal(t, []string{"hello"}, c.Args) + assert.Equal(t, 0, len(c.VolumeMounts)) + assert.Equal(t, "/", c.WorkingDir) + assert.Equal(t, 2, len(c.Resources.Limits)) + assert.Equal(t, 2, len(c.Resources.Requests)) + }) + + t.Run("happy-vols", func(t *testing.T) { + c, err := FlyteCoPilotContainer("x", cfg, []string{"hello"}, v1.VolumeMount{Name: "X", MountPath: "/"}) + assert.NoError(t, err) + assert.Equal(t, 1, len(c.VolumeMounts)) + }) + + t.Run("happy stow GCP backend", func(t *testing.T) { + storage.GetConfig().Type = storage.TypeStow + storage.GetConfig().InitContainer = "bucket" + storage.GetConfig().Stow.Kind = "google" + storage.GetConfig().Stow.Config = map[string]string{ + "json": "", + "project_id": "flyte-gcp", + "scope": "read_write", + } + assert.Equal(t, 12, len(CopilotCommandArgs(storage.GetConfig()))) + }) + + t.Run("storage override", func(t *testing.T) { + + storageConfigOverride := storage.Config{} + + storageConfigOverride.Type = storage.TypeStow + 
storageConfigOverride.InitContainer = "bucket" + storageConfigOverride.Stow.Kind = "google" + storageConfigOverride.Stow.Config = map[string]string{ + "json": "", + "project_id": "flyte-gcp", + } + cfg.StorageConfigOverride = &storageConfigOverride + + c, err := FlyteCoPilotContainer("x", cfg, []string{"hello"}, v1.VolumeMount{Name: "X", MountPath: "/"}) + assert.NoError(t, err) + assert.Equal(t, 1, len(c.VolumeMounts)) + + assert.ElementsMatch(t, c.Command, CopilotCommandArgs(&storageConfigOverride)) + }) + + t.Run("bad-res-cpu", func(t *testing.T) { + old := cfg.CPU + cfg.CPU = "x" + _, err := FlyteCoPilotContainer("x", cfg, []string{"hello"}, v1.VolumeMount{Name: "X", MountPath: "/"}) + assert.Error(t, err) + cfg.CPU = old + }) + + t.Run("bad-res-mem", func(t *testing.T) { + old := cfg.Memory + cfg.Memory = "x" + _, err := FlyteCoPilotContainer("x", cfg, []string{"hello"}, v1.VolumeMount{Name: "X", MountPath: "/"}) + assert.Error(t, err) + cfg.Memory = old + }) +} + +func TestDownloadCommandArgs(t *testing.T) { + _, err := DownloadCommandArgs("", "", "", core.DataLoadingConfig_YAML, nil) + assert.Error(t, err) + + iFace := &core.VariableMap{ + Variables: map[string]*core.Variable{ + "x": {Type: &core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_INTEGER}}}, + "y": {Type: &core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_INTEGER}}}, + }, + } + d, err := DownloadCommandArgs("s3://from", "s3://output-meta", "/to", core.DataLoadingConfig_JSON, iFace) + assert.NoError(t, err) + expected := []string{"download", "--from-remote", "s3://from", "--to-output-prefix", "s3://output-meta", "--to-local-dir", "/to", "--format", "JSON", "--input-interface", ""} + if assert.Len(t, d, len(expected)) { + for i := 0; i < len(expected)-1; i++ { + assert.Equal(t, expected[i], d[i]) + } + // We cannot compare the last one, as the interface is a map the order is not guaranteed. 
+ ifaceB64 := d[len(expected)-1] + serIFaceBytes, err := base64.StdEncoding.DecodeString(ifaceB64) + if assert.NoError(t, err) { + vm := &core.VariableMap{} + assert.NoError(t, proto.Unmarshal(serIFaceBytes, vm)) + assert.Len(t, vm.Variables, 2) + for k, v := range iFace.Variables { + v2, ok := vm.Variables[k] + assert.True(t, ok) + assert.Equal(t, v.Type.GetSimple(), v2.Type.GetSimple(), "for %s, types do not match", k) + } + } + } +} + +func TestSidecarCommandArgs(t *testing.T) { + _, err := SidecarCommandArgs("", "", "", time.Second*10, nil) + assert.Error(t, err) + + iFace := &core.TypedInterface{ + Outputs: &core.VariableMap{ + Variables: map[string]*core.Variable{ + "x": {Type: &core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_INTEGER}}}, + "y": {Type: &core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_INTEGER}}}, + }, + }, + } + d, err := SidecarCommandArgs("/from", "s3://output-meta", "s3://raw-output", time.Hour*1, iFace) + assert.NoError(t, err) + expected := []string{"sidecar", "--timeout", "1h0m0s", "--to-raw-output", "s3://raw-output", "--to-output-prefix", "s3://output-meta", "--from-local-dir", "/from", "--interface", ""} + if assert.Len(t, d, len(expected)) { + for i := 0; i < len(expected)-1; i++ { + assert.Equal(t, expected[i], d[i]) + } + // We cannot compare the last one, as the interface is a map the order is not guaranteed. 
+ ifaceB64 := d[len(expected)-1] + serIFaceBytes, err := base64.StdEncoding.DecodeString(ifaceB64) + if assert.NoError(t, err) { + if2 := &core.TypedInterface{} + assert.NoError(t, proto.Unmarshal(serIFaceBytes, if2)) + assert.Len(t, if2.Outputs.Variables, 2) + for k, v := range iFace.Outputs.Variables { + v2, ok := if2.Outputs.Variables[k] + assert.True(t, ok) + assert.Equal(t, v.Type.GetSimple(), v2.Type.GetSimple(), "for %s, types do not match", k) + } + } + } +} + +func TestDataVolume(t *testing.T) { + v := DataVolume("x", nil) + assert.Equal(t, "x", v.Name) + assert.NotNil(t, v.EmptyDir) + assert.Nil(t, v.EmptyDir.SizeLimit) + assert.Equal(t, v1.StorageMediumDefault, v.EmptyDir.Medium) + + q := resource.MustParse("1024Mi") + v = DataVolume("x", &q) + assert.NotNil(t, v.EmptyDir.SizeLimit) + assert.Equal(t, q, *v.EmptyDir.SizeLimit) +} + +func assertContainerHasVolumeMounts(t *testing.T, cfg config.FlyteCoPilotConfig, pilot *core.DataLoadingConfig, iFace *core.TypedInterface, c *v1.Container) { + if iFace != nil { + vmap := map[string]v1.VolumeMount{} + for _, v := range c.VolumeMounts { + vmap[v.Name] = v + } + if iFace.Inputs != nil { + path := cfg.DefaultInputDataPath + if pilot.InputPath != "" { + path = pilot.InputPath + } + v, found := vmap[cfg.InputVolumeName] + assert.Equal(t, path, v.MountPath, "Input Path does not match") + assert.True(t, found, "Input volume mount expected but not found!") + } + + if iFace.Outputs != nil { + path := cfg.DefaultOutputPath + if pilot.OutputPath != "" { + path = pilot.OutputPath + } + v, found := vmap[cfg.OutputVolumeName] + assert.Equal(t, path, v.MountPath, "Output Path does not match") + assert.True(t, found, "Output volume mount expected but not found!") + } + } else { + assert.Len(t, c.VolumeMounts, 0) + } +} + +func assertPodHasCoPilot(t *testing.T, cfg config.FlyteCoPilotConfig, pilot *core.DataLoadingConfig, iFace *core.TypedInterface, pod *v1.PodSpec) { + containers := append(pod.Containers, 
pod.InitContainers...) + for _, c := range containers { + if c.Name == "test" { + cntr := c + assertContainerHasVolumeMounts(t, cfg, pilot, iFace, &cntr) + } else { + if c.Name == cfg.NamePrefix+flyteDownloaderContainerName || c.Name == cfg.NamePrefix+flyteSidecarContainerName { + if iFace != nil { + vmap := map[string]v1.VolumeMount{} + for _, v := range c.VolumeMounts { + vmap[v.Name] = v + } + if iFace.Inputs != nil { + path := cfg.DefaultInputDataPath + if pilot != nil { + path = pilot.InputPath + } + v, found := vmap[cfg.InputVolumeName] + if c.Name == cfg.NamePrefix+flyteDownloaderContainerName { + assert.Equal(t, path, v.MountPath, "Input Path does not match") + assert.True(t, found, "Input volume mount expected but not found!") + } else { + assert.False(t, found, "Input volume mount not expected but found!") + } + } + + if iFace.Outputs != nil { + path := cfg.DefaultOutputPath + if pilot != nil { + path = pilot.OutputPath + } + v, found := vmap[cfg.OutputVolumeName] + if c.Name == cfg.NamePrefix+flyteDownloaderContainerName { + assert.False(t, found, "Output volume mount not expected but found on init container!") + } else { + assert.Equal(t, path, v.MountPath, "Output Path does not match") + assert.True(t, found, "Output volume mount expected but not found!") + } + } + + } else { + assert.Len(t, c.VolumeMounts, 0) + } + } + } + } +} + +func TestCalculateStorageSize(t *testing.T) { + twoG := resource.MustParse("2048Mi") + oneG := resource.MustParse("1024Mi") + tests := []struct { + name string + args *v1.ResourceRequirements + want *resource.Quantity + }{ + {"nil", nil, nil}, + {"empty", &v1.ResourceRequirements{}, nil}, + {"limits", &v1.ResourceRequirements{ + Limits: v1.ResourceList{ + v1.ResourceStorage: twoG, + }}, &twoG}, + {"requests", &v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceStorage: oneG, + }}, &oneG}, + + {"max", &v1.ResourceRequirements{ + Limits: v1.ResourceList{ + v1.ResourceStorage: twoG, + }, + Requests: 
v1.ResourceList{ + v1.ResourceStorage: oneG, + }}, &twoG}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := CalculateStorageSize(tt.args); !reflect.DeepEqual(got, tt.want) { + t.Errorf("CalculateStorageSize() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestAddCoPilotToContainer(t *testing.T) { + ctx := context.TODO() + cfg := config.FlyteCoPilotConfig{ + NamePrefix: "test-", + Image: "test", + DefaultInputDataPath: "/in", + DefaultOutputPath: "/out", + InputVolumeName: "inp", + OutputVolumeName: "out", + CPU: "1024m", + Memory: "1024Mi", + } + + t.Run("dataload-config-nil", func(t *testing.T) { + pilot := &core.DataLoadingConfig{} + assert.NoError(t, AddCoPilotToContainer(ctx, cfg, nil, nil, pilot)) + }) + + t.Run("disabled", func(t *testing.T) { + pilot := &core.DataLoadingConfig{} + assert.NoError(t, AddCoPilotToContainer(ctx, cfg, nil, nil, pilot)) + }) + + t.Run("nil-iface", func(t *testing.T) { + c := v1.Container{} + pilot := &core.DataLoadingConfig{Enabled: true} + assert.NoError(t, AddCoPilotToContainer(ctx, cfg, &c, nil, pilot)) + assertContainerHasVolumeMounts(t, cfg, pilot, nil, &c) + }) + + t.Run("happy-iface-empty-config", func(t *testing.T) { + + c := v1.Container{} + iface := &core.TypedInterface{ + Inputs: &core.VariableMap{ + Variables: map[string]*core.Variable{ + "x": {Type: &core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_INTEGER}}}, + "y": {Type: &core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_INTEGER}}}, + }, + }, + Outputs: &core.VariableMap{ + Variables: map[string]*core.Variable{ + "o": {Type: &core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_INTEGER}}}, + }, + }, + } + pilot := &core.DataLoadingConfig{Enabled: true} + assert.NoError(t, AddCoPilotToContainer(ctx, cfg, &c, iface, pilot)) + assertContainerHasVolumeMounts(t, cfg, pilot, iface, &c) + }) + + t.Run("happy-iface-set-config", func(t *testing.T) { + + c := 
v1.Container{} + iface := &core.TypedInterface{ + Inputs: &core.VariableMap{ + Variables: map[string]*core.Variable{ + "x": {Type: &core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_INTEGER}}}, + "y": {Type: &core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_INTEGER}}}, + }, + }, + Outputs: &core.VariableMap{ + Variables: map[string]*core.Variable{ + "o": {Type: &core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_INTEGER}}}, + }, + }, + } + pilot := &core.DataLoadingConfig{ + Enabled: true, + InputPath: "in", + OutputPath: "out", + } + assert.NoError(t, AddCoPilotToContainer(ctx, cfg, &c, iface, pilot)) + assertContainerHasVolumeMounts(t, cfg, pilot, iface, &c) + }) + + t.Run("happy-iface-inputs", func(t *testing.T) { + + c := v1.Container{} + iface := &core.TypedInterface{ + Inputs: &core.VariableMap{ + Variables: map[string]*core.Variable{ + "x": {Type: &core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_INTEGER}}}, + "y": {Type: &core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_INTEGER}}}, + }, + }, + } + pilot := &core.DataLoadingConfig{ + Enabled: true, + InputPath: "in", + OutputPath: "out", + } + assert.NoError(t, AddCoPilotToContainer(ctx, cfg, &c, iface, pilot)) + assertContainerHasVolumeMounts(t, cfg, pilot, iface, &c) + }) + + t.Run("happy-iface-outputs", func(t *testing.T) { + + c := v1.Container{} + iface := &core.TypedInterface{ + Outputs: &core.VariableMap{ + Variables: map[string]*core.Variable{ + "o": {Type: &core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_INTEGER}}}, + }, + }, + } + pilot := &core.DataLoadingConfig{ + Enabled: true, + InputPath: "in", + OutputPath: "out", + } + assert.NoError(t, AddCoPilotToContainer(ctx, cfg, &c, iface, pilot)) + assertContainerHasVolumeMounts(t, cfg, pilot, iface, &c) + }) +} + +func TestAddCoPilotToPod(t *testing.T) { + ctx := context.TODO() + cfg := config.FlyteCoPilotConfig{ + 
NamePrefix: "test-", + Image: "test", + DefaultInputDataPath: "/in", + DefaultOutputPath: "/out", + InputVolumeName: "inp", + OutputVolumeName: "out", + StartTimeout: config2.Duration{ + Duration: time.Second * 1, + }, + CPU: "1024m", + Memory: "1024Mi", + } + + taskMetadata := &pluginsCoreMock.TaskExecutionMetadata{} + taskMetadata.OnGetNamespace().Return("test-namespace") + taskMetadata.OnGetAnnotations().Return(map[string]string{"annotation-1": "val1"}) + taskMetadata.OnGetLabels().Return(map[string]string{"label-1": "val1"}) + taskMetadata.OnGetOwnerReference().Return(metav1.OwnerReference{ + Kind: "node", + Name: "blah", + }) + taskMetadata.OnGetK8sServiceAccount().Return("") + taskMetadata.OnGetOwnerID().Return(types.NamespacedName{ + Namespace: "test-namespace", + Name: "test-owner-name", + }) + taskMetadata.OnIsInterruptible().Return(false) + + tID := &pluginsCoreMock.TaskExecutionID{} + tID.OnGetID().Return(core.TaskExecutionIdentifier{ + TaskId: &core.Identifier{ + Name: "my-task", + }, + NodeExecutionId: &core.NodeExecutionIdentifier{ + ExecutionId: &core.WorkflowExecutionIdentifier{ + Name: "my_name", + Project: "my_project", + Domain: "my_domain", + }, + }, + }) + tID.OnGetGeneratedName().Return("name") + taskMetadata.OnGetTaskExecutionID().Return(tID) + + to := &pluginsCoreMock.TaskOverrides{} + to.OnGetResources().Return(resourceRequirements) + taskMetadata.OnGetOverrides().Return(to) + + inputPaths := &pluginsIOMock.InputFilePaths{} + inputs := "/base/inputs" + inputPaths.OnGetInputPrefixPath().Return(storage.DataReference(inputs)) + inputPaths.OnGetInputPath().Return(storage.DataReference(inputs + "/inputs.pb")) + + opath := &pluginsIOMock.OutputFilePaths{} + opath.OnGetRawOutputPrefix().Return("/raw") + opath.OnGetOutputPrefixPath().Return("/output") + + t.Run("happy", func(t *testing.T) { + pod := v1.PodSpec{} + iface := &core.TypedInterface{ + Inputs: &core.VariableMap{ + Variables: map[string]*core.Variable{ + "x": {Type: 
&core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_INTEGER}}}, + "y": {Type: &core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_INTEGER}}}, + }, + }, + Outputs: &core.VariableMap{ + Variables: map[string]*core.Variable{ + "o": {Type: &core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_INTEGER}}}, + }, + }, + } + pilot := &core.DataLoadingConfig{ + Enabled: true, + InputPath: "in", + OutputPath: "out", + } + assert.NoError(t, AddCoPilotToPod(ctx, cfg, &pod, iface, taskMetadata, inputPaths, opath, pilot)) + assert.Equal(t, pod.InitContainers[0].Name, cfg.NamePrefix+flyteSidecarContainerName) + assert.Equal(t, pod.InitContainers[1].Name, cfg.NamePrefix+flyteDownloaderContainerName) + assertPodHasCoPilot(t, cfg, pilot, iface, &pod) + }) + + t.Run("happy-nil-iface", func(t *testing.T) { + pod := v1.PodSpec{} + pilot := &core.DataLoadingConfig{ + Enabled: true, + InputPath: "in", + OutputPath: "out", + } + assert.NoError(t, AddCoPilotToPod(ctx, cfg, &pod, nil, taskMetadata, inputPaths, opath, pilot)) + assertPodHasCoPilot(t, cfg, pilot, nil, &pod) + }) + + t.Run("happy-inputs-only", func(t *testing.T) { + pod := v1.PodSpec{} + iface := &core.TypedInterface{ + Inputs: &core.VariableMap{ + Variables: map[string]*core.Variable{ + "x": {Type: &core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_INTEGER}}}, + "y": {Type: &core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_INTEGER}}}, + }, + }, + } + pilot := &core.DataLoadingConfig{ + Enabled: true, + InputPath: "in", + OutputPath: "out", + } + assert.NoError(t, AddCoPilotToPod(ctx, cfg, &pod, iface, taskMetadata, inputPaths, opath, pilot)) + assertPodHasCoPilot(t, cfg, pilot, iface, &pod) + }) + + t.Run("happy-outputs-only", func(t *testing.T) { + pod := v1.PodSpec{} + iface := &core.TypedInterface{ + Outputs: &core.VariableMap{ + Variables: map[string]*core.Variable{ + "o": {Type: &core.LiteralType{Type: 
&core.LiteralType_Simple{Simple: core.SimpleType_INTEGER}}}, + }, + }, + } + pilot := &core.DataLoadingConfig{ + Enabled: true, + InputPath: "in", + OutputPath: "out", + } + assert.NoError(t, AddCoPilotToPod(ctx, cfg, &pod, iface, taskMetadata, inputPaths, opath, pilot)) + assertPodHasCoPilot(t, cfg, pilot, iface, &pod) + }) + + t.Run("disabled", func(t *testing.T) { + pod := v1.PodSpec{} + iface := &core.TypedInterface{ + Outputs: &core.VariableMap{ + Variables: map[string]*core.Variable{ + "o": {Type: &core.LiteralType{Type: &core.LiteralType_Simple{Simple: core.SimpleType_INTEGER}}}, + }, + }, + } + pilot := &core.DataLoadingConfig{ + Enabled: false, + InputPath: "in", + OutputPath: "out", + } + assert.NoError(t, AddCoPilotToPod(ctx, cfg, &pod, iface, taskMetadata, inputPaths, opath, pilot)) + assert.Len(t, pod.Volumes, 0) + }) + + t.Run("nil", func(t *testing.T) { + assert.NoError(t, AddCoPilotToPod(ctx, cfg, nil, nil, taskMetadata, inputPaths, opath, nil)) + }) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/k8s_resource_adds.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/k8s_resource_adds.go new file mode 100644 index 0000000000..90a1cf24d1 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/k8s_resource_adds.go @@ -0,0 +1,218 @@ +package flytek8s + +import ( + "context" + "fmt" + "os" + "strconv" + "strings" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/util/sets" + + //propellerCfg "github.com/flyteorg/flyte/flytepropeller/pkg/controller/config" + pluginsCore "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/flytek8s/config" + "github.com/flyteorg/flyte/v2/flytestdlib/contextutils" +) + +const ( + flyteExecutionURL = "FLYTE_EXECUTION_URL" +) + +func GetContextEnvVars(ownerCtx context.Context) []v1.EnvVar { + var envVars []v1.EnvVar + + if ownerCtx == nil { + return envVars + } + + // Injecting useful env vars from the 
context + if wfName := contextutils.Value(ownerCtx, contextutils.WorkflowIDKey); wfName != "" { + envVars = append(envVars, + v1.EnvVar{ + Name: "FLYTE_INTERNAL_EXECUTION_WORKFLOW", + Value: wfName, + }, + ) + } + return envVars +} + +func GetExecutionEnvVars(id pluginsCore.TaskExecutionID, consoleURL string) []v1.EnvVar { + + if id == nil || id.GetID().NodeExecutionId == nil || id.GetID().NodeExecutionId.ExecutionId == nil { + return []v1.EnvVar{} + } + + // Execution level env variables. + nodeExecutionID := id.GetID().NodeExecutionId.ExecutionId + attemptNumber := strconv.Itoa(int(id.GetID().RetryAttempt)) + envVars := []v1.EnvVar{ + { + Name: "FLYTE_INTERNAL_EXECUTION_ID", + Value: nodeExecutionID.Name, + }, + { + Name: "FLYTE_INTERNAL_EXECUTION_PROJECT", + Value: nodeExecutionID.Project, + }, + { + Name: "FLYTE_INTERNAL_EXECUTION_DOMAIN", + Value: nodeExecutionID.Domain, + }, + { + // FLYTE_INTERNAL_POD_NAME + Name: "_F_PN", + ValueFrom: &v1.EnvVarSource{ + FieldRef: &v1.ObjectFieldSelector{ + FieldPath: "metadata.name", + }, + }, + }, + { + Name: "FLYTE_ATTEMPT_NUMBER", + Value: attemptNumber, + }, + } + + if len(consoleURL) > 0 { + consoleURL = strings.TrimRight(consoleURL, "/") + envVars = append(envVars, v1.EnvVar{ + Name: flyteExecutionURL, + Value: fmt.Sprintf("%s/projects/%s/domains/%s/executions/%s/nodeId/%s/nodes", consoleURL, nodeExecutionID.Project, nodeExecutionID.Domain, nodeExecutionID.Name, id.GetUniqueNodeID()), + }) + } + + // Task definition Level env variables. + if id.GetID().TaskId != nil { + taskID := id.GetID().TaskId + + envVars = append(envVars, + v1.EnvVar{ + Name: "FLYTE_INTERNAL_TASK_PROJECT", + Value: taskID.Project, + }, + v1.EnvVar{ + Name: "FLYTE_INTERNAL_TASK_DOMAIN", + Value: taskID.Domain, + }, + v1.EnvVar{ + Name: "FLYTE_INTERNAL_TASK_NAME", + Value: taskID.Name, + }, + v1.EnvVar{ + Name: "FLYTE_INTERNAL_TASK_VERSION", + Value: taskID.Version, + }, + // Historic Task Definition Level env variables. 
+ // Remove these once SDK is migrated to use the new ones. + v1.EnvVar{ + Name: "FLYTE_INTERNAL_PROJECT", + Value: taskID.Project, + }, + v1.EnvVar{ + Name: "FLYTE_INTERNAL_DOMAIN", + Value: taskID.Domain, + }, + v1.EnvVar{ + Name: "FLYTE_INTERNAL_NAME", + Value: taskID.Name, + }, + v1.EnvVar{ + Name: "FLYTE_INTERNAL_VERSION", + Value: taskID.Version, + }) + + } + return envVars +} + +func GetLiteralOffloadingEnvVars() []v1.EnvVar { + // TODO @pvditt fix + //propellerConfig := propellerCfg.GetConfig() + //if !propellerConfig.LiteralOffloadingConfig.Enabled { + // return []v1.EnvVar{} + //} + + envVars := []v1.EnvVar{} + //if propellerConfig.LiteralOffloadingConfig.MinSizeInMBForOffloading > 0 { + // envVars = append(envVars, + // v1.EnvVar{ + // Name: "_F_L_MIN_SIZE_MB", + // Value: strconv.FormatInt(propellerConfig.LiteralOffloadingConfig.MinSizeInMBForOffloading, 10), + // }, + // ) + //} + //if propellerConfig.LiteralOffloadingConfig.MaxSizeInMBForOffloading > 0 { + // envVars = append(envVars, + // v1.EnvVar{ + // Name: "_F_L_MAX_SIZE_MB", + // Value: strconv.FormatInt(propellerConfig.LiteralOffloadingConfig.MaxSizeInMBForOffloading, 10), + // }, + // ) + //} + return envVars +} + +func DecorateEnvVars(ctx context.Context, envVars []v1.EnvVar, envFroms []v1.EnvFromSource, taskEnvironmentVariables map[string]string, id pluginsCore.TaskExecutionID, consoleURL string) ([]v1.EnvVar, []v1.EnvFromSource) { + envVars = append(envVars, GetContextEnvVars(ctx)...) + envVars = append(envVars, GetExecutionEnvVars(id, consoleURL)...) + envVars = append(envVars, GetLiteralOffloadingEnvVars()...) 
+ + for k, v := range taskEnvironmentVariables { + envVars = append(envVars, v1.EnvVar{Name: k, Value: v}) + } + for k, v := range config.GetK8sPluginConfig().DefaultEnvVars { + envVars = append(envVars, v1.EnvVar{Name: k, Value: v}) + } + for k, envVarName := range config.GetK8sPluginConfig().DefaultEnvVarsFromEnv { + value := os.Getenv(envVarName) + envVars = append(envVars, v1.EnvVar{Name: k, Value: value}) + } + + for _, secretName := range config.GetK8sPluginConfig().DefaultEnvFromSecrets { + optional := true + secretRef := v1.SecretEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: secretName}, Optional: &optional} + envFroms = append(envFroms, v1.EnvFromSource{SecretRef: &secretRef}) + } + + for _, cmName := range config.GetK8sPluginConfig().DefaultEnvFromConfigMaps { + optional := true + cmRef := v1.ConfigMapEnvSource{LocalObjectReference: v1.LocalObjectReference{Name: cmName}, Optional: &optional} + envFroms = append(envFroms, v1.EnvFromSource{ConfigMapRef: &cmRef}) + } + + return envVars, envFroms +} + +func GetPodTolerations(interruptible bool, resourceRequirements ...v1.ResourceRequirements) []v1.Toleration { + // 1. Get the tolerations for the resources requested + var tolerations []v1.Toleration + resourceNames := sets.NewString() + for _, resources := range resourceRequirements { + for r := range resources.Limits { + resourceNames.Insert(r.String()) + } + for r := range resources.Requests { + resourceNames.Insert(r.String()) + } + } + + resourceTols := config.GetK8sPluginConfig().ResourceTolerations + for _, r := range resourceNames.UnsortedList() { + if v, ok := resourceTols[v1.ResourceName(r)]; ok { + tolerations = append(tolerations, v...) + } + } + + // 2. Get the tolerations for interruptible pods + if interruptible { + tolerations = append(tolerations, config.GetK8sPluginConfig().InterruptibleTolerations...) + } + + // 3. Add default tolerations + tolerations = append(tolerations, config.GetK8sPluginConfig().DefaultTolerations...) 
+ + return tolerations +} diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/k8s_resource_adds_test.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/k8s_resource_adds_test.go new file mode 100644 index 0000000000..ed4f2b803c --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/k8s_resource_adds_test.go @@ -0,0 +1,403 @@ +package flytek8s + +import ( + "context" + "os" + "reflect" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/stretchr/testify/assert" + v12 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + + //propellerCfg "github.com/flyteorg/flyte/flytepropeller/pkg/controller/config" + pluginsCore "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/flytek8s/config" + "github.com/flyteorg/flyte/v2/flytestdlib/contextutils" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +func TestGetExecutionEnvVars(t *testing.T) { + mock := mockTaskExecutionIdentifier{} + tests := []struct { + name string + expectedEnvVars int + consoleURL string + expectedEnvVar *v12.EnvVar + }{ + { + "no-console-url", + 13, + "", + nil, + }, + { + "with-console-url", + 14, + "scheme://host/path", + &v12.EnvVar{ + Name: "FLYTE_EXECUTION_URL", + Value: "scheme://host/path/projects/proj/domains/domain/executions/name/nodeId/unique-node-id/nodes", + }, + }, + { + "with-console-url-ending-in-single-slash", + 14, + "scheme://host/path/", + &v12.EnvVar{ + Name: "FLYTE_EXECUTION_URL", + Value: "scheme://host/path/projects/proj/domains/domain/executions/name/nodeId/unique-node-id/nodes", + }, + }, + { + "with-console-url-ending-in-multiple-slashes", + 14, + "scheme://host/path////", + &v12.EnvVar{ + Name: "FLYTE_EXECUTION_URL", + Value: "scheme://host/path/projects/proj/domains/domain/executions/name/nodeId/unique-node-id/nodes", + }, + }, + } + for _, tt := range tests { + envVars := GetExecutionEnvVars(mock, tt.consoleURL) + assert.Len(t, 
		// (continuation of TestGetExecutionEnvVars: the assert.Len call and
		// per-case expected-env-var check — the loop head is in the previous chunk)
		envVars, tt.expectedEnvVars)
		if tt.expectedEnvVar != nil {
			// Index 5 is where the console-URL env var lands — TODO confirm
			// against GetExecutionEnvVars ordering.
			assert.True(t, proto.Equal(&envVars[5], tt.expectedEnvVar))
		}
	}
}

// TestGetTolerationsForResources is a table-driven test for GetPodTolerations:
// it sets the plugin config's ResourceTolerations/DefaultTolerations per case
// and checks the returned tolerations by length and membership.
func TestGetTolerationsForResources(t *testing.T) {
	var empty []v12.Toleration
	var emptyConfig map[v12.ResourceName][]v12.Toleration

	// Toleration configured for GPU resources.
	tolGPU := v12.Toleration{
		Key:      "flyte/gpu",
		Value:    "dedicated",
		Operator: v12.TolerationOpEqual,
		Effect:   v12.TaintEffectNoSchedule,
	}

	// Toleration configured for ephemeral-storage resources.
	tolEphemeralStorage := v12.Toleration{
		Key:      "ephemeral-storage",
		Value:    "dedicated",
		Operator: v12.TolerationOpExists,
		Effect:   v12.TaintEffectNoSchedule,
	}

	type args struct {
		resources v12.ResourceRequirements
	}
	tests := []struct {
		name        string // sub-test name
		args        args   // resource requirements passed to GetPodTolerations
		setVal      map[v12.ResourceName][]v12.Toleration
		setDefaults []v12.Toleration
		want        []v12.Toleration
	}{
		{
			"no-tolerations-limits",
			args{
				v12.ResourceRequirements{
					Limits: v12.ResourceList{
						v12.ResourceCPU:              resource.MustParse("1024m"),
						v12.ResourceEphemeralStorage: resource.MustParse("100M"),
					},
				},
			},
			emptyConfig,
			nil,
			empty,
		},
		{
			"no-tolerations-req",
			args{
				v12.ResourceRequirements{
					Requests: v12.ResourceList{
						v12.ResourceCPU:              resource.MustParse("1024m"),
						v12.ResourceEphemeralStorage: resource.MustParse("100M"),
					},
				},
			},
			emptyConfig,
			nil,
			empty,
		},
		{
			"no-tolerations-both",
			args{
				v12.ResourceRequirements{
					Limits: v12.ResourceList{
						v12.ResourceCPU:              resource.MustParse("1024m"),
						v12.ResourceEphemeralStorage: resource.MustParse("100M"),
					},
					Requests: v12.ResourceList{
						v12.ResourceCPU:              resource.MustParse("1024m"),
						v12.ResourceEphemeralStorage: resource.MustParse("100M"),
					},
				},
			},
			emptyConfig,
			nil,
			empty,
		},
		{
			"tolerations-limits",
			args{
				v12.ResourceRequirements{
					Limits: v12.ResourceList{
						v12.ResourceCPU:              resource.MustParse("1024m"),
						v12.ResourceEphemeralStorage: resource.MustParse("100M"),
					},
				},
			},
			map[v12.ResourceName][]v12.Toleration{
				v12.ResourceEphemeralStorage: {tolEphemeralStorage},
				ResourceNvidiaGPU:            {tolGPU},
			},
			nil,
			[]v12.Toleration{tolEphemeralStorage},
		},
		{
			"tolerations-req",
			args{
				v12.ResourceRequirements{
					Requests: v12.ResourceList{
						v12.ResourceCPU:              resource.MustParse("1024m"),
						v12.ResourceEphemeralStorage: resource.MustParse("100M"),
					},
				},
			},
			map[v12.ResourceName][]v12.Toleration{
				v12.ResourceEphemeralStorage: {tolEphemeralStorage},
				ResourceNvidiaGPU:            {tolGPU},
			},
			nil,
			[]v12.Toleration{tolEphemeralStorage},
		},
		{
			"tolerations-both",
			args{
				v12.ResourceRequirements{
					Limits: v12.ResourceList{
						v12.ResourceCPU:              resource.MustParse("1024m"),
						v12.ResourceEphemeralStorage: resource.MustParse("100M"),
					},
					Requests: v12.ResourceList{
						v12.ResourceCPU:              resource.MustParse("1024m"),
						v12.ResourceEphemeralStorage: resource.MustParse("100M"),
					},
				},
			},
			map[v12.ResourceName][]v12.Toleration{
				v12.ResourceEphemeralStorage: {tolEphemeralStorage},
				ResourceNvidiaGPU:            {tolGPU},
			},
			nil,
			[]v12.Toleration{tolEphemeralStorage},
		},
		{
			// NOTE(review): this case name duplicates an earlier case;
			// it actually exercises GPU limits — consider renaming.
			"no-tolerations-both",
			args{
				v12.ResourceRequirements{
					Limits: v12.ResourceList{
						v12.ResourceCPU:              resource.MustParse("1024m"),
						v12.ResourceEphemeralStorage: resource.MustParse("100M"),
						ResourceNvidiaGPU:            resource.MustParse("1"),
					},
					Requests: v12.ResourceList{
						v12.ResourceCPU:              resource.MustParse("1024m"),
						v12.ResourceEphemeralStorage: resource.MustParse("100M"),
					},
				},
			},
			map[v12.ResourceName][]v12.Toleration{
				v12.ResourceEphemeralStorage: {tolEphemeralStorage},
				ResourceNvidiaGPU:            {tolGPU},
			},
			nil,
			[]v12.Toleration{tolEphemeralStorage, tolGPU},
		},
		{
			"default-tolerations",
			args{},
			nil,
			[]v12.Toleration{tolEphemeralStorage},
			[]v12.Toleration{tolEphemeralStorage},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ResourceTolerations: tt.setVal,
				DefaultTolerations: tt.setDefaults}))
			// Compare by length, then by membership (order-insensitive).
			if got := GetPodTolerations(true, tt.args.resources); len(got) != len(tt.want) {
				t.Errorf("GetPodTolerations() = %v, want %v", got, tt.want)
			} else {
				for _, tol := range tt.want {
					assert.Contains(t, got, tol)
				}
			}
		})
	}
}

// Fixture identifier used by mockTaskExecutionIdentifier.GetID.
var testTaskExecutionIdentifier = core.TaskExecutionIdentifier{
	TaskId: &core.Identifier{
		ResourceType: core.ResourceType_TASK,
		Project:      "proj",
		Domain:       "domain",
		Name:         "name",
	},
	RetryAttempt: 1,
	NodeExecutionId: &core.NodeExecutionIdentifier{
		NodeId: "nodeId",
		ExecutionId: &core.WorkflowExecutionIdentifier{
			Project: "proj",
			Domain:  "domain",
			Name:    "name",
		},
	},
}

// mockTaskExecutionIdentifier is a minimal stub of pluginsCore.TaskExecutionID
// returning fixed values for tests.
type mockTaskExecutionIdentifier struct{}

// GetID returns the fixture TaskExecutionIdentifier above.
func (m mockTaskExecutionIdentifier) GetID() core.TaskExecutionIdentifier {
	return testTaskExecutionIdentifier
}

// GetGeneratedNameWith returns a fixed name regardless of the length bounds.
func (m mockTaskExecutionIdentifier) GetGeneratedNameWith(minLength, maxLength int) (string, error) {
	return "task-exec-name", nil
}

// GetGeneratedName returns a fixed generated name.
func (m mockTaskExecutionIdentifier) GetGeneratedName() string {
	return "task-exec-name"
}

// GetUniqueNodeID returns a fixed unique node id.
func (m mockTaskExecutionIdentifier) GetUniqueNodeID() string {
	return "unique-node-id"
}

// TestDecorateEnvVars: builds the expected env-var list and checks
// DecorateEnvVars against it. (Continues past the end of this chunk;
// additionalEnv / emptyEnvVar / envVarsFromEnv are presumably consumed
// below — cannot confirm from here.)
func TestDecorateEnvVars(t *testing.T) {
	ctx := context.Background()
	ctx = contextutils.WithWorkflowID(ctx, "fake_workflow")

	defaultEnv := []v12.EnvVar{
		{
			Name:  "x",
			Value: "y",
		},
	}
	additionalEnv := map[string]string{
		"k": "v",
	}
	var emptyEnvVar map[string]string
	envVarsFromEnv := map[string]string{
		"k": "value",
	}

	// Set the process env var that DefaultEnvVarsFromEnv resolves, restoring
	// the original value on exit.
	originalEnvVal := os.Getenv("value")
	err := os.Setenv("value", "v")
	if err != nil {
		t.Fatalf("failed to set env var 'value'; %v", err)
	}
	defer os.Setenv("value", originalEnvVal)

	expected := append(defaultEnv, GetContextEnvVars(ctx)...)
	expected = append(expected, GetExecutionEnvVars(mockTaskExecutionIdentifier{}, "")...)
+ expectedOffloaded := append(expected, v12.EnvVar{Name: "_F_L_MIN_SIZE_MB", Value: "1"}) + expectedOffloaded = append(expectedOffloaded, v12.EnvVar{Name: "_F_L_MAX_SIZE_MB", Value: "42"}) + + aggregated := append(expected, v12.EnvVar{Name: "k", Value: "v"}) + type args struct { + envVars []v12.EnvVar + id pluginsCore.TaskExecutionID + } + tests := []struct { + name string + args args + additionEnvVar map[string]string + additionEnvVarFromEnv map[string]string + offloadingEnabled bool + offloadingEnvVar map[string]string + executionEnvVar map[string]string + consoleURL string + want []v12.EnvVar + }{ + { + "no-additional", + args{envVars: defaultEnv, id: mockTaskExecutionIdentifier{}}, + emptyEnvVar, + emptyEnvVar, + false, + emptyEnvVar, + emptyEnvVar, + "", + expected, + }, + // TODO @pvditt + //{ + // "no-additional-offloading-enabled", + // args{envVars: defaultEnv, id: mockTaskExecutionIdentifier{}}, + // emptyEnvVar, + // emptyEnvVar, + // true, + // emptyEnvVar, + // emptyEnvVar, + // "", + // expectedOffloaded, + //}, + { + "with-additional", + args{envVars: defaultEnv, id: mockTaskExecutionIdentifier{}}, + additionalEnv, + emptyEnvVar, + false, + emptyEnvVar, + emptyEnvVar, + "", + aggregated, + }, + { + "from-env", + args{envVars: defaultEnv, id: mockTaskExecutionIdentifier{}}, + emptyEnvVar, + envVarsFromEnv, + false, + emptyEnvVar, + emptyEnvVar, + "", + aggregated, + }, + { + "from-execution-metadata", + args{envVars: defaultEnv, id: mockTaskExecutionIdentifier{}}, + emptyEnvVar, + emptyEnvVar, + false, + emptyEnvVar, + additionalEnv, + "", + aggregated, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + // TODO @pvditt + //cfg := propellerCfg.GetConfig() + //cfg.LiteralOffloadingConfig = propellerCfg.LiteralOffloadingConfig{ + // Enabled: tt.offloadingEnabled, + // MinSizeInMBForOffloading: 1, + // MaxSizeInMBForOffloading: 42, + //} + + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + DefaultEnvVars: 
tt.additionEnvVar, + DefaultEnvVarsFromEnv: tt.additionEnvVarFromEnv, + })) + if got, _ := DecorateEnvVars(ctx, tt.args.envVars, nil, tt.executionEnvVar, tt.args.id, tt.consoleURL); !reflect.DeepEqual(got, tt.want) { + t.Errorf("DecorateEnvVars() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/plugin_exec_context.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/plugin_exec_context.go new file mode 100644 index 0000000000..69900e1a42 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/plugin_exec_context.go @@ -0,0 +1,182 @@ +package flytek8s + +import ( + "context" + "reflect" + + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/client" + + pluginsCore "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +type pluginTaskOverrides struct { + pluginsCore.TaskOverrides + resources *v1.ResourceRequirements + extendedResources *core.ExtendedResources +} + +func (to *pluginTaskOverrides) GetResources() *v1.ResourceRequirements { + if to.resources != nil { + return to.resources + } + return to.TaskOverrides.GetResources() +} + +func (to *pluginTaskOverrides) GetExtendedResources() *core.ExtendedResources { + if to.extendedResources != nil { + return to.extendedResources + } + return to.TaskOverrides.GetExtendedResources() +} + +func (to *pluginTaskOverrides) GetContainerImage() string { + return to.TaskOverrides.GetContainerImage() +} + +func (to *pluginTaskOverrides) GetPodTemplate() *core.K8SPod { + return to.TaskOverrides.GetPodTemplate() +} + +type pluginTaskExecutionMetadata struct { + pluginsCore.TaskExecutionMetadata + interruptible *bool + overrides *pluginTaskOverrides +} + +func (tm *pluginTaskExecutionMetadata) IsInterruptible() bool { + if tm.interruptible != nil { + return *tm.interruptible + } + return 
tm.TaskExecutionMetadata.IsInterruptible()
}

// GetOverrides returns the plugin-supplied overrides when present, falling
// back to the overrides of the wrapped TaskExecutionMetadata.
func (tm *pluginTaskExecutionMetadata) GetOverrides() pluginsCore.TaskOverrides {
	if tm.overrides != nil {
		return tm.overrides
	}
	return tm.TaskExecutionMetadata.GetOverrides()
}

// pluginTaskExecutionContext wraps a TaskExecutionContext so plugins can
// locally override metadata (interruptibility, resources, extended resources)
// without mutating the underlying context.
type pluginTaskExecutionContext struct {
	pluginsCore.TaskExecutionContext
	metadata *pluginTaskExecutionMetadata
}

// TaskExecutionMetadata returns the wrapping metadata when present, otherwise
// the metadata of the underlying context.
func (tc *pluginTaskExecutionContext) TaskExecutionMetadata() pluginsCore.TaskExecutionMetadata {
	if tc.metadata != nil {
		return tc.metadata
	}
	return tc.TaskExecutionContext.TaskExecutionMetadata()
}

// PluginTaskExecutionContextOption mutates a pluginTaskExecutionContext during
// construction in NewPluginTaskExecutionContext.
type PluginTaskExecutionContextOption func(*pluginTaskExecutionContext)

// ensureMetadata lazily initializes the metadata wrapper, preserving the
// underlying context's metadata as the fallback. Shared by all options to
// avoid repeating the lazy-init scaffolding.
func ensureMetadata(tc *pluginTaskExecutionContext) *pluginTaskExecutionMetadata {
	if tc.metadata == nil {
		tc.metadata = &pluginTaskExecutionMetadata{
			TaskExecutionMetadata: tc.TaskExecutionContext.TaskExecutionMetadata(),
		}
	}
	return tc.metadata
}

// ensureOverrides lazily initializes the overrides wrapper on the metadata,
// preserving the wrapped metadata's overrides as the fallback.
func ensureOverrides(tc *pluginTaskExecutionContext) *pluginTaskOverrides {
	md := ensureMetadata(tc)
	if md.overrides == nil {
		md.overrides = &pluginTaskOverrides{
			TaskOverrides: md.TaskExecutionMetadata.GetOverrides(),
		}
	}
	return md.overrides
}

// WithInterruptible overrides the interruptible flag reported by the context.
func WithInterruptible(v bool) PluginTaskExecutionContextOption {
	return func(tc *pluginTaskExecutionContext) {
		ensureMetadata(tc).interruptible = &v
	}
}

// WithResources overrides the container resource requirements reported by the
// context's task overrides.
func WithResources(r *v1.ResourceRequirements) PluginTaskExecutionContextOption {
	return func(tc *pluginTaskExecutionContext) {
		ensureOverrides(tc).resources = r
	}
}

// WithExtendedResources overrides the extended resources (GPU accelerator,
// shared memory, ...) reported by the context's task overrides.
func WithExtendedResources(er *core.ExtendedResources) PluginTaskExecutionContextOption {
	return func(tc *pluginTaskExecutionContext) {
		ensureOverrides(tc).extendedResources = er
	}
}

// NewPluginTaskExecutionContext returns a TaskExecutionContext whose metadata
// and overrides can be selectively replaced via the provided options. The
// wrapped context itself is never mutated.
func NewPluginTaskExecutionContext(tc pluginsCore.TaskExecutionContext, options ...PluginTaskExecutionContextOption) pluginsCore.TaskExecutionContext {
	tm := tc.TaskExecutionMetadata()
	to := tm.GetOverrides()
	ctx := &pluginTaskExecutionContext{
		TaskExecutionContext: tc,
		metadata: &pluginTaskExecutionMetadata{
			TaskExecutionMetadata: tm,
			overrides: &pluginTaskOverrides{
				TaskOverrides: to,
			},
		},
	}
	for _, o := range options {
		o(ctx)
	}
	return ctx
}

// NodeExecutionK8sReader scopes a cache-backed client.Reader to the objects
// labeled for a single node execution (namespace + "execution-id" +
// "node-id" labels).
type NodeExecutionK8sReader struct {
	namespace   string
	executionID string
	nodeID      string
	client      client.Reader
}

// NewNodeExecutionK8sReader builds a reader scoped to the node execution
// identified by the task execution metadata, backed by the client's cache.
func NewNodeExecutionK8sReader(meta pluginsCore.TaskExecutionMetadata, client pluginsCore.KubeClient) *NodeExecutionK8sReader {
	tID := meta.GetTaskExecutionID().GetID()
	return &NodeExecutionK8sReader{
		namespace:   meta.GetNamespace(),
		executionID: tID.GetNodeExecutionId().GetExecutionId().GetName(),
		nodeID:      tID.GetNodeExecutionId().GetNodeId(),
		client:      client.GetCache(),
	}
}

// Get fetches the object from the scoped namespace, and reports NotFound when
// the object does not carry this reader's execution-id/node-id labels, so a
// caller can never observe another execution's objects.
func (n NodeExecutionK8sReader) Get(ctx context.Context, key client.ObjectKey, obj client.Object, opts ...client.GetOption) error {
	key.Namespace = n.namespace
	err := n.client.Get(ctx, key, obj, opts...)
	if err != nil {
		return err
	}

	if obj.GetLabels()["node-id"] != n.nodeID || obj.GetLabels()["execution-id"] != n.executionID {
		// Zero out obj so no partial data leaks to the caller, then simulate
		// a not-found error.
		p := reflect.ValueOf(obj).Elem()
		p.Set(reflect.Zero(p.Type()))
		kind := obj.GetObjectKind().GroupVersionKind()
		return errors.NewNotFound(schema.GroupResource{Group: kind.Group, Resource: kind.Kind}, obj.GetName())
	}
	return nil
}

// List lists objects restricted to the scoped namespace and this reader's
// execution-id/node-id labels.
func (n NodeExecutionK8sReader) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
	opts = append(opts, client.InNamespace(n.namespace), client.MatchingLabels{
		"execution-id": n.executionID,
		"node-id":      n.nodeID,
	})
	return n.client.List(ctx, list, opts...)
+} diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/plugin_exec_context_test.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/plugin_exec_context_test.go new file mode 100644 index 0000000000..99a122f759 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/plugin_exec_context_test.go @@ -0,0 +1,122 @@ +package flytek8s + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func Test_NodeExecutionK8sReader(t *testing.T) { + execID := "abc123" + nodeID := "n0" + typeMeta := metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "v1", + } + pod1 := v1.Pod{ + TypeMeta: typeMeta, + ObjectMeta: metav1.ObjectMeta{ + Name: "a", + Namespace: namespace, + Labels: map[string]string{ + "some-label": "bar", + "execution-id": execID, + "node-id": nodeID, + }, + }, + } + pod2 := v1.Pod{ + TypeMeta: typeMeta, + ObjectMeta: metav1.ObjectMeta{ + Name: "b", + Namespace: namespace, + Labels: map[string]string{ + "execution-id": execID, + "node-id": nodeID, + }, + }, + } + pod3 := v1.Pod{ + TypeMeta: typeMeta, + ObjectMeta: metav1.ObjectMeta{ + Name: "c", + Namespace: "wrong", + Labels: map[string]string{ + "execution-id": execID, + "node-id": nodeID, + }, + }, + } + pod4 := v1.Pod{ + TypeMeta: typeMeta, + ObjectMeta: metav1.ObjectMeta{ + Name: "d", + Namespace: namespace, + Labels: map[string]string{ + "execution-id": "wrong", + "node-id": nodeID, + }, + }, + } + pod5 := v1.Pod{ + TypeMeta: typeMeta, + ObjectMeta: metav1.ObjectMeta{ + Name: "e", + Namespace: namespace, + Labels: map[string]string{ + "execution-id": execID, + "node-id": "wrong", + }, + }, + } + pods := []runtime.Object{&pod1, &pod2, &pod3, &pod4, &pod5} + nodeExecReader := NodeExecutionK8sReader{ + namespace: namespace, + 
executionID: execID, + nodeID: nodeID, + client: fake.NewFakeClient(pods...), + } + ctx := context.TODO() + + t.Run("get", func(t *testing.T) { + p := v1.Pod{} + + err := nodeExecReader.Get(ctx, client.ObjectKeyFromObject(&pod1), &p) + + assert.NoError(t, err) + assert.Equal(t, pod1, p) + }) + + t.Run("get-not-found", func(t *testing.T) { + p := v1.Pod{} + + for _, input := range []*v1.Pod{&pod3, &pod4, &pod5} { + err := nodeExecReader.Get(ctx, client.ObjectKeyFromObject(input), &p) + + assert.True(t, errors.IsNotFound(err)) + } + }) + + t.Run("list", func(t *testing.T) { + p := &v1.PodList{} + expected := &v1.PodList{ + TypeMeta: metav1.TypeMeta{ + Kind: "PodList", + APIVersion: "v1", + }, + Items: []v1.Pod{pod1, pod2}, + } + + err := nodeExecReader.List(ctx, p) + + assert.NoError(t, err) + assert.Equal(t, expected, p) + }) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/pod_helper.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/pod_helper.go new file mode 100644 index 0000000000..ca1a8cc674 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/pod_helper.go @@ -0,0 +1,1549 @@ +package flytek8s + +import ( + "context" + "errors" + "fmt" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "github.com/imdario/mergo" + "google.golang.org/protobuf/types/known/timestamppb" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/sets" + "sigs.k8s.io/controller-runtime/pkg/client" + + pluginserrors "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/errors" + pluginsCore "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core/template" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/flytek8s/config" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/k8s" + 
"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/utils" + // TODO @pvditt fix + //propellerCfg "github.com/flyteorg/flyte/flytepropeller/pkg/controller/config" + "github.com/flyteorg/flyte/v2/flytestdlib/logger" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +const PodKind = "pod" +const OOMKilled = "OOMKilled" +const Interrupted = "Interrupted" +const PrimaryContainerNotFound = "PrimaryContainerNotFound" +const SIGKILL = 137 + +// unsignedSIGKILL = 256 - 9 +const unsignedSIGKILL = 247 + +const defaultContainerTemplateName = "default" +const defaultInitContainerTemplateName = "default-init" +const primaryContainerTemplateName = "primary" +const primaryInitContainerTemplateName = "primary-init" +const PrimaryContainerKey = "primary_container_name" +const FlyteEnableVscode = "_F_E_VS" + +var retryableStatusReasons = sets.NewString( + // Reasons that indicate the node was preempted aggressively. + // Kubelet can miss deleting the pod prior to the node being shutdown. + "Shutdown", + "Terminated", + "NodeShutdown", + // kubelet admission rejects the pod before the node gets assigned appropriate labels. + "NodeAffinity", +) + +// AddRequiredNodeSelectorRequirements adds the provided v1.NodeSelectorRequirement +// objects to an existing v1.Affinity object. If there are no existing required +// node selectors, the new v1.NodeSelectorRequirement will be added as-is. +// However, if there are existing required node selectors, we iterate over all existing +// node selector terms and append the node selector requirement. Note that multiple node +// selector terms are OR'd, and match expressions within a single node selector term +// are AND'd during scheduling. 
// See: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity
// NOTE(review): the variadic parameter `new` shadows the Go builtin; consider renaming.
func AddRequiredNodeSelectorRequirements(base *v1.Affinity, new ...v1.NodeSelectorRequirement) {
	// Lazily create the nested affinity structures so callers can pass a
	// freshly zeroed *v1.Affinity.
	if base.NodeAffinity == nil {
		base.NodeAffinity = &v1.NodeAffinity{}
	}
	if base.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution == nil {
		base.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = &v1.NodeSelector{}
	}
	if len(base.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms) > 0 {
		// Terms are OR'd by the scheduler, so the requirement must be appended
		// to every term to stay mandatory regardless of which term matches.
		nodeSelectorTerms := base.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
		for i := range nodeSelectorTerms {
			nst := &nodeSelectorTerms[i]
			nst.MatchExpressions = append(nst.MatchExpressions, new...)
		}
	} else {
		// No existing terms: start a single term holding the requirements.
		base.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms = []v1.NodeSelectorTerm{v1.NodeSelectorTerm{MatchExpressions: new}}
	}
}

// AddPreferredNodeSelectorRequirements appends the provided v1.NodeSelectorRequirement
// objects to an existing v1.Affinity object's list of preferred scheduling terms.
// See: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity-weight
// for how weights are used during scheduling.
func AddPreferredNodeSelectorRequirements(base *v1.Affinity, weight int32, new ...v1.NodeSelectorRequirement) {
	if base.NodeAffinity == nil {
		base.NodeAffinity = &v1.NodeAffinity{}
	}
	// All requirements are grouped into one preferred term with the given weight.
	base.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append(
		base.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution,
		v1.PreferredSchedulingTerm{
			Weight: weight,
			Preference: v1.NodeSelectorTerm{
				MatchExpressions: new,
			},
		},
	)
}

// ApplyInterruptibleNodeSelectorRequirement configures the node selector requirement of the node-affinity using the configuration specified.
// A no-op when the matching (non-)interruptible requirement is absent from the
// plugin configuration.
func ApplyInterruptibleNodeSelectorRequirement(interruptible bool, affinity *v1.Affinity) {
	// Determine node selector terms to add to node affinity
	var nodeSelectorRequirement v1.NodeSelectorRequirement
	if interruptible {
		if config.GetK8sPluginConfig().InterruptibleNodeSelectorRequirement == nil {
			return
		}
		nodeSelectorRequirement = *config.GetK8sPluginConfig().InterruptibleNodeSelectorRequirement
	} else {
		if config.GetK8sPluginConfig().NonInterruptibleNodeSelectorRequirement == nil {
			return
		}
		nodeSelectorRequirement = *config.GetK8sPluginConfig().NonInterruptibleNodeSelectorRequirement
	}

	AddRequiredNodeSelectorRequirements(affinity, nodeSelectorRequirement)
}

// ApplyInterruptibleNodeAffinity configures the node-affinity for the pod using the configuration specified.
func ApplyInterruptibleNodeAffinity(interruptible bool, podSpec *v1.PodSpec) {
	if podSpec.Affinity == nil {
		podSpec.Affinity = &v1.Affinity{}
	}

	ApplyInterruptibleNodeSelectorRequirement(interruptible, podSpec.Affinity)
}

// Specialized merging of overrides into a base *core.ExtendedResources object. Note
// that doing a nested merge may not be the intended behavior all the time, so we
// handle each field separately here.
+func ApplyExtendedResourcesOverrides(base, overrides *core.ExtendedResources) *core.ExtendedResources { + // Handle case where base might be nil + var new *core.ExtendedResources + if base == nil { + new = &core.ExtendedResources{} + } else { + new = proto.Clone(base).(*core.ExtendedResources) + } + + // No overrides found + if overrides == nil { + return new + } + + // GPU Accelerator + if overrides.GetGpuAccelerator() != nil { + new.GpuAccelerator = overrides.GetGpuAccelerator() + } + + if overrides.GetSharedMemory() != nil { + new.SharedMemory = overrides.GetSharedMemory() + } + + return new +} + +func ApplySharedMemory(podSpec *v1.PodSpec, primaryContainerName string, SharedMemory *core.SharedMemory) error { + sharedMountName := SharedMemory.GetMountName() + sharedMountPath := SharedMemory.GetMountPath() + if sharedMountName == "" { + return pluginserrors.Errorf(pluginserrors.BadTaskSpecification, "mount name is not set") + } + if sharedMountPath == "" { + return pluginserrors.Errorf(pluginserrors.BadTaskSpecification, "mount path is not set") + } + + var primaryContainer *v1.Container + for index, container := range podSpec.Containers { + if container.Name == primaryContainerName { + primaryContainer = &podSpec.Containers[index] + } + } + if primaryContainer == nil { + return pluginserrors.Errorf(pluginserrors.BadTaskSpecification, "Unable to find primary container") + } + + for _, volume := range podSpec.Volumes { + if volume.Name == sharedMountName { + return pluginserrors.Errorf(pluginserrors.BadTaskSpecification, "A volume is already named %v in pod spec", sharedMountName) + } + } + + for _, volume_mount := range primaryContainer.VolumeMounts { + if volume_mount.Name == sharedMountName { + return pluginserrors.Errorf(pluginserrors.BadTaskSpecification, "A volume is already named %v in container", sharedMountName) + } + if volume_mount.MountPath == sharedMountPath { + return pluginserrors.Errorf(pluginserrors.BadTaskSpecification, "%s is already mounted in 
container", sharedMountPath) + } + } + + var quantity resource.Quantity + var err error + if len(SharedMemory.GetSizeLimit()) != 0 { + quantity, err = resource.ParseQuantity(SharedMemory.GetSizeLimit()) + if err != nil { + return pluginserrors.Errorf(pluginserrors.BadTaskSpecification, "Unable to parse size limit: %v", err.Error()) + } + } + + podSpec.Volumes = append( + podSpec.Volumes, + v1.Volume{ + Name: sharedMountName, + VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumMemory, SizeLimit: &quantity}}, + }, + ) + primaryContainer.VolumeMounts = append(primaryContainer.VolumeMounts, v1.VolumeMount{Name: sharedMountName, MountPath: sharedMountPath}) + + return nil +} + +// getAcceleratorConfig returns the configuration for the given accelerator device class. +// It first attempts to get device-class-specific configuration from AcceleratorDeviceClasses. +// If not found or incomplete, it falls back to the global GPU configuration fields for backward compatibility. 
func getAcceleratorConfig(gpuAccelerator *core.GPUAccelerator) config.AcceleratorDeviceClassConfig {
	cfg := config.GetK8sPluginConfig()

	// Start with defaults from global GPU config
	accelConfig := config.AcceleratorDeviceClassConfig{
		ResourceName:                         cfg.GpuResourceName,
		DeviceNodeLabel:                      cfg.GpuDeviceNodeLabel,
		PartitionSizeNodeLabel:               cfg.GpuPartitionSizeNodeLabel,
		UnpartitionedNodeSelectorRequirement: cfg.GpuUnpartitionedNodeSelectorRequirement,
		UnpartitionedToleration:              cfg.GpuUnpartitionedToleration,
	}

	// Override with device-class-specific config if available
	if gpuAccelerator != nil {
		deviceClass := gpuAccelerator.GetDeviceClass().String()
		if deviceClassConfig, ok := cfg.AcceleratorDeviceClasses[deviceClass]; ok {
			logger.Debugf(context.TODO(), "Using device-class-specific configuration for accelerator class: %s", deviceClass)
			// Each field falls back to the global value when the device-class
			// config leaves it unset (zero value / nil).
			// Override resource name if specified
			if deviceClassConfig.ResourceName != "" {
				accelConfig.ResourceName = deviceClassConfig.ResourceName
			}
			// Override device node label if specified
			if deviceClassConfig.DeviceNodeLabel != "" {
				accelConfig.DeviceNodeLabel = deviceClassConfig.DeviceNodeLabel
			}
			// Override partition size node label if specified
			if deviceClassConfig.PartitionSizeNodeLabel != "" {
				accelConfig.PartitionSizeNodeLabel = deviceClassConfig.PartitionSizeNodeLabel
			}
			// Override unpartitioned node selector requirement if specified
			if deviceClassConfig.UnpartitionedNodeSelectorRequirement != nil {
				accelConfig.UnpartitionedNodeSelectorRequirement = deviceClassConfig.UnpartitionedNodeSelectorRequirement
			}
			// Override unpartitioned toleration if specified
			if deviceClassConfig.UnpartitionedToleration != nil {
				accelConfig.UnpartitionedToleration = deviceClassConfig.UnpartitionedToleration
			}
			// Override PodTemplate if specified
			if deviceClassConfig.PodTemplate != nil {
				accelConfig.PodTemplate = deviceClassConfig.PodTemplate
			}
		} else {
			logger.Warnf(context.TODO(), "Device class '%s' not found in AcceleratorDeviceClasses configuration, falling back to global GPU config. Available device classes: %v",
				deviceClass, getConfiguredDeviceClasses(cfg.AcceleratorDeviceClasses))
		}
	}

	return accelConfig
}

// ApplyGPUNodeSelectors mutates podSpec with the node selector requirements
// and tolerations needed to land on a node providing the requested accelerator
// device and (optionally) partition size.
func ApplyGPUNodeSelectors(podSpec *v1.PodSpec, gpuAccelerator *core.GPUAccelerator) {
	// Short circuit if pod spec does not contain any containers that use accelerators
	if !podRequiresAccelerator(podSpec) {
		return
	}

	if podSpec.Affinity == nil {
		podSpec.Affinity = &v1.Affinity{}
	}

	// Get device-class-specific configuration
	accelConfig := getAcceleratorConfig(gpuAccelerator)

	// Apply changes for GPU device
	if device := gpuAccelerator.GetDevice(); len(device) > 0 {
		// Normalize the device name
		normalizedDevice := GetNormalizedAcceleratorDevice(device)

		// Add node selector requirement for GPU device
		deviceNsr := v1.NodeSelectorRequirement{
			Key:      accelConfig.DeviceNodeLabel,
			Operator: v1.NodeSelectorOpIn,
			Values:   []string{normalizedDevice},
		}
		AddRequiredNodeSelectorRequirements(podSpec.Affinity, deviceNsr)
		// Add toleration for GPU device
		deviceTol := v1.Toleration{
			Key:      accelConfig.DeviceNodeLabel,
			Value:    normalizedDevice,
			Operator: v1.TolerationOpEqual,
			Effect:   v1.TaintEffectNoSchedule,
		}
		podSpec.Tolerations = append(podSpec.Tolerations, deviceTol)
	}

	// Short circuit if a partition size preference is not specified
	partitionSizeValue := gpuAccelerator.GetPartitionSizeValue()
	if partitionSizeValue == nil {
		return
	}

	// Apply changes for GPU partition size, if applicable
	var partitionSizeNsr *v1.NodeSelectorRequirement
	var partitionSizeTol *v1.Toleration
	switch p := partitionSizeValue.(type) {
	case *core.GPUAccelerator_Unpartitioned:
		if !p.Unpartitioned {
			break
		}
		// Configured requirement wins; default is "partition label absent".
		if accelConfig.UnpartitionedNodeSelectorRequirement != nil {
			partitionSizeNsr = accelConfig.UnpartitionedNodeSelectorRequirement
		} else {
			partitionSizeNsr = &v1.NodeSelectorRequirement{
				Key:      accelConfig.PartitionSizeNodeLabel,
				Operator: v1.NodeSelectorOpDoesNotExist,
			}
		}
		if accelConfig.UnpartitionedToleration != nil {
			partitionSizeTol = accelConfig.UnpartitionedToleration
		}
	case *core.GPUAccelerator_PartitionSize:
		partitionSizeNsr = &v1.NodeSelectorRequirement{
			Key:      accelConfig.PartitionSizeNodeLabel,
			Operator: v1.NodeSelectorOpIn,
			Values:   []string{p.PartitionSize},
		}
		partitionSizeTol = &v1.Toleration{
			Key:      accelConfig.PartitionSizeNodeLabel,
			Value:    p.PartitionSize,
			Operator: v1.TolerationOpEqual,
			Effect:   v1.TaintEffectNoSchedule,
		}
	}
	if partitionSizeNsr != nil {
		AddRequiredNodeSelectorRequirements(podSpec.Affinity, *partitionSizeNsr)
	}
	if partitionSizeTol != nil {
		podSpec.Tolerations = append(podSpec.Tolerations, *partitionSizeTol)
	}
}

// UpdatePod updates the base pod spec used to execute tasks. This is configured with plugins and task metadata-specific options
// Existing values in podSpec win over plugin-config defaults for restart
// policy, service account, scheduler, affinity, security context and DNS.
func UpdatePod(taskExecutionMetadata pluginsCore.TaskExecutionMetadata,
	resourceRequirements []v1.ResourceRequirements, podSpec *v1.PodSpec) {
	if len(podSpec.RestartPolicy) == 0 {
		podSpec.RestartPolicy = v1.RestartPolicyNever
	}
	// Config-derived tolerations are placed before any pre-existing ones.
	podSpec.Tolerations = append(
		GetPodTolerations(taskExecutionMetadata.IsInterruptible(), resourceRequirements...), podSpec.Tolerations...)

	if len(podSpec.ServiceAccountName) == 0 {
		podSpec.ServiceAccountName = taskExecutionMetadata.GetK8sServiceAccount()
	}
	if len(podSpec.SchedulerName) == 0 {
		podSpec.SchedulerName = config.GetK8sPluginConfig().SchedulerName
	}
	podSpec.NodeSelector = utils.UnionMaps(config.GetK8sPluginConfig().DefaultNodeSelector, podSpec.NodeSelector)
	if taskExecutionMetadata.IsInterruptible() {
		podSpec.NodeSelector = utils.UnionMaps(podSpec.NodeSelector, config.GetK8sPluginConfig().InterruptibleNodeSelector)
	}
	if podSpec.Affinity == nil && config.GetK8sPluginConfig().DefaultAffinity != nil {
		podSpec.Affinity = config.GetK8sPluginConfig().DefaultAffinity.DeepCopy()
	}
	if podSpec.SecurityContext == nil && config.GetK8sPluginConfig().DefaultPodSecurityContext != nil {
		podSpec.SecurityContext = config.GetK8sPluginConfig().DefaultPodSecurityContext.DeepCopy()
	}
	if config.GetK8sPluginConfig().EnableHostNetworkingPod != nil {
		podSpec.HostNetwork = *config.GetK8sPluginConfig().EnableHostNetworkingPod
	}
	if podSpec.DNSConfig == nil && config.GetK8sPluginConfig().DefaultPodDNSConfig != nil {
		podSpec.DNSConfig = config.GetK8sPluginConfig().DefaultPodDNSConfig.DeepCopy()
	}
	ApplyInterruptibleNodeAffinity(taskExecutionMetadata.IsInterruptible(), podSpec)
}

// mergeMapInto copies every key/value pair from src into dst, overwriting
// existing keys. dst must be non-nil.
func mergeMapInto(src map[string]string, dst map[string]string) {
	for key, value := range src {
		dst[key] = value
	}
}

// BuildRawPod constructs a PodSpec and ObjectMeta based on the definition passed by the TaskExecutionContext. This
// definition does not include any configuration injected by Flyte.
func BuildRawPod(ctx context.Context, tCtx pluginsCore.TaskExecutionContext) (*v1.PodSpec, *metav1.ObjectMeta, string, error) {
	taskTemplate, err := tCtx.TaskReader().Read(ctx)
	if err != nil {
		logger.Warnf(ctx, "failed to read task information when trying to construct Pod, err: %s", err.Error())
		return nil, nil, "", err
	}

	var podSpec *v1.PodSpec
	objectMeta := metav1.ObjectMeta{
		Annotations: make(map[string]string),
		Labels:      make(map[string]string),
	}
	primaryContainerName := ""

	switch target := taskTemplate.GetTarget().(type) {
	case *core.TaskTemplate_Container:
		// handles tasks defined by a single container
		c, err := BuildRawContainer(ctx, tCtx)
		if err != nil {
			return nil, nil, "", err
		}

		primaryContainerName = c.Name
		podSpec = &v1.PodSpec{
			Containers: []v1.Container{
				*c,
			},
		}

		// handle pod template override
		// NOTE(review): the override replaces the pod spec built above
		// entirely, and the primary container name is taken from the template;
		// confirm ApplyPodTemplateOverride carries the built container through.
		podTemplate := tCtx.TaskExecutionMetadata().GetOverrides().GetPodTemplate()
		if podTemplate.GetPodSpec() != nil {
			podSpec, objectMeta, err = ApplyPodTemplateOverride(objectMeta, podTemplate)
			if err != nil {
				return nil, nil, "", err
			}
			primaryContainerName = podTemplate.GetPrimaryContainerName()
		}

	case *core.TaskTemplate_K8SPod:
		// handles pod tasks that marshal the pod spec to the k8s_pod task target.
		if target.K8SPod.PodSpec == nil {
			return nil, nil, "", pluginserrors.Errorf(pluginserrors.BadTaskSpecification,
				"Pod tasks with task type version > 1 should specify their target as a K8sPod with a defined pod spec")
		}

		err := utils.UnmarshalStructToObj(target.K8SPod.PodSpec, &podSpec)
		if err != nil {
			return nil, nil, "", pluginserrors.Errorf(pluginserrors.BadTaskSpecification,
				"Unable to unmarshal task k8s pod [%v], Err: [%v]", target.K8SPod.PodSpec, err.Error())
		}

		// get primary container name; required for K8sPod targets.
		var ok bool
		if primaryContainerName, ok = taskTemplate.GetConfig()[PrimaryContainerKey]; !ok {
			return nil, nil, "", pluginserrors.Errorf(pluginserrors.BadTaskSpecification,
				"invalid TaskSpecification, config missing [%s] key in [%v]", PrimaryContainerKey, taskTemplate.GetConfig())
		}

		// update annotations and labels from the task-provided pod metadata.
		if taskTemplate.GetK8SPod().Metadata != nil {
			mergeMapInto(target.K8SPod.Metadata.Annotations, objectMeta.Annotations)
			mergeMapInto(target.K8SPod.Metadata.Labels, objectMeta.Labels)
		}

		// handle pod template override (same precedence as the container case).
		podTemplate := tCtx.TaskExecutionMetadata().GetOverrides().GetPodTemplate()
		if podTemplate.GetPodSpec() != nil {
			podSpec, objectMeta, err = ApplyPodTemplateOverride(objectMeta, podTemplate)
			if err != nil {
				return nil, nil, "", err
			}
			primaryContainerName = podTemplate.GetPrimaryContainerName()
		}

	default:
		return nil, nil, "", pluginserrors.Errorf(pluginserrors.BadTaskSpecification,
			"invalid TaskSpecification, unable to determine Pod configuration")
	}

	return podSpec, &objectMeta, primaryContainerName, nil
}

// hasExternalLinkType reports whether the task template's config declares a
// "link_type", in which case the console URL should be included.
func hasExternalLinkType(taskTemplate *core.TaskTemplate) bool {
	if taskTemplate == nil {
		return false
	}
	config := taskTemplate.GetConfig()
	if config == nil {
		return false
	}
	// The presence of any "link_type" is sufficient to guarantee that the console URL should be included.
+ _, exists := config["link_type"] + return exists +} + +// ApplyFlytePodConfiguration updates the PodSpec and ObjectMeta with various Flyte configuration. This includes +// applying default k8s configuration, applying overrides (resources etc.), injecting copilot containers, and merging with the +// configuration PodTemplate (if exists). +func ApplyFlytePodConfiguration(ctx context.Context, tCtx pluginsCore.TaskExecutionContext, podSpec *v1.PodSpec, objectMeta *metav1.ObjectMeta, primaryContainerName string) (*v1.PodSpec, *metav1.ObjectMeta, error) { + taskTemplate, err := tCtx.TaskReader().Read(ctx) + if err != nil { + logger.Warnf(ctx, "failed to read task information when trying to construct Pod, err: %s", err.Error()) + return nil, nil, err + } + + // add flyte resource customizations to containers + templateParameters := template.Parameters{ + Inputs: tCtx.InputReader(), + OutputPath: tCtx.OutputWriter(), + Task: tCtx.TaskReader(), + TaskExecMetadata: tCtx.TaskExecutionMetadata(), + IncludeConsoleURL: hasExternalLinkType(taskTemplate), + } + + // Merge overrides with base extended resources + extendedResources := ApplyExtendedResourcesOverrides( + taskTemplate.GetExtendedResources(), + tCtx.TaskExecutionMetadata().GetOverrides().GetExtendedResources(), + ) + + // iterate over the initContainers first + for index := range podSpec.InitContainers { + var resourceMode = ResourceCustomizationModeEnsureExistingResourcesInRange + + if err := AddFlyteCustomizationsToContainer(ctx, templateParameters, resourceMode, &podSpec.InitContainers[index], extendedResources); err != nil { + return nil, nil, err + } + } + + resourceRequests := make([]v1.ResourceRequirements, 0, len(podSpec.Containers)) + var primaryContainer *v1.Container + for index, container := range podSpec.Containers { + var resourceMode = ResourceCustomizationModeEnsureExistingResourcesInRange + if container.Name == primaryContainerName { + resourceMode = ResourceCustomizationModeMergeExistingResources + } 
+ + if err := AddFlyteCustomizationsToContainer(ctx, templateParameters, resourceMode, &podSpec.Containers[index], extendedResources); err != nil { + return nil, nil, err + } + + resourceRequests = append(resourceRequests, podSpec.Containers[index].Resources) + if container.Name == primaryContainerName { + primaryContainer = &podSpec.Containers[index] + } + } + + if primaryContainer == nil { + return nil, nil, pluginserrors.Errorf(pluginserrors.BadTaskSpecification, "invalid TaskSpecification, primary container [%s] not defined", primaryContainerName) + } + + // add copilot configuration to primaryContainer and PodSpec (if necessary) + var dataLoadingConfig *core.DataLoadingConfig + if container := taskTemplate.GetContainer(); container != nil { + dataLoadingConfig = container.GetDataConfig() + } else if pod := taskTemplate.GetK8SPod(); pod != nil { + dataLoadingConfig = pod.GetDataConfig() + } + + primaryInitContainerName := "" + + if dataLoadingConfig != nil { + if err := AddCoPilotToContainer(ctx, config.GetK8sPluginConfig().CoPilot, + primaryContainer, taskTemplate.Interface, dataLoadingConfig); err != nil { + return nil, nil, err + } + + if err := AddCoPilotToPod(ctx, config.GetK8sPluginConfig().CoPilot, podSpec, taskTemplate.GetInterface(), + tCtx.TaskExecutionMetadata(), tCtx.InputReader(), tCtx.OutputWriter(), dataLoadingConfig); err != nil { + return nil, nil, err + } + } + + // update primaryContainer and PodSpec with k8s plugin configuration, etc + UpdatePod(tCtx.TaskExecutionMetadata(), resourceRequests, podSpec) + if primaryContainer.SecurityContext == nil && config.GetK8sPluginConfig().DefaultSecurityContext != nil { + primaryContainer.SecurityContext = config.GetK8sPluginConfig().DefaultSecurityContext.DeepCopy() + } + + // Apply device-class-specific PodTemplate (if applicable) + // This provides device-specific defaults while allowing task configs to override + podSpec, err = applyAcceleratorDeviceClassPodTemplate(ctx, podSpec, extendedResources, 
primaryContainerName, primaryInitContainerName) + if err != nil { + return nil, nil, err + } + + // merge PodSpec and ObjectMeta with configuration pod template (if exists) + podSpec, objectMeta, err = MergeWithBasePodTemplate(ctx, tCtx, podSpec, objectMeta, primaryContainerName, primaryInitContainerName) + if err != nil { + return nil, nil, err + } + + // TODO @pvditt + //if propellerCfg.GetConfig().AcceleratedInputs.Enabled { + // ApplyAcceleratedInputsSpec(podSpec, primaryContainerName) + //} + + // GPU accelerator + if extendedResources.GetGpuAccelerator() != nil { + ApplyGPUNodeSelectors(podSpec, extendedResources.GetGpuAccelerator()) + } + + // Shared memory volume + if extendedResources.GetSharedMemory() != nil { + err = ApplySharedMemory(podSpec, primaryContainerName, extendedResources.GetSharedMemory()) + if err != nil { + return nil, nil, err + } + } + + // Override container image if necessary + if len(tCtx.TaskExecutionMetadata().GetOverrides().GetContainerImage()) > 0 { + ApplyContainerImageOverride(podSpec, tCtx.TaskExecutionMetadata().GetOverrides().GetContainerImage(), primaryContainerName) + } + + return podSpec, objectMeta, nil +} + +func IsVscodeEnabled(ctx context.Context, envVar []v1.EnvVar) bool { + for _, env := range envVar { + if env.Name != FlyteEnableVscode { + continue + } + var err error + enableVscode, err := strconv.ParseBool(env.Value) + if err != nil { + logger.Errorf(ctx, "failed to parse %s env var: [%s]", FlyteEnableVscode, env.Value) + return false + } + return enableVscode + } + return false +} + +func ApplyContainerImageOverride(podSpec *v1.PodSpec, containerImage string, primaryContainerName string) { + for i, c := range podSpec.Containers { + if c.Name == primaryContainerName { + podSpec.Containers[i].Image = containerImage + return + } + } +} + +func ApplyPodTemplateOverride(objectMeta metav1.ObjectMeta, podTemplate *core.K8SPod) (*v1.PodSpec, metav1.ObjectMeta, error) { + if podTemplate.GetMetadata().GetAnnotations() != 
nil { + mergeMapInto(podTemplate.GetMetadata().GetAnnotations(), objectMeta.Annotations) + } + if podTemplate.GetMetadata().GetLabels() != nil { + mergeMapInto(podTemplate.GetMetadata().GetLabels(), objectMeta.Labels) + } + + var podSpecOverride *v1.PodSpec + err := utils.UnmarshalStructToObj(podTemplate.GetPodSpec(), &podSpecOverride) + if err != nil { + return nil, objectMeta, err + } + + return podSpecOverride, objectMeta, nil +} + +func addTolerationInPodSpec(podSpec *v1.PodSpec, toleration *v1.Toleration) *v1.PodSpec { + podTolerations := podSpec.Tolerations + + var newTolerations []v1.Toleration + for i := range podTolerations { + if toleration.MatchToleration(&podTolerations[i]) { + return podSpec + } + newTolerations = append(newTolerations, podTolerations[i]) + } + newTolerations = append(newTolerations, *toleration) + podSpec.Tolerations = newTolerations + return podSpec +} + +func AddTolerationsForExtendedResources(podSpec *v1.PodSpec) *v1.PodSpec { + if podSpec == nil { + podSpec = &v1.PodSpec{} + } + + resources := sets.NewString() + for _, container := range podSpec.Containers { + for _, extendedResource := range config.GetK8sPluginConfig().AddTolerationsForExtendedResources { + if _, ok := container.Resources.Requests[v1.ResourceName(extendedResource)]; ok { + resources.Insert(extendedResource) + } + } + } + + for _, container := range podSpec.InitContainers { + for _, extendedResource := range config.GetK8sPluginConfig().AddTolerationsForExtendedResources { + if _, ok := container.Resources.Requests[v1.ResourceName(extendedResource)]; ok { + resources.Insert(extendedResource) + } + } + } + + for _, resource := range resources.List() { + addTolerationInPodSpec(podSpec, &v1.Toleration{ + Key: resource, + Operator: v1.TolerationOpExists, + Effect: v1.TaintEffectNoSchedule, + }) + } + + return podSpec +} + +// ToK8sPodSpec builds a PodSpec and ObjectMeta based on the definition passed by the TaskExecutionContext. 
This +// involves parsing the raw PodSpec definition and applying all Flyte configuration options. +func ToK8sPodSpec(ctx context.Context, tCtx pluginsCore.TaskExecutionContext) (*v1.PodSpec, *metav1.ObjectMeta, string, error) { + // build raw PodSpec and ObjectMeta + podSpec, objectMeta, primaryContainerName, err := BuildRawPod(ctx, tCtx) + if err != nil { + return nil, nil, "", err + } + + // add flyte configuration + podSpec, objectMeta, err = ApplyFlytePodConfiguration(ctx, tCtx, podSpec, objectMeta, primaryContainerName) + if err != nil { + return nil, nil, "", err + } + + podSpec = AddTolerationsForExtendedResources(podSpec) + + return podSpec, objectMeta, primaryContainerName, nil +} + +func GetContainer(podSpec *v1.PodSpec, name string) (*v1.Container, error) { + for _, container := range podSpec.Containers { + if container.Name == name { + return &container, nil + } + } + return nil, pluginserrors.Errorf(pluginserrors.BadTaskSpecification, "invalid TaskSpecification, container [%s] not defined", name) +} + +// getBasePodTemplate attempts to retrieve the PodTemplate to use as the base for k8s Pod configuration. This value can +// come from one of the following: +// (1) PodTemplate name in the TaskMetadata: This name is then looked up in the PodTemplateStore. +// (2) Default PodTemplate name from configuration: This name is then looked up in the PodTemplateStore. 
+func getBasePodTemplate(ctx context.Context, tCtx pluginsCore.TaskExecutionContext, podTemplateStore PodTemplateStore) (*v1.PodTemplate, error) { + taskTemplate, err := tCtx.TaskReader().Read(ctx) + if err != nil { + return nil, pluginserrors.Errorf(pluginserrors.BadTaskSpecification, "TaskSpecification cannot be read, Err: [%v]", err.Error()) + } + + var podTemplate *v1.PodTemplate + if taskTemplate.Metadata != nil && len(taskTemplate.Metadata.PodTemplateName) > 0 { + // retrieve PodTemplate by name from PodTemplateStore + podTemplate = podTemplateStore.LoadOrDefault(tCtx.TaskExecutionMetadata().GetNamespace(), taskTemplate.Metadata.PodTemplateName) + if podTemplate == nil { + return nil, pluginserrors.Errorf(pluginserrors.BadTaskSpecification, "PodTemplate '%s' does not exist", taskTemplate.Metadata.PodTemplateName) + } + } else { + // check for default PodTemplate + podTemplate = podTemplateStore.LoadOrDefault(tCtx.TaskExecutionMetadata().GetNamespace(), config.GetK8sPluginConfig().DefaultPodTemplateName) + } + + return podTemplate, nil +} + +// MergeWithBasePodTemplate attempts to merge the provided PodSpec and ObjectMeta with the configuration PodTemplate for +// this task. 
func MergeWithBasePodTemplate(ctx context.Context, tCtx pluginsCore.TaskExecutionContext,
	podSpec *v1.PodSpec, objectMeta *metav1.ObjectMeta, primaryContainerName, primaryInitContainerName string) (*v1.PodSpec, *metav1.ObjectMeta, error) {

	// attempt to retrieve base PodTemplate
	podTemplate, err := getBasePodTemplate(ctx, tCtx, DefaultPodTemplateStore)
	if err != nil {
		return nil, nil, err
	} else if podTemplate == nil {
		// if no PodTemplate to merge as base -> return
		return podSpec, objectMeta, nil
	}

	// merge podTemplate onto podSpec
	templateSpec := &podTemplate.Template.Spec
	mergedPodSpec, err := MergeBasePodSpecOntoTemplate(templateSpec, podSpec, primaryContainerName, primaryInitContainerName)
	if err != nil {
		return nil, nil, err
	}

	// merge PodTemplate ObjectMeta with the provided objectMeta (objectMeta values win)
	var mergedObjectMeta *metav1.ObjectMeta = podTemplate.Template.ObjectMeta.DeepCopy()
	if err := mergo.Merge(mergedObjectMeta, objectMeta, mergo.WithOverride, mergo.WithAppendSlice); err != nil {
		return nil, nil, err
	}

	return mergedPodSpec, mergedObjectMeta, nil
}

// MergeBasePodSpecOntoTemplate merges a base pod spec onto a template pod spec. The template pod spec has some
// magic values that allow users to specify templates that target all containers and primary containers. Aside from
// magic values this method will merge containers that have matching names.
func MergeBasePodSpecOntoTemplate(templatePodSpec *v1.PodSpec, basePodSpec *v1.PodSpec, primaryContainerName string, primaryInitContainerName string) (*v1.PodSpec, error) {
	if templatePodSpec == nil || basePodSpec == nil {
		return nil, errors.New("neither the templatePodSpec or the basePodSpec can be nil")
	}

	// extract primaryContainerTemplate. The base should always contain the primary container.
	var defaultContainerTemplate, primaryContainerTemplate *v1.Container

	// extract default container template
	for i := 0; i < len(templatePodSpec.Containers); i++ {
		if templatePodSpec.Containers[i].Name == defaultContainerTemplateName {
			defaultContainerTemplate = &templatePodSpec.Containers[i]
		} else if templatePodSpec.Containers[i].Name == primaryContainerTemplateName {
			primaryContainerTemplate = &templatePodSpec.Containers[i]
		}
	}

	// extract primaryInitContainerTemplate. The base should always contain the primary container.
	var defaultInitContainerTemplate, primaryInitContainerTemplate *v1.Container

	// extract defaultInitContainerTemplate
	for i := 0; i < len(templatePodSpec.InitContainers); i++ {
		if templatePodSpec.InitContainers[i].Name == defaultInitContainerTemplateName {
			defaultInitContainerTemplate = &templatePodSpec.InitContainers[i]
		} else if templatePodSpec.InitContainers[i].Name == primaryInitContainerTemplateName {
			primaryInitContainerTemplate = &templatePodSpec.InitContainers[i]
		}
	}

	// Merge base into template; base (task) values override template values, slices append
	mergedPodSpec := templatePodSpec.DeepCopy()
	if err := mergo.Merge(mergedPodSpec, basePodSpec, mergo.WithOverride, mergo.WithAppendSlice); err != nil {
		return nil, err
	}

	// merge PodTemplate containers
	var mergedContainers []v1.Container
	for _, container := range basePodSpec.Containers {
		// if applicable start with defaultContainerTemplate
		var mergedContainer *v1.Container
		if defaultContainerTemplate != nil {
			mergedContainer = defaultContainerTemplate.DeepCopy()
		}

		// If this is a primary container handle the template
		if container.Name == primaryContainerName && primaryContainerTemplate != nil {
			if mergedContainer == nil {
				mergedContainer = primaryContainerTemplate.DeepCopy()
			} else {
				err := mergo.Merge(mergedContainer, primaryContainerTemplate, mergo.WithOverride, mergo.WithAppendSlice)
				if err != nil {
					return nil, err
				}
			}
		}

		// Check for any name matching template containers
		for i, templateContainer := range templatePodSpec.Containers {
			if templateContainer.Name != container.Name {
				continue
			}

			if mergedContainer == nil {
				// NOTE(review): this aliases templatePodSpec.Containers[i] (no DeepCopy), so
				// the subsequent mergo.Merge mutates the caller's templatePodSpec — TODO confirm
				// this is acceptable for all call sites.
				mergedContainer = &templatePodSpec.Containers[i]
			} else {
				err := mergo.Merge(mergedContainer, templateContainer, mergo.WithOverride, mergo.WithAppendSlice)
				if err != nil {
					return nil, err
				}
			}
		}

		// Merge in the base container
		if mergedContainer == nil {
			mergedContainer = container.DeepCopy()
		} else {
			err := mergo.Merge(mergedContainer, container, mergo.WithOverride, mergo.WithAppendSlice)
			if err != nil {
				return nil, err
			}
		}

		mergedContainers = append(mergedContainers, *mergedContainer)

	}

	mergedPodSpec.Containers = mergedContainers

	// merge PodTemplate init containers (mirrors the container handling above)
	var mergedInitContainers []v1.Container
	for _, initContainer := range basePodSpec.InitContainers {
		// if applicable start with defaultContainerTemplate
		var mergedInitContainer *v1.Container
		if defaultInitContainerTemplate != nil {
			mergedInitContainer = defaultInitContainerTemplate.DeepCopy()
		}

		// If this is a primary init container handle the template
		if initContainer.Name == primaryInitContainerName && primaryInitContainerTemplate != nil {
			if mergedInitContainer == nil {
				mergedInitContainer = primaryInitContainerTemplate.DeepCopy()
			} else {
				err := mergo.Merge(mergedInitContainer, primaryInitContainerTemplate, mergo.WithOverride, mergo.WithAppendSlice)
				if err != nil {
					return nil, err
				}
			}
		}

		// Check for any name matching template containers
		for i, templateInitContainer := range templatePodSpec.InitContainers {
			if templateInitContainer.Name != initContainer.Name {
				continue
			}

			if mergedInitContainer == nil {
				// NOTE(review): aliases templatePodSpec.InitContainers[i]; see container note above.
				mergedInitContainer = &templatePodSpec.InitContainers[i]
			} else {
				err := mergo.Merge(mergedInitContainer, templateInitContainer, mergo.WithOverride, mergo.WithAppendSlice)
				if err != nil {
					return nil, err
				}
			}
		}

		// Merge in the base init container
		if mergedInitContainer == nil {
			mergedInitContainer = initContainer.DeepCopy()
		} else {
			err := mergo.Merge(mergedInitContainer, initContainer, mergo.WithOverride, mergo.WithAppendSlice)
			if err != nil {
				return nil, err
			}
		}

		mergedInitContainers = append(mergedInitContainers, *mergedInitContainer)
	}

	mergedPodSpec.InitContainers = mergedInitContainers

	return mergedPodSpec, nil
}

// MergeOverlayPodSpecOntoBase merges a customized pod spec onto a base pod spec. At a container level it will
// merge containers that have matching names.
func MergeOverlayPodSpecOntoBase(basePodSpec *v1.PodSpec, overlayPodSpec *v1.PodSpec) (*v1.PodSpec, error) {
	if basePodSpec == nil || overlayPodSpec == nil {
		return nil, errors.New("neither the basePodSpec or the overlayPodSpec can be nil")
	}

	mergedPodSpec := basePodSpec.DeepCopy()
	if err := mergo.Merge(mergedPodSpec, overlayPodSpec, mergo.WithOverride, mergo.WithAppendSlice); err != nil {
		return nil, err
	}

	// merge PodTemplate containers; note containers that exist only in the overlay are NOT added
	var mergedContainers []v1.Container
	for _, container := range basePodSpec.Containers {

		mergedContainer := container.DeepCopy()

		for _, overlayContainer := range overlayPodSpec.Containers {
			if mergedContainer.Name == overlayContainer.Name {
				err := mergo.Merge(mergedContainer, overlayContainer, mergo.WithOverride, mergo.WithAppendSlice)
				if err != nil {
					return nil, err
				}
			}
		}
		mergedContainers = append(mergedContainers, *mergedContainer)
	}

	mergedPodSpec.Containers = mergedContainers

	// merge PodTemplate init containers
	var mergedInitContainers []v1.Container
	for _, initContainer := range basePodSpec.InitContainers {

		mergedInitContainer := initContainer.DeepCopy()

		for _, overlayInitContainer := range overlayPodSpec.InitContainers {
			if mergedInitContainer.Name == overlayInitContainer.Name {
				err := mergo.Merge(mergedInitContainer, overlayInitContainer, mergo.WithOverride, mergo.WithAppendSlice)
				if err != nil {
					return nil, err
				}
			}
		}
		mergedInitContainers = append(mergedInitContainers, *mergedInitContainer)
	}

	mergedPodSpec.InitContainers = mergedInitContainers

	return mergedPodSpec, nil
}

// applyAcceleratorDeviceClassPodTemplate applies device-class-specific PodTemplate configuration
// to the provided PodSpec using MergeBasePodSpecOntoTemplate. The device class PodTemplate serves as a base,
// with task PodSpec values taking precedence.
//
// See AcceleratorDeviceClassConfig.PodTemplate documentation for detailed merge semantics,
// precedence rules, and container template support.
func applyAcceleratorDeviceClassPodTemplate(
	ctx context.Context,
	podSpec *v1.PodSpec,
	extendedResources *core.ExtendedResources,
	primaryContainerName string,
	primaryInitContainerName string,
) (*v1.PodSpec, error) {
	// no accelerator requested -> nothing to apply
	if extendedResources == nil || extendedResources.GetGpuAccelerator() == nil {
		return podSpec, nil
	}

	gpuAccelerator := extendedResources.GetGpuAccelerator()
	accelConfig := getAcceleratorConfig(gpuAccelerator)

	if accelConfig.PodTemplate == nil {
		return podSpec, nil
	}

	deviceClass := gpuAccelerator.GetDeviceClass().String()
	logger.Infof(ctx, "Applying PodTemplate for accelerator device class: %s", deviceClass)

	// Use MergeBasePodSpecOntoTemplate with device class pod template as base
	mergedPodSpec, err := MergeBasePodSpecOntoTemplate(&accelConfig.PodTemplate.Template.Spec, podSpec, primaryContainerName, primaryInitContainerName)
	if err != nil {
		return nil, pluginserrors.Wrapf(
			pluginserrors.DownstreamSystemError,
			err,
			"Failed to merge PodTemplate for accelerator device class '%s'. "+
				"Check k8s.accelerator-device-classes[%s].pod-template configuration.",
			deviceClass, deviceClass,
		)
	}

	return mergedPodSpec, nil
}

// TODO @pvditt
//func ApplyAcceleratedInputsSpec(spec *v1.PodSpec, primaryName string) {
//	cfg := propellerCfg.GetConfig().AcceleratedInputs
//	hostPathType := v1.HostPathDirectory
//	spec.Volumes = append(spec.Volumes, v1.Volume{
//		Name: "union-persistent-data-volume",
//		VolumeSource: v1.VolumeSource{
//			HostPath: &v1.HostPathVolumeSource{
//				Path: cfg.VolumePath,
//				Type: &hostPathType,
//			},
//		},
//	})
//	for i, cont := range spec.Containers {
//		if cont.Name == primaryName {
//			spec.Containers[i].VolumeMounts = append(cont.VolumeMounts, v1.VolumeMount{
//				Name:      "union-persistent-data-volume",
//				ReadOnly:  true,
//				MountPath: cfg.LocalPathPrefix,
//			})
//		}
//	}
//}

// BuildIdentityPod returns a bare Pod carrying only TypeMeta (kind/apiVersion), used as a
// skeleton object for identification purposes.
func BuildIdentityPod() *v1.Pod {
	return &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       PodKind,
			APIVersion: v1.SchemeGroupVersion.String(),
		},
	}
}

// DemystifyPending is one the core functions, that helps FlytePropeller determine if a pending pod is indeed pending,
// or it is actually stuck in a un-reparable state. In such a case the pod should be marked as dead and the task should
// be retried. This has to be handled sadly, as K8s is still largely designed for long running services that should
// recover from failures, but Flyte pods are completely automated and should either run or fail
// Important considerations.
// Pending Status in Pod could be for various reasons and sometimes could signal a problem
// Case I: Pending because the Image pull is failing and it is backing off
//
// This could be transient. So we can actually rely on the failure reason.
// The failure transitions from ErrImagePull -> ImagePullBackoff
//
// Case II: Not enough resources are available. This is tricky. It could be that the total number of
//
// resources requested is beyond the capability of the system.
for this we will rely on configuration +// and hence input gates. We should not allow bad requests that Request for large number of resource through. +// In the case it makes through, we will fail after timeout +func DemystifyPending(status v1.PodStatus, info pluginsCore.TaskInfo) (pluginsCore.PhaseInfo, error) { + phaseInfo, t := demystifyPendingHelper(status, info) + + if phaseInfo.Phase().IsTerminal() { + return phaseInfo, nil + } + + podPendingTimeout := config.GetK8sPluginConfig().PodPendingTimeout.Duration + if podPendingTimeout > 0 && time.Since(t) >= podPendingTimeout { + return pluginsCore.PhaseInfoSystemRetryableFailureWithCleanup("PodPendingTimeout", phaseInfo.Reason(), &pluginsCore.TaskInfo{ + OccurredAt: &t, + }), nil + } + + if phaseInfo.Phase() != pluginsCore.PhaseUndefined { + return phaseInfo, nil + } + + return pluginsCore.PhaseInfoQueuedWithTaskInfo(time.Now(), pluginsCore.DefaultPhaseVersion, "Scheduling", phaseInfo.Info()), nil +} + +// DemystifyFailedOrPendingPod inspects the pod status of a failed or pending pod and attempts to determine the reason +// for the failure or pending state. This function currently only handles pods in the Failed or Pending phase. +// This is useful to check the specific error from the pod in the rayjob, sparkjob, etc. +// For example, if the pod is in the Failed phase due to an image pull error, this function can return a more specific +// error message indicating that the image could not be pulled. +// Similarly, if the pod is in the Pending phase due to insufficient resources, this function can return an error +// message indicating that the pod could not be scheduled due to lack of resources. 
+func DemystifyFailedOrPendingPod( + ctx context.Context, + pluginContext k8s.PluginContext, + info pluginsCore.TaskInfo, + namespace string, + podName string, + primaryContainerName string, +) (pluginsCore.PhaseInfo, error) { + pod := &v1.Pod{} + var phaseInfo pluginsCore.PhaseInfo + + err := pluginContext.K8sReader().Get(ctx, client.ObjectKey{Namespace: namespace, Name: podName}, pod) + if err != nil { + logger.Debugf(ctx, "Failed to get pod %s in namespace %s. Error: %v", podName, namespace, err) + } + switch pod.Status.Phase { + case v1.PodFailed: + phaseInfo, err = DemystifyFailure(ctx, pod.Status, info, primaryContainerName) + case v1.PodPending: + phaseInfo, err = DemystifyPending(pod.Status, info) + } + return phaseInfo, err +} + +func demystifyPendingHelper(status v1.PodStatus, info pluginsCore.TaskInfo) (pluginsCore.PhaseInfo, time.Time) { + // Search over the difference conditions in the status object. Note that the 'Pending' this function is + // demystifying is the 'phase' of the pod status. This is different than the PodReady condition type also used below + phaseInfo := pluginsCore.PhaseInfoQueuedWithTaskInfo(time.Now(), pluginsCore.DefaultPhaseVersion, "Demistify Pending", &info) + + t := time.Now() + for _, c := range status.Conditions { + t = c.LastTransitionTime.Time + switch c.Type { + case v1.PodScheduled: + if c.Status == v1.ConditionFalse { + // Waiting to be scheduled. This usually refers to inability to acquire resources. + return pluginsCore.PhaseInfoQueuedWithTaskInfo(t, pluginsCore.DefaultPhaseVersion, fmt.Sprintf("%s:%s", c.Reason, c.Message), phaseInfo.Info()), t + } + + case v1.PodReasonUnschedulable: + // We Ignore case in which we are unable to find resources on the cluster. This is because + // - The resources may be not available at the moment, but may become available eventually + // The pod scheduler will keep on looking at this pod and trying to satisfy it. 
+ // + // Pod status looks like this: + // message: '0/1 nodes are available: 1 Insufficient memory.' + // reason: Unschedulable + // status: "False" + // type: PodScheduled + return pluginsCore.PhaseInfoQueuedWithTaskInfo(t, pluginsCore.DefaultPhaseVersion, fmt.Sprintf("%s:%s", c.Reason, c.Message), phaseInfo.Info()), t + + case v1.PodReady: + if c.Status == v1.ConditionFalse { + // This happens in the case the image is having some problems. In the following example, K8s is having + // problems downloading an image. To ensure that, we will have to iterate over all the container statuses and + // find if some container has imagepull failure + // e.g. + // - lastProbeTime: null + // lastTransitionTime: 2018-12-18T00:57:30Z + // message: 'containers with unready status: [myapp-container]' + // reason: ContainersNotReady + // status: "False" + // type: Ready + // + // e.g. Container status + // - image: blah + // imageID: "" + // lastState: {} + // name: myapp-container + // ready: false + // restartCount: 0 + // state: + // waiting: + // message: Back-off pulling image "blah" + // reason: ImagePullBackOff + for _, containerStatus := range status.ContainerStatuses { + if !containerStatus.Ready { + if containerStatus.State.Waiting != nil { + // There are a variety of reasons that can cause a pod to be in this waiting state. 
+ // Waiting state may be legitimate when the container is being downloaded, started or init containers are running + reason := containerStatus.State.Waiting.Reason + finalReason := fmt.Sprintf("%s|%s", c.Reason, reason) + finalMessage := fmt.Sprintf("%s|%s", c.Message, containerStatus.State.Waiting.Message) + switch reason { + case "ErrImagePull", "ContainerCreating", "PodInitializing": + // But, there are only two "reasons" when a pod is successfully being created and hence it is in + // waiting state + // Refer to https://github.com/kubernetes/kubernetes/blob/master/pkg/kubelet/kubelet_pods.go + // and look for the default waiting states + // We also want to allow Image pulls to be retried, so ErrImagePull will be ignored + // as it eventually enters into ImagePullBackOff + // ErrImagePull -> Transitionary phase to ImagePullBackOff + // ContainerCreating -> Image is being downloaded + // PodInitializing -> Init containers are running + return pluginsCore.PhaseInfoInitializing(t, pluginsCore.DefaultPhaseVersion, fmt.Sprintf("[%s]: %s", finalReason, finalMessage), &pluginsCore.TaskInfo{OccurredAt: &t}), t + + case "CreateContainerError": + // This may consist of: + // 1. Transient errors: e.g. failed to reserve + // container name, container name [...] already in use + // by container + // 2. Permanent errors: e.g. no command specified + // To handle both types of errors gracefully without + // arbitrary pattern matching in the message, we simply + // allow a grace period for kubelet to resolve + // transient issues with the container runtime. If the + // error persists beyond this time, the corresponding + // task is marked as failed. + // NOTE: The current implementation checks for a timeout + // by comparing the condition's LastTransitionTime + // based on the corresponding kubelet's clock with the + // current time based on FlytePropeller's clock. 
This + // is not ideal given that these 2 clocks are NOT + // synced, and therefore, only provides an + // approximation of the elapsed time since the last + // transition. + + gracePeriod := config.GetK8sPluginConfig().CreateContainerErrorGracePeriod.Duration + if time.Since(t) >= gracePeriod { + return pluginsCore.PhaseInfoFailureWithCleanup(finalReason, GetMessageAfterGracePeriod(finalMessage, gracePeriod), &pluginsCore.TaskInfo{ + OccurredAt: &t, + }), t + } + return pluginsCore.PhaseInfoInitializing( + t, + pluginsCore.DefaultPhaseVersion, + fmt.Sprintf("[%s]: %s", finalReason, finalMessage), + &pluginsCore.TaskInfo{OccurredAt: &t}, + ), t + + case "CreateContainerConfigError": + gracePeriod := config.GetK8sPluginConfig().CreateContainerConfigErrorGracePeriod.Duration + if time.Since(t) >= gracePeriod { + return pluginsCore.PhaseInfoFailureWithCleanup(finalReason, GetMessageAfterGracePeriod(finalMessage, gracePeriod), &pluginsCore.TaskInfo{ + OccurredAt: &t, + }), t + } + return pluginsCore.PhaseInfoInitializing( + t, + pluginsCore.DefaultPhaseVersion, + fmt.Sprintf("[%s]: %s", finalReason, finalMessage), + &pluginsCore.TaskInfo{OccurredAt: &t}, + ), t + + case "InvalidImageName": + return pluginsCore.PhaseInfoFailureWithCleanup(finalReason, finalMessage, &pluginsCore.TaskInfo{ + OccurredAt: &t, + }), t + + case "ImagePullBackOff": + gracePeriod := config.GetK8sPluginConfig().ImagePullBackoffGracePeriod.Duration + if time.Since(t) >= gracePeriod { + return pluginsCore.PhaseInfoRetryableFailureWithCleanup(finalReason, GetMessageAfterGracePeriod(finalMessage, gracePeriod), &pluginsCore.TaskInfo{ + OccurredAt: &t, + }), t + } + + return pluginsCore.PhaseInfoInitializing( + t, + pluginsCore.DefaultPhaseVersion, + fmt.Sprintf("[%s]: %s", finalReason, finalMessage), + &pluginsCore.TaskInfo{OccurredAt: &t}, + ), t + + default: + // Since we are not checking for all error states, we may end up perpetually + // in the queued state returned at the bottom of this 
							// (continuation of the comment above) ... until the Pod is reaped
							// by K8s and we get elusive 'pod not found' errors.
							// So by default, if the container is not waiting with the
							// PodInitializing/ContainerCreating reasons, then we will assume a
							// failure reason, and fail instantly.
							return pluginsCore.PhaseInfoSystemRetryableFailureWithCleanup(finalReason, finalMessage, &pluginsCore.TaskInfo{
								OccurredAt: &t,
							}), t
						}
					}
				}
			}
		}
	}

	return phaseInfo, t
}

// GetMessageAfterGracePeriod prefixes message with a note that gracePeriod has
// elapsed. The "|" separator is part of the emitted message format — do not
// change it without checking consumers of this message.
func GetMessageAfterGracePeriod(message string, gracePeriod time.Duration) string {
	return fmt.Sprintf("Grace period [%s] exceeded|%s", gracePeriod, message)
}

// DemystifySuccess inspects a pod that reported success and demotes it to a
// retryable failure if any container (init, regular, or ephemeral) was
// OOMKilled despite the pod-level success.
func DemystifySuccess(status v1.PodStatus, info pluginsCore.TaskInfo) (pluginsCore.PhaseInfo, error) {
	// NOTE(review): the inner append may write into spare capacity of the
	// backing array of status.InitContainerStatuses; harmless here because the
	// combined slice is only read — confirm if that ever changes.
	for _, status := range append(
		append(status.InitContainerStatuses, status.ContainerStatuses...), status.EphemeralContainerStatuses...) {
		if status.State.Terminated != nil && strings.Contains(status.State.Terminated.Reason, OOMKilled) {
			return pluginsCore.PhaseInfoRetryableFailure(OOMKilled,
				"Pod reported success despite being OOMKilled", &info), nil
		}
	}
	return pluginsCore.PhaseInfoSuccess(&info), nil
}

// DeterminePrimaryContainerPhase as the name suggests, given all the containers, will return a pluginsCore.PhaseInfo object
// corresponding to the phase of the primaryContainer which is identified using the provided name.
// This is useful in case of sidecars or pod jobs, where Flyte will monitor successful exit of a single container.
func DeterminePrimaryContainerPhase(ctx context.Context, primaryContainerName string, statuses []v1.ContainerStatus, info *pluginsCore.TaskInfo) pluginsCore.PhaseInfo {
	for _, s := range statuses {
		if s.Name == primaryContainerName {
			// Waiting or Running means the task is still in flight.
			if s.State.Waiting != nil || s.State.Running != nil {
				return pluginsCore.PhaseInfoRunning(pluginsCore.DefaultPhaseVersion, info)
			}

			if s.State.Terminated != nil {
				message := fmt.Sprintf("\r\n[%v] terminated with exit code (%v). Reason [%v]. Message: \n%v.",
					s.Name,
					s.State.Terminated.ExitCode,
					s.State.Terminated.Reason,
					s.State.Terminated.Message)

				var phaseInfo pluginsCore.PhaseInfo
				switch {
				case strings.Contains(s.State.Terminated.Reason, OOMKilled):
					// OOMKilled typically results in a SIGKILL signal, but we classify it as a user error
					phaseInfo = pluginsCore.PhaseInfoRetryableFailure(
						s.State.Terminated.Reason, message, info)
				case isTerminatedWithSigKill(s.State):
					// If the primary container exited with SIGKILL, we treat it as a system-level error
					// (such as node termination or preemption). This best-effort approach accepts some false positives.
					// In the case that node preemption terminates the kubelet *before* the kubelet is able to persist
					// the pod's state to the Kubernetes API server, we rely on Kubernetes to eventually resolve
					// the state. This will enable Propeller to eventually query the API server and determine that
					// the pod no longer exists, which will then be counted as a system error.
					phaseInfo = pluginsCore.PhaseInfoSystemRetryableFailure(
						s.State.Terminated.Reason, message, info)
				case s.State.Terminated.ExitCode != 0:
					// Any other non-zero exit of the primary container is a user error.
					phaseInfo = pluginsCore.PhaseInfoRetryableFailure(
						s.State.Terminated.Reason, message, info)
				default:
					// Exit code 0 and none of the special reasons above: success.
					return pluginsCore.PhaseInfoSuccess(info)
				}

				logger.Warnf(ctx, "Primary container terminated with issue. Message: '%s'", message)
				return phaseInfo
			}
		}
	}

	// If for some reason we can't find the primary container, always just return a permanent failure
	return pluginsCore.PhaseInfoFailure(PrimaryContainerNotFound,
		fmt.Sprintf("Primary container [%s] not found in pod's container statuses", primaryContainerName), info)
}

// DemystifyFailure resolves the various Kubernetes pod failure modes to determine
// the most appropriate course of action
func DemystifyFailure(ctx context.Context, status v1.PodStatus, info pluginsCore.TaskInfo, primaryContainerName string) (pluginsCore.PhaseInfo, error) {
	code := "UnknownError"
	message := "Pod failed. No message received from kubernetes."
	if len(status.Reason) > 0 {
		code = status.Reason
	}

	if len(status.Message) > 0 {
		message = status.Message
	}

	//
	// Handle known pod statuses
	//
	// This is useful for handling node interruption events
	// which can be different between providers and versions of Kubernetes. Given that
	// we don't have a consistent way of detecting interruption events, we will be
	// documenting all possibilities as follows. We will also be handling these as
	// system retryable failures that do not count towards user-specified task retries,
	// for now. This is required for FlytePropeller to correctly transition
	// interruptible nodes to non-interruptible ones after the
	// `interruptible-failure-threshold` is exceeded. See:
	// https://github.com/flyteorg/flytepropeller/blob/a3c6e91f19c19601a957b29891437112868845de/pkg/controller/nodes/node_exec_context.go#L213

	// GKE (>= v1.20) Kubelet graceful node shutdown
	// See: https://cloud.google.com/kubernetes-engine/docs/how-to/preemptible-vms#graceful-shutdown
	// Cloud audit log for patch of Pod object during graceful node shutdown:
	//   request: {
	//     @type: "k8s.io/Patch"
	//     status: {
	//       conditions: null
	//       message: "Pod Node is in progress of shutting down, not admitting any new pods"
	//       phase: "Failed"
	//       qosClass: null
	//       reason: "Shutdown"
	//       startTime: "2022-01-30T14:24:07Z"
	//     }
	//   }
	//

	var isSystemError bool
	// In some versions of GKE the reason can also be "Terminated" or "NodeShutdown"
	if retryableStatusReasons.Has(code) {
		isSystemError = true
	}

	//
	// Handle known container statuses
	//
	for _, c := range append(
		append(status.InitContainerStatuses, status.ContainerStatuses...), status.EphemeralContainerStatuses...) {
		var containerState v1.ContainerState
		// Prefer the last termination state if present; otherwise fall back to
		// the current state when it is terminated.
		if c.LastTerminationState.Terminated != nil {
			containerState = c.LastTerminationState
		} else if c.State.Terminated != nil {
			containerState = c.State
		}
		if containerState.Terminated != nil {
			if strings.Contains(containerState.Terminated.Reason, OOMKilled) {
				code = OOMKilled
			} else if isTerminatedWithSigKill(containerState) {
				// in some setups, node termination sends SIGKILL to all the containers running on that node. Capturing and
				// tagging that correctly.
				code = Interrupted
				// If the primary container exited with SIGKILL, we treat it as a system-level error
				// (such as node termination or preemption). This best-effort approach accepts some false positives.
				// In the case that node preemption terminates the kubelet *before* the kubelet is able to persist
				// the pod's state to the Kubernetes API server, we rely on Kubernetes to eventually resolve
				// the state. This will enable Propeller to eventually query the API server and determine that
				// the pod no longer exists, which will then be counted as a system error.
				if c.Name == primaryContainerName {
					isSystemError = true
				}
			}

			if containerState.Terminated.ExitCode == 0 {
				message += fmt.Sprintf("\r\n[%v] terminated with ExitCode 0.", c.Name)
			} else {
				message += fmt.Sprintf("\r\n[%v] terminated with exit code (%v). Reason [%v]. Message: \n%v.",
					c.Name,
					containerState.Terminated.ExitCode,
					containerState.Terminated.Reason,
					containerState.Terminated.Message)
			}
		}
	}

	// If the code remains 'UnknownError', it indicates that the kubelet did not have a chance
	// to record a more specific failure before the node was terminated or preempted.
	// In such cases, we classify the error as system-level and accept false positives
	if code == "UnknownError" {
		isSystemError = true
	}

	if isSystemError {
		logger.Warnf(ctx, "Pod failed with a system error. Code: %s, Message: %s", code, message)
		return pluginsCore.PhaseInfoSystemRetryableFailure(Interrupted, message, &info), nil
	}

	logger.Warnf(ctx, "Pod failed with a user error. Code: %s, Message: %s", code, message)
	return pluginsCore.PhaseInfoRetryableFailure(code, message, &info), nil
}

// GetLastTransitionOccurredAt returns the most recent container state-transition
// timestamp observed on the pod (a Running start or a Terminated finish);
// see the remainder of the body below for the zero-time fallback.
func GetLastTransitionOccurredAt(pod *v1.Pod) metav1.Time {
	var lastTransitionTime metav1.Time
	// NOTE(review): append may mutate spare capacity of the backing array of
	// pod.Status.ContainerStatuses — confirm no caller relies on that slice
	// being unshared afterwards.
	containerStatuses := append(pod.Status.ContainerStatuses, pod.Status.InitContainerStatuses...)
	for _, containerStatus := range containerStatuses {
		if r := containerStatus.State.Running; r != nil {
			if r.StartedAt.Unix() > lastTransitionTime.Unix() {
				lastTransitionTime = r.StartedAt
			}
		} else if r := containerStatus.State.Terminated; r != nil {
			if r.FinishedAt.Unix() > lastTransitionTime.Unix() {
				lastTransitionTime = r.FinishedAt
			}
		}
	}

	// No running/terminated container timestamps found: fall back to "now".
	if lastTransitionTime.IsZero() {
		lastTransitionTime = metav1.NewTime(time.Now())
	}

	return lastTransitionTime
}

// GetReportedAt returns the latest LastTransitionTime among pod conditions with
// Reason "PodCompleted", Type PodReady and Status False; zero time if none match.
func GetReportedAt(pod *v1.Pod) metav1.Time {
	var reportedAt metav1.Time
	for _, condition := range pod.Status.Conditions {
		if condition.Reason == "PodCompleted" && condition.Type == v1.PodReady && condition.Status == v1.ConditionFalse {
			if condition.LastTransitionTime.Unix() > reportedAt.Unix() {
				reportedAt = condition.LastTransitionTime
			}
		}
	}

	return reportedAt
}

// GetPrimaryContainerName resolves the primary container name, in priority order:
//  1. the "kubectl.kubernetes.io/default-container" annotation,
//  2. the PrimaryContainerKey annotation,
//  3. a container whose name equals the pod name,
//  4. the first container in the spec; "" if the spec has no containers.
func GetPrimaryContainerName(pod *v1.Pod) string {
	defaultContainer := pod.Annotations["kubectl.kubernetes.io/default-container"]
	if defaultContainer != "" {
		return defaultContainer
	}
	primaryContainer := pod.Annotations[PrimaryContainerKey]
	if primaryContainer != "" {
		return primaryContainer
	}

	for _, container := range pod.Spec.Containers {
		if container.Name == pod.Name {
			return container.Name
		}
	}

	// default to just 1st container name
	if len(pod.Spec.Containers) > 0 {
		return pod.Spec.Containers[0].Name
	}
	return ""
}

// makeContainerContexts converts container statuses into ContainerContext
// entries, carrying start/end timestamps where the container state exposes them.
func makeContainerContexts(statuses []v1.ContainerStatus) []*core.ContainerContext {
	ctxs := make([]*core.ContainerContext, len(statuses))
	for i, status := range statuses {
		var startTime, endTime *timestamppb.Timestamp
		if status.State.Running != nil {
			startTime = timestamppb.New(status.State.Running.StartedAt.Time)
		}
		if status.State.Terminated != nil {
			// Terminated takes precedence for startTime if both were somehow set.
			startTime = timestamppb.New(status.State.Terminated.StartedAt.Time)
			endTime = timestamppb.New(status.State.Terminated.FinishedAt.Time)
		}
		ctxs[i] = &core.ContainerContext{
			ContainerName: status.Name,
			Process: &core.ContainerContext_ProcessContext{
				ContainerStartTime: startTime,
				ContainerEndTime:   endTime,
			},
		}
	}
	return ctxs
}

// BuildPodLogContext assembles a PodLogContext from the pod's identity, its
// resolved primary container, and per-container timing contexts.
func BuildPodLogContext(pod *v1.Pod) *core.PodLogContext {
	return &core.PodLogContext{
		Namespace:            pod.Namespace,
		PodName:              pod.Name,
		PrimaryContainerName: GetPrimaryContainerName(pod),
		Containers:           makeContainerContexts(pod.Status.ContainerStatuses),
		InitContainers:       makeContainerContexts(pod.Status.InitContainerStatuses),
	}
}

// isTerminatedWithSigKill reports whether the container terminated with a
// SIGKILL exit code — either the SIGKILL or unsignedSIGKILL constant, both
// defined elsewhere in this package.
func isTerminatedWithSigKill(state v1.ContainerState) bool {
	return state.Terminated != nil && (state.Terminated.ExitCode == SIGKILL || state.Terminated.ExitCode == unsignedSIGKILL)
}
diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/pod_helper_test.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/pod_helper_test.go
new file mode 100644
index 0000000000..74227836f7
--- /dev/null
+++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/pod_helper_test.go
@@ -0,0 +1,4093 @@
package flytek8s

import (
	"context"
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"path/filepath"
	"reflect"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	pluginsCore "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core"
	pluginsCoreMock "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core/mocks"
	"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/flytek8s/config"
	"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/io"
	pluginsIOMock "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/io/mocks"
	config1 "github.com/flyteorg/flyte/v2/flytestdlib/config"
	"github.com/flyteorg/flyte/v2/flytestdlib/config/viper"
	"github.com/flyteorg/flyte/v2/flytestdlib/storage"
	"github.com/flyteorg/flyte/v2/flytestdlib/utils"
	"github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core"
)

// dummyTaskExecutionMetadata builds a mocked TaskExecutionMetadata with fixed
// namespace/labels/annotations/owner, the provided resources, extended
// resources, container image and pod template wired into the overrides mock.
func dummyTaskExecutionMetadata(resources *v1.ResourceRequirements, extendedResources *core.ExtendedResources, containerImage string, podTemplate *core.K8SPod) pluginsCore.TaskExecutionMetadata {
	taskExecutionMetadata := &pluginsCoreMock.TaskExecutionMetadata{}
	taskExecutionMetadata.On("GetNamespace").Return("test-namespace")
	taskExecutionMetadata.On("GetAnnotations").Return(map[string]string{"annotation-1": "val1"})
	taskExecutionMetadata.On("GetLabels").Return(map[string]string{"label-1": "val1"})
	taskExecutionMetadata.On("GetOwnerReference").Return(metav1.OwnerReference{
		Kind: "node",
		Name: "blah",
	})
	taskExecutionMetadata.On("GetK8sServiceAccount").Return("service-account")
	tID := &pluginsCoreMock.TaskExecutionID{}
	tID.On("GetID").Return(core.TaskExecutionIdentifier{
		NodeExecutionId: &core.NodeExecutionIdentifier{
			ExecutionId: &core.WorkflowExecutionIdentifier{
				Name:    "my_name",
				Project: "my_project",
				Domain:  "my_domain",
			},
		},
	})
	tID.On("GetGeneratedName").Return("some-acceptable-name")
	taskExecutionMetadata.On("GetTaskExecutionID").Return(tID)

	to := &pluginsCoreMock.TaskOverrides{}
	to.On("GetResources").Return(resources)
	to.On("GetExtendedResources").Return(extendedResources)
	to.On("GetContainerImage").Return(containerImage)
	to.On("GetPodTemplate").Return(podTemplate)
	taskExecutionMetadata.On("GetOverrides").Return(to)
	taskExecutionMetadata.On("IsInterruptible").Return(true)
	taskExecutionMetadata.OnGetPlatformResources().Return(&v1.ResourceRequirements{})
	taskExecutionMetadata.OnGetEnvironmentVariables().Return(nil)
	taskExecutionMetadata.OnGetConsoleURL().Return("")
	return taskExecutionMetadata
}

// dummyTaskTemplate returns a minimal container-target task template used by
// the pod-construction tests.
func dummyTaskTemplate() *core.TaskTemplate {
	return &core.TaskTemplate{
		Type: "test",
		Target: &core.TaskTemplate_Container{
			Container: &core.Container{
				Command: []string{"command"},
				Args:    []string{"{{.Input}}"},
			},
		},
	}
}

// dummyInputReader mocks an input reader with fixed data references and an
// empty literal map for any input match.
func dummyInputReader() io.InputReader {
	inputReader := &pluginsIOMock.InputReader{}
	inputReader.OnGetInputPath().Return(storage.DataReference("test-data-reference"))
	inputReader.OnGetInputPrefixPath().Return(storage.DataReference("test-data-reference-prefix"))
	inputReader.OnGetMatch(mock.Anything).Return(&core.LiteralMap{}, nil)
	return inputReader
}

// dummyExecContext wires the mocked metadata, input reader, output writer and
// task reader into a complete TaskExecutionContext mock.
func dummyExecContext(taskTemplate *core.TaskTemplate, r *v1.ResourceRequirements, rm *core.ExtendedResources, containerImage string, podTemplate *core.K8SPod) pluginsCore.TaskExecutionContext {
	ow := &pluginsIOMock.OutputWriter{}
	ow.OnGetOutputPrefixPath().Return("")
	ow.OnGetRawOutputPrefix().Return("")
	ow.OnGetCheckpointPrefix().Return("/checkpoint")
	ow.OnGetPreviousCheckpointsPrefix().Return("/prev")

	tCtx := &pluginsCoreMock.TaskExecutionContext{}
	tCtx.OnTaskExecutionMetadata().Return(dummyTaskExecutionMetadata(r, rm, containerImage, podTemplate))
	tCtx.OnInputReader().Return(dummyInputReader())
	tCtx.OnOutputWriter().Return(ow)

	taskReader := &pluginsCoreMock.TaskReader{}
	taskReader.On("Read", mock.Anything).Return(taskTemplate, nil)
	tCtx.OnTaskReader().Return(taskReader)
	return tCtx
}

// TestPodSetup loads the test configuration and then runs the
// pod-construction subtests that depend on it.
func TestPodSetup(t *testing.T) {
	configAccessor := viper.NewAccessor(config1.Options{
		StrictMode:  true,
		SearchPaths: []string{"testdata/config.yaml"},
	})
	err := configAccessor.UpdateConfig(context.TODO())
	assert.NoError(t, err)

	t.Run("ApplyInterruptibleNodeAffinity", TestApplyInterruptibleNodeAffinity)
	t.Run("UpdatePod", updatePod)
	t.Run("ToK8sPodInterruptible", toK8sPodInterruptible)
}

// TestAddRequiredNodeSelectorRequirements verifies that required node selector
// requirements are appended to both empty and pre-populated affinities.
func TestAddRequiredNodeSelectorRequirements(t *testing.T) {
	t.Run("with empty node affinity", func(t *testing.T) {
		affinity := v1.Affinity{}
		nst := v1.NodeSelectorRequirement{
			Key:      "new",
			Operator: v1.NodeSelectorOpIn,
			Values:   []string{"new"},
		}
		AddRequiredNodeSelectorRequirements(&affinity,
nst) + assert.EqualValues( + t, + []v1.NodeSelectorTerm{ + v1.NodeSelectorTerm{ + MatchExpressions: []v1.NodeSelectorRequirement{ + v1.NodeSelectorRequirement{ + Key: "new", + Operator: v1.NodeSelectorOpIn, + Values: []string{"new"}, + }, + }, + }, + }, + affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms, + ) + }) + + t.Run("with existing node affinity", func(t *testing.T) { + affinity := v1.Affinity{ + NodeAffinity: &v1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{ + v1.NodeSelectorTerm{ + MatchExpressions: []v1.NodeSelectorRequirement{ + v1.NodeSelectorRequirement{ + Key: "required", + Operator: v1.NodeSelectorOpIn, + Values: []string{"required"}, + }, + }, + }, + }, + }, + PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{ + v1.PreferredSchedulingTerm{ + Weight: 1, + Preference: v1.NodeSelectorTerm{ + MatchExpressions: []v1.NodeSelectorRequirement{ + v1.NodeSelectorRequirement{ + Key: "preferred", + Operator: v1.NodeSelectorOpIn, + Values: []string{"preferred"}, + }, + }, + }, + }, + }, + }, + } + nst := v1.NodeSelectorRequirement{ + Key: "new", + Operator: v1.NodeSelectorOpIn, + Values: []string{"new"}, + } + AddRequiredNodeSelectorRequirements(&affinity, nst) + assert.EqualValues( + t, + []v1.NodeSelectorTerm{ + v1.NodeSelectorTerm{ + MatchExpressions: []v1.NodeSelectorRequirement{ + v1.NodeSelectorRequirement{ + Key: "required", + Operator: v1.NodeSelectorOpIn, + Values: []string{"required"}, + }, + v1.NodeSelectorRequirement{ + Key: "new", + Operator: v1.NodeSelectorOpIn, + Values: []string{"new"}, + }, + }, + }, + }, + affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms, + ) + assert.EqualValues( + t, + []v1.PreferredSchedulingTerm{ + v1.PreferredSchedulingTerm{ + Weight: 1, + Preference: v1.NodeSelectorTerm{ + MatchExpressions: []v1.NodeSelectorRequirement{ + 
v1.NodeSelectorRequirement{ + Key: "preferred", + Operator: v1.NodeSelectorOpIn, + Values: []string{"preferred"}, + }, + }, + }, + }, + }, + affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution, + ) + }) +} + +func TestAddPreferredNodeSelectorRequirements(t *testing.T) { + t.Run("with empty node affinity", func(t *testing.T) { + affinity := v1.Affinity{} + nst := v1.NodeSelectorRequirement{ + Key: "new", + Operator: v1.NodeSelectorOpIn, + Values: []string{"new"}, + } + AddPreferredNodeSelectorRequirements(&affinity, 10, nst) + assert.EqualValues( + t, + []v1.PreferredSchedulingTerm{ + v1.PreferredSchedulingTerm{ + Weight: 10, + Preference: v1.NodeSelectorTerm{ + MatchExpressions: []v1.NodeSelectorRequirement{ + v1.NodeSelectorRequirement{ + Key: "new", + Operator: v1.NodeSelectorOpIn, + Values: []string{"new"}, + }, + }, + }, + }, + }, + affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution, + ) + }) + + t.Run("with existing node affinity", func(t *testing.T) { + affinity := v1.Affinity{ + NodeAffinity: &v1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{ + v1.NodeSelectorTerm{ + MatchExpressions: []v1.NodeSelectorRequirement{ + v1.NodeSelectorRequirement{ + Key: "required", + Operator: v1.NodeSelectorOpIn, + Values: []string{"required"}, + }, + }, + }, + }, + }, + PreferredDuringSchedulingIgnoredDuringExecution: []v1.PreferredSchedulingTerm{ + v1.PreferredSchedulingTerm{ + Weight: 1, + Preference: v1.NodeSelectorTerm{ + MatchExpressions: []v1.NodeSelectorRequirement{ + v1.NodeSelectorRequirement{ + Key: "preferred", + Operator: v1.NodeSelectorOpIn, + Values: []string{"preferred"}, + }, + }, + }, + }, + }, + }, + } + nst := v1.NodeSelectorRequirement{ + Key: "new", + Operator: v1.NodeSelectorOpIn, + Values: []string{"new"}, + } + AddPreferredNodeSelectorRequirements(&affinity, 10, nst) + assert.EqualValues( + t, + []v1.NodeSelectorTerm{ + 
v1.NodeSelectorTerm{ + MatchExpressions: []v1.NodeSelectorRequirement{ + v1.NodeSelectorRequirement{ + Key: "required", + Operator: v1.NodeSelectorOpIn, + Values: []string{"required"}, + }, + }, + }, + }, + affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms, + ) + assert.EqualValues( + t, + []v1.PreferredSchedulingTerm{ + v1.PreferredSchedulingTerm{ + Weight: 1, + Preference: v1.NodeSelectorTerm{ + MatchExpressions: []v1.NodeSelectorRequirement{ + v1.NodeSelectorRequirement{ + Key: "preferred", + Operator: v1.NodeSelectorOpIn, + Values: []string{"preferred"}, + }, + }, + }, + }, + v1.PreferredSchedulingTerm{ + Weight: 10, + Preference: v1.NodeSelectorTerm{ + MatchExpressions: []v1.NodeSelectorRequirement{ + v1.NodeSelectorRequirement{ + Key: "new", + Operator: v1.NodeSelectorOpIn, + Values: []string{"new"}, + }, + }, + }, + }, + }, + affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution, + ) + }) +} + +func TestApplyInterruptibleNodeAffinity(t *testing.T) { + t.Run("WithInterruptibleNodeSelectorRequirement", func(t *testing.T) { + podSpec := v1.PodSpec{} + ApplyInterruptibleNodeAffinity(true, &podSpec) + assert.EqualValues( + t, + []v1.NodeSelectorTerm{ + v1.NodeSelectorTerm{ + MatchExpressions: []v1.NodeSelectorRequirement{ + v1.NodeSelectorRequirement{ + Key: "x/interruptible", + Operator: v1.NodeSelectorOpIn, + Values: []string{"true"}, + }, + }, + }, + }, + podSpec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms, + ) + }) + + t.Run("WithNonInterruptibleNodeSelectorRequirement", func(t *testing.T) { + podSpec := v1.PodSpec{} + ApplyInterruptibleNodeAffinity(false, &podSpec) + assert.EqualValues( + t, + []v1.NodeSelectorTerm{ + v1.NodeSelectorTerm{ + MatchExpressions: []v1.NodeSelectorRequirement{ + v1.NodeSelectorRequirement{ + Key: "x/interruptible", + Operator: v1.NodeSelectorOpDoesNotExist, + }, + }, + }, + }, + 
podSpec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms, + ) + }) + + t.Run("WithExistingAffinityWithInterruptibleNodeSelectorRequirement", func(t *testing.T) { + podSpec := v1.PodSpec{ + Affinity: &v1.Affinity{ + NodeAffinity: &v1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{ + v1.NodeSelectorTerm{ + MatchExpressions: []v1.NodeSelectorRequirement{ + v1.NodeSelectorRequirement{ + Key: "node selector requirement", + Operator: v1.NodeSelectorOpIn, + Values: []string{"exists"}, + }, + }, + }, + }, + }, + }, + }, + } + ApplyInterruptibleNodeAffinity(true, &podSpec) + assert.EqualValues( + t, + []v1.NodeSelectorTerm{ + v1.NodeSelectorTerm{ + MatchExpressions: []v1.NodeSelectorRequirement{ + v1.NodeSelectorRequirement{ + Key: "node selector requirement", + Operator: v1.NodeSelectorOpIn, + Values: []string{"exists"}, + }, + v1.NodeSelectorRequirement{ + Key: "x/interruptible", + Operator: v1.NodeSelectorOpIn, + Values: []string{"true"}, + }, + }, + }, + }, + podSpec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms, + ) + }) +} + +func TestApplyExtendedResourcesOverrides(t *testing.T) { + t4 := &core.ExtendedResources{ + GpuAccelerator: &core.GPUAccelerator{ + Device: "nvidia-tesla-t4", + }, + } + partitionedA100 := &core.ExtendedResources{ + GpuAccelerator: &core.GPUAccelerator{ + Device: "nvidia-tesla-a100", + PartitionSizeValue: &core.GPUAccelerator_PartitionSize{ + PartitionSize: "1g.5gb", + }, + }, + } + unpartitionedA100 := &core.ExtendedResources{ + GpuAccelerator: &core.GPUAccelerator{ + Device: "nvidia-tesla-a100", + PartitionSizeValue: &core.GPUAccelerator_Unpartitioned{ + Unpartitioned: true, + }, + }, + } + + t.Run("base and overrides are nil", func(t *testing.T) { + final := ApplyExtendedResourcesOverrides(nil, nil) + assert.NotNil(t, final) + }) + + t.Run("base is nil", func(t *testing.T) { + final := 
ApplyExtendedResourcesOverrides(nil, t4) + assert.EqualValues( + t, + t4.GetGpuAccelerator(), + final.GetGpuAccelerator(), + ) + }) + + t.Run("overrides is nil", func(t *testing.T) { + final := ApplyExtendedResourcesOverrides(t4, nil) + assert.EqualValues( + t, + t4.GetGpuAccelerator(), + final.GetGpuAccelerator(), + ) + }) + + t.Run("merging", func(t *testing.T) { + final := ApplyExtendedResourcesOverrides(partitionedA100, unpartitionedA100) + assert.EqualValues( + t, + unpartitionedA100.GetGpuAccelerator(), + final.GetGpuAccelerator(), + ) + }) +} + +func TestApplyGPUNodeSelectors(t *testing.T) { + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + GpuResourceName: "nvidia.com/gpu", + GpuDeviceNodeLabel: "gpu-device", + GpuPartitionSizeNodeLabel: "gpu-partition-size", + })) + + basePodSpec := &v1.PodSpec{ + Containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + "nvidia.com/gpu": resource.MustParse("1"), + }, + }, + }, + }, + } + + t.Run("without gpu resource", func(t *testing.T) { + podSpec := &v1.PodSpec{} + ApplyGPUNodeSelectors( + podSpec, + &core.GPUAccelerator{Device: "nvidia-tesla-a100"}, + ) + assert.Nil(t, podSpec.Affinity) + }) + + t.Run("with gpu device spec only", func(t *testing.T) { + podSpec := basePodSpec.DeepCopy() + ApplyGPUNodeSelectors( + podSpec, + &core.GPUAccelerator{Device: "nvidia-tesla-a100"}, + ) + assert.EqualValues( + t, + []v1.NodeSelectorTerm{ + v1.NodeSelectorTerm{ + MatchExpressions: []v1.NodeSelectorRequirement{ + v1.NodeSelectorRequirement{ + Key: "gpu-device", + Operator: v1.NodeSelectorOpIn, + Values: []string{"nvidia-tesla-a100"}, + }, + }, + }, + }, + podSpec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms, + ) + assert.EqualValues( + t, + []v1.Toleration{ + { + Key: "gpu-device", + Value: "nvidia-tesla-a100", + Operator: v1.TolerationOpEqual, + Effect: v1.TaintEffectNoSchedule, + }, + }, + podSpec.Tolerations, + ) + }) + + 
t.Run("with gpu device and partition size spec", func(t *testing.T) { + podSpec := basePodSpec.DeepCopy() + ApplyGPUNodeSelectors( + podSpec, + &core.GPUAccelerator{ + Device: "nvidia-tesla-a100", + PartitionSizeValue: &core.GPUAccelerator_PartitionSize{ + PartitionSize: "1g.5gb", + }, + }, + ) + assert.EqualValues( + t, + []v1.NodeSelectorTerm{ + v1.NodeSelectorTerm{ + MatchExpressions: []v1.NodeSelectorRequirement{ + v1.NodeSelectorRequirement{ + Key: "gpu-device", + Operator: v1.NodeSelectorOpIn, + Values: []string{"nvidia-tesla-a100"}, + }, + v1.NodeSelectorRequirement{ + Key: "gpu-partition-size", + Operator: v1.NodeSelectorOpIn, + Values: []string{"1g.5gb"}, + }, + }, + }, + }, + podSpec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms, + ) + assert.EqualValues( + t, + []v1.Toleration{ + { + Key: "gpu-device", + Value: "nvidia-tesla-a100", + Operator: v1.TolerationOpEqual, + Effect: v1.TaintEffectNoSchedule, + }, + { + Key: "gpu-partition-size", + Value: "1g.5gb", + Operator: v1.TolerationOpEqual, + Effect: v1.TaintEffectNoSchedule, + }, + }, + podSpec.Tolerations, + ) + }) + + t.Run("with unpartitioned gpu device spec", func(t *testing.T) { + podSpec := basePodSpec.DeepCopy() + ApplyGPUNodeSelectors( + podSpec, + &core.GPUAccelerator{ + Device: "nvidia-tesla-a100", + PartitionSizeValue: &core.GPUAccelerator_Unpartitioned{ + Unpartitioned: true, + }, + }, + ) + assert.EqualValues( + t, + []v1.NodeSelectorTerm{ + v1.NodeSelectorTerm{ + MatchExpressions: []v1.NodeSelectorRequirement{ + v1.NodeSelectorRequirement{ + Key: "gpu-device", + Operator: v1.NodeSelectorOpIn, + Values: []string{"nvidia-tesla-a100"}, + }, + v1.NodeSelectorRequirement{ + Key: "gpu-partition-size", + Operator: v1.NodeSelectorOpDoesNotExist, + }, + }, + }, + }, + podSpec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms, + ) + assert.EqualValues( + t, + []v1.Toleration{ + { + Key: "gpu-device", + Value: 
"nvidia-tesla-a100", + Operator: v1.TolerationOpEqual, + Effect: v1.TaintEffectNoSchedule, + }, + }, + podSpec.Tolerations, + ) + }) + + t.Run("with unpartitioned gpu device spec with custom node selector and toleration", func(t *testing.T) { + gpuUnpartitionedNodeSelectorRequirement := v1.NodeSelectorRequirement{ + Key: "gpu-unpartitioned", + Operator: v1.NodeSelectorOpIn, + Values: []string{"true"}, + } + gpuUnpartitionedToleration := v1.Toleration{ + Key: "gpu-unpartitioned", + Value: "true", + Operator: v1.TolerationOpEqual, + Effect: v1.TaintEffectNoSchedule, + } + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + GpuResourceName: "nvidia.com/gpu", + GpuDeviceNodeLabel: "gpu-device", + GpuPartitionSizeNodeLabel: "gpu-partition-size", + GpuUnpartitionedNodeSelectorRequirement: &gpuUnpartitionedNodeSelectorRequirement, + GpuUnpartitionedToleration: &gpuUnpartitionedToleration, + })) + + podSpec := basePodSpec.DeepCopy() + ApplyGPUNodeSelectors( + podSpec, + &core.GPUAccelerator{ + Device: "nvidia-tesla-a100", + PartitionSizeValue: &core.GPUAccelerator_Unpartitioned{ + Unpartitioned: true, + }, + }, + ) + assert.EqualValues( + t, + []v1.NodeSelectorTerm{ + v1.NodeSelectorTerm{ + MatchExpressions: []v1.NodeSelectorRequirement{ + v1.NodeSelectorRequirement{ + Key: "gpu-device", + Operator: v1.NodeSelectorOpIn, + Values: []string{"nvidia-tesla-a100"}, + }, + gpuUnpartitionedNodeSelectorRequirement, + }, + }, + }, + podSpec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms, + ) + assert.EqualValues( + t, + []v1.Toleration{ + { + Key: "gpu-device", + Value: "nvidia-tesla-a100", + Operator: v1.TolerationOpEqual, + Effect: v1.TaintEffectNoSchedule, + }, + gpuUnpartitionedToleration, + }, + podSpec.Tolerations, + ) + }) + + t.Run("with friendly device name normalization - NVIDIA H100", func(t *testing.T) { + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + GpuResourceName: "nvidia.com/gpu", + 
GpuDeviceNodeLabel: "gpu-device", + GpuPartitionSizeNodeLabel: "gpu-partition-size", + AcceleratorDevices: map[string]string{ + "H100": "nvidia-h100", + "A100": "nvidia-tesla-a100", + }, + })) + + podSpec := basePodSpec.DeepCopy() + ApplyGPUNodeSelectors( + podSpec, + &core.GPUAccelerator{Device: "H100"}, // Friendly name + ) + assert.EqualValues( + t, + []v1.NodeSelectorTerm{ + v1.NodeSelectorTerm{ + MatchExpressions: []v1.NodeSelectorRequirement{ + v1.NodeSelectorRequirement{ + Key: "gpu-device", + Operator: v1.NodeSelectorOpIn, + Values: []string{"nvidia-h100"}, // Normalized name + }, + }, + }, + }, + podSpec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms, + ) + assert.EqualValues( + t, + []v1.Toleration{ + { + Key: "gpu-device", + Value: "nvidia-h100", // Normalized name + Operator: v1.TolerationOpEqual, + Effect: v1.TaintEffectNoSchedule, + }, + }, + podSpec.Tolerations, + ) + }) + + t.Run("with friendly device name normalization - case insensitive", func(t *testing.T) { + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + GpuResourceName: "nvidia.com/gpu", + GpuDeviceNodeLabel: "gpu-device", + GpuPartitionSizeNodeLabel: "gpu-partition-size", + AcceleratorDevices: map[string]string{ + "H100": "nvidia-h100", + }, + })) + + podSpec := basePodSpec.DeepCopy() + ApplyGPUNodeSelectors( + podSpec, + &core.GPUAccelerator{Device: "h100"}, // Lowercase friendly name + ) + assert.EqualValues( + t, + []v1.NodeSelectorTerm{ + v1.NodeSelectorTerm{ + MatchExpressions: []v1.NodeSelectorRequirement{ + v1.NodeSelectorRequirement{ + Key: "gpu-device", + Operator: v1.NodeSelectorOpIn, + Values: []string{"nvidia-h100"}, // Still normalized correctly + }, + }, + }, + }, + podSpec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms, + ) + }) + + t.Run("with friendly device name normalization - Google TPU", func(t *testing.T) { + // Configure for TPU usage + assert.NoError(t, 
config.SetK8sPluginConfig(&config.K8sPluginConfig{ + GpuResourceName: "google.com/tpu", + GpuDeviceNodeLabel: "tpu-device", + GpuPartitionSizeNodeLabel: "tpu-partition-size", + AcceleratorDevices: map[string]string{ + "V5E": "tpu-v5-lite-podslice", + "V5P": "tpu-v5p-slice", + }, + })) + + tpuPodSpec := &v1.PodSpec{ + Containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + "google.com/tpu": resource.MustParse("4"), + }, + }, + }, + }, + } + + ApplyGPUNodeSelectors( + tpuPodSpec, + &core.GPUAccelerator{Device: "V5E"}, // Friendly name for TPU + ) + assert.EqualValues( + t, + []v1.NodeSelectorTerm{ + v1.NodeSelectorTerm{ + MatchExpressions: []v1.NodeSelectorRequirement{ + v1.NodeSelectorRequirement{ + Key: "tpu-device", + Operator: v1.NodeSelectorOpIn, + Values: []string{"tpu-v5-lite-podslice"}, // Normalized TPU name + }, + }, + }, + }, + tpuPodSpec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms, + ) + }) + + t.Run("with unmapped device uses device name as-is", func(t *testing.T) { + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + GpuResourceName: "nvidia.com/gpu", + GpuDeviceNodeLabel: "gpu-device", + GpuPartitionSizeNodeLabel: "gpu-partition-size", + AcceleratorDevices: map[string]string{}, // Empty mapping + })) + + podSpec := basePodSpec.DeepCopy() + ApplyGPUNodeSelectors( + podSpec, + &core.GPUAccelerator{Device: "custom-gpu-device"}, + ) + assert.EqualValues( + t, + []v1.NodeSelectorTerm{ + v1.NodeSelectorTerm{ + MatchExpressions: []v1.NodeSelectorRequirement{ + v1.NodeSelectorRequirement{ + Key: "gpu-device", + Operator: v1.NodeSelectorOpIn, + Values: []string{"custom-gpu-device"}, // Used as-is since not in mapping + }, + }, + }, + }, + podSpec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms, + ) + }) +} + +func TestApplyGPUNodeSelectors_DeviceClassOverrides(t *testing.T) { + // Test device class specific 
configuration overrides + + t.Run("Google TPU with device class specific config", func(t *testing.T) { + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + GpuResourceName: "nvidia.com/gpu", + GpuDeviceNodeLabel: "k8s.amazonaws.com/accelerator", + GpuPartitionSizeNodeLabel: "k8s.amazonaws.com/gpu-partition-size", + AcceleratorDeviceClasses: map[string]config.AcceleratorDeviceClassConfig{ + "GOOGLE_TPU": { + ResourceName: "google.com/tpu", + DeviceNodeLabel: "cloud.google.com/gke-tpu-accelerator", + PartitionSizeNodeLabel: "cloud.google.com/gke-tpu-topology", + }, + }, + AcceleratorDevices: map[string]string{ + "V5E": "tpu-v5-lite-podslice", + }, + })) + + tpuPodSpec := &v1.PodSpec{ + Containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + "google.com/tpu": resource.MustParse("4"), + }, + }, + }, + }, + } + + ApplyGPUNodeSelectors( + tpuPodSpec, + &core.GPUAccelerator{ + Device: "V5E", + DeviceClass: core.GPUAccelerator_GOOGLE_TPU, + }, + ) + + // Verify it uses Google-specific node labels instead of AWS labels + assert.EqualValues( + t, + []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{ + { + Key: "cloud.google.com/gke-tpu-accelerator", + Operator: v1.NodeSelectorOpIn, + Values: []string{"tpu-v5-lite-podslice"}, + }, + }, + }, + }, + tpuPodSpec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms, + ) + assert.EqualValues( + t, + []v1.Toleration{ + { + Key: "cloud.google.com/gke-tpu-accelerator", + Value: "tpu-v5-lite-podslice", + Operator: v1.TolerationOpEqual, + Effect: v1.TaintEffectNoSchedule, + }, + }, + tpuPodSpec.Tolerations, + ) + }) + + t.Run("NVIDIA GPU fallback to global config", func(t *testing.T) { + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + GpuResourceName: "nvidia.com/gpu", + GpuDeviceNodeLabel: "k8s.amazonaws.com/accelerator", + GpuPartitionSizeNodeLabel: "k8s.amazonaws.com/gpu-partition-size", + 
AcceleratorDeviceClasses: map[string]config.AcceleratorDeviceClassConfig{ + "NVIDIA_GPU": { + ResourceName: "nvidia.com/gpu", + // DeviceNodeLabel not specified - should fallback to global + }, + }, + AcceleratorDevices: map[string]string{ + "A100": "nvidia-tesla-a100", + }, + })) + + podSpec := &v1.PodSpec{ + Containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + "nvidia.com/gpu": resource.MustParse("1"), + }, + }, + }, + }, + } + + ApplyGPUNodeSelectors( + podSpec, + &core.GPUAccelerator{ + Device: "A100", + DeviceClass: core.GPUAccelerator_NVIDIA_GPU, + }, + ) + + // Verify it falls back to global GpuDeviceNodeLabel + assert.EqualValues( + t, + []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{ + { + Key: "k8s.amazonaws.com/accelerator", + Operator: v1.NodeSelectorOpIn, + Values: []string{"nvidia-tesla-a100"}, + }, + }, + }, + }, + podSpec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms, + ) + }) + + t.Run("TPU with partition size", func(t *testing.T) { + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + GpuResourceName: "nvidia.com/gpu", + GpuDeviceNodeLabel: "k8s.amazonaws.com/accelerator", + GpuPartitionSizeNodeLabel: "k8s.amazonaws.com/gpu-partition-size", + AcceleratorDeviceClasses: map[string]config.AcceleratorDeviceClassConfig{ + "GOOGLE_TPU": { + ResourceName: "google.com/tpu", + DeviceNodeLabel: "cloud.google.com/gke-tpu-accelerator", + PartitionSizeNodeLabel: "cloud.google.com/gke-tpu-topology", + }, + }, + AcceleratorDevices: map[string]string{ + "V5P": "tpu-v5p-slice", + }, + })) + + tpuPodSpec := &v1.PodSpec{ + Containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + "google.com/tpu": resource.MustParse("8"), + }, + }, + }, + }, + } + + ApplyGPUNodeSelectors( + tpuPodSpec, + &core.GPUAccelerator{ + Device: "V5P", + DeviceClass: core.GPUAccelerator_GOOGLE_TPU, + PartitionSizeValue: 
&core.GPUAccelerator_PartitionSize{ + PartitionSize: "2x2x2", + }, + }, + ) + + // Verify it uses Google-specific topology label + assert.EqualValues( + t, + []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{ + { + Key: "cloud.google.com/gke-tpu-accelerator", + Operator: v1.NodeSelectorOpIn, + Values: []string{"tpu-v5p-slice"}, + }, + { + Key: "cloud.google.com/gke-tpu-topology", + Operator: v1.NodeSelectorOpIn, + Values: []string{"2x2x2"}, + }, + }, + }, + }, + tpuPodSpec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms, + ) + assert.EqualValues( + t, + []v1.Toleration{ + { + Key: "cloud.google.com/gke-tpu-accelerator", + Value: "tpu-v5p-slice", + Operator: v1.TolerationOpEqual, + Effect: v1.TaintEffectNoSchedule, + }, + { + Key: "cloud.google.com/gke-tpu-topology", + Value: "2x2x2", + Operator: v1.TolerationOpEqual, + Effect: v1.TaintEffectNoSchedule, + }, + }, + tpuPodSpec.Tolerations, + ) + }) + + t.Run("NVIDIA GPU unpartitioned with custom node selector and toleration", func(t *testing.T) { + gpuUnpartitionedNodeSelectorRequirement := v1.NodeSelectorRequirement{ + Key: "gpu-unpartitioned", + Operator: v1.NodeSelectorOpIn, + Values: []string{"true"}, + } + gpuUnpartitionedToleration := v1.Toleration{ + Key: "gpu-unpartitioned", + Value: "true", + Operator: v1.TolerationOpEqual, + Effect: v1.TaintEffectNoSchedule, + } + + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + GpuResourceName: "nvidia.com/gpu", + GpuDeviceNodeLabel: "k8s.amazonaws.com/accelerator", + GpuPartitionSizeNodeLabel: "k8s.amazonaws.com/gpu-partition-size", + AcceleratorDeviceClasses: map[string]config.AcceleratorDeviceClassConfig{ + "NVIDIA_GPU": { + ResourceName: "nvidia.com/gpu", + DeviceNodeLabel: "k8s.amazonaws.com/accelerator", + PartitionSizeNodeLabel: "k8s.amazonaws.com/gpu-partition-size", + UnpartitionedNodeSelectorRequirement: &gpuUnpartitionedNodeSelectorRequirement, + UnpartitionedToleration: 
&gpuUnpartitionedToleration, + }, + }, + AcceleratorDevices: map[string]string{ + "A100": "nvidia-tesla-a100", + }, + })) + + podSpec := &v1.PodSpec{ + Containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + "nvidia.com/gpu": resource.MustParse("1"), + }, + }, + }, + }, + } + + ApplyGPUNodeSelectors( + podSpec, + &core.GPUAccelerator{ + Device: "A100", + DeviceClass: core.GPUAccelerator_NVIDIA_GPU, + PartitionSizeValue: &core.GPUAccelerator_Unpartitioned{ + Unpartitioned: true, + }, + }, + ) + + // Verify it uses device-class-specific unpartitioned config + assert.EqualValues( + t, + []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{ + { + Key: "k8s.amazonaws.com/accelerator", + Operator: v1.NodeSelectorOpIn, + Values: []string{"nvidia-tesla-a100"}, + }, + gpuUnpartitionedNodeSelectorRequirement, + }, + }, + }, + podSpec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms, + ) + assert.EqualValues( + t, + []v1.Toleration{ + { + Key: "k8s.amazonaws.com/accelerator", + Value: "nvidia-tesla-a100", + Operator: v1.TolerationOpEqual, + Effect: v1.TaintEffectNoSchedule, + }, + gpuUnpartitionedToleration, + }, + podSpec.Tolerations, + ) + }) + + t.Run("NVIDIA GPU unpartitioned with fallback to default behavior", func(t *testing.T) { + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + GpuResourceName: "nvidia.com/gpu", + GpuDeviceNodeLabel: "k8s.amazonaws.com/accelerator", + GpuPartitionSizeNodeLabel: "k8s.amazonaws.com/gpu-partition-size", + AcceleratorDeviceClasses: map[string]config.AcceleratorDeviceClassConfig{ + "NVIDIA_GPU": { + ResourceName: "nvidia.com/gpu", + DeviceNodeLabel: "k8s.amazonaws.com/accelerator", + PartitionSizeNodeLabel: "k8s.amazonaws.com/gpu-partition-size", + // No unpartitioned config - should use default DoesNotExist behavior + }, + }, + AcceleratorDevices: map[string]string{ + "A100": "nvidia-tesla-a100", + }, + })) + + 
podSpec := &v1.PodSpec{ + Containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + "nvidia.com/gpu": resource.MustParse("1"), + }, + }, + }, + }, + } + + ApplyGPUNodeSelectors( + podSpec, + &core.GPUAccelerator{ + Device: "A100", + DeviceClass: core.GPUAccelerator_NVIDIA_GPU, + PartitionSizeValue: &core.GPUAccelerator_Unpartitioned{ + Unpartitioned: true, + }, + }, + ) + + // Verify it uses default DoesNotExist behavior with GPU partition size label + assert.EqualValues( + t, + []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{ + { + Key: "k8s.amazonaws.com/accelerator", + Operator: v1.NodeSelectorOpIn, + Values: []string{"nvidia-tesla-a100"}, + }, + { + Key: "k8s.amazonaws.com/gpu-partition-size", + Operator: v1.NodeSelectorOpDoesNotExist, + }, + }, + }, + }, + podSpec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms, + ) + }) + + t.Run("Partial device class config merges with global defaults", func(t *testing.T) { + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + GpuResourceName: "nvidia.com/gpu", + GpuDeviceNodeLabel: "k8s.amazonaws.com/accelerator", + GpuPartitionSizeNodeLabel: "k8s.amazonaws.com/gpu-partition-size", + AcceleratorDeviceClasses: map[string]config.AcceleratorDeviceClassConfig{ + "NVIDIA_GPU": { + ResourceName: "nvidia.com/gpu", + DeviceNodeLabel: "nvidia.com/gpu.present", + // PartitionSizeNodeLabel not specified - should fall back to global + }, + }, + AcceleratorDevices: map[string]string{ + "A100": "nvidia-tesla-a100", + }, + })) + + podSpec := &v1.PodSpec{ + Containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + "nvidia.com/gpu": resource.MustParse("1"), + }, + }, + }, + }, + } + + ApplyGPUNodeSelectors( + podSpec, + &core.GPUAccelerator{ + Device: "A100", + DeviceClass: core.GPUAccelerator_NVIDIA_GPU, + PartitionSizeValue: &core.GPUAccelerator_PartitionSize{ + 
PartitionSize: "1g.5gb", + }, + }, + ) + + // Verify it uses NVIDIA-specific device label but falls back to global partition label + assert.EqualValues( + t, + []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{ + { + Key: "nvidia.com/gpu.present", + Operator: v1.NodeSelectorOpIn, + Values: []string{"nvidia-tesla-a100"}, + }, + { + // Falls back to global partition size label + Key: "k8s.amazonaws.com/gpu-partition-size", + Operator: v1.NodeSelectorOpIn, + Values: []string{"1g.5gb"}, + }, + }, + }, + }, + podSpec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms, + ) + assert.EqualValues( + t, + []v1.Toleration{ + { + Key: "nvidia.com/gpu.present", + Value: "nvidia-tesla-a100", + Operator: v1.TolerationOpEqual, + Effect: v1.TaintEffectNoSchedule, + }, + { + Key: "k8s.amazonaws.com/gpu-partition-size", + Value: "1g.5gb", + Operator: v1.TolerationOpEqual, + Effect: v1.TaintEffectNoSchedule, + }, + }, + podSpec.Tolerations, + ) + }) +} + +func TestApplyAcceleratorDeviceClassPodTemplate(t *testing.T) { + ctx := context.Background() + + t.Run("nil handling", func(t *testing.T) { + // Sub-test: Nil extendedResources returns immediately + t.Run("nil extendedResources", func(t *testing.T) { + podSpec := &v1.PodSpec{SchedulerName: "original"} + podSpec, err := applyAcceleratorDeviceClassPodTemplate( + ctx, + podSpec, + nil, // nil extendedResources + "primary", + "primary-init", + ) + assert.NoError(t, err) + assert.Equal(t, "original", podSpec.SchedulerName) + }) + + // Sub-test: Nil accelerator returns immediately + t.Run("nil accelerator", func(t *testing.T) { + podSpec := &v1.PodSpec{SchedulerName: "original"} + podSpec, err := applyAcceleratorDeviceClassPodTemplate( + ctx, + podSpec, + &core.ExtendedResources{}, + "primary", + "primary-init", + ) + assert.NoError(t, err) + assert.Equal(t, "original", podSpec.SchedulerName) + }) + + // Sub-test: Nil PodTemplate does nothing + t.Run("nil PodTemplate", func(t 
*testing.T) { + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + AcceleratorDeviceClasses: map[string]config.AcceleratorDeviceClassConfig{ + "NVIDIA_GPU": { + ResourceName: "nvidia.com/gpu", + // PodTemplate: nil + }, + }, + })) + + podSpec := &v1.PodSpec{SchedulerName: "original"} + podSpec, err := applyAcceleratorDeviceClassPodTemplate( + ctx, + podSpec, + &core.ExtendedResources{ + GpuAccelerator: &core.GPUAccelerator{ + DeviceClass: core.GPUAccelerator_NVIDIA_GPU, + }, + }, + "primary", + "primary-init", + ) + assert.NoError(t, err) + assert.Equal(t, "original", podSpec.SchedulerName) + }) + }) + + t.Run("scalar field merge behavior", func(t *testing.T) { + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + AcceleratorDeviceClasses: map[string]config.AcceleratorDeviceClassConfig{ + "NVIDIA_GPU": { + ResourceName: "nvidia.com/gpu", + PodTemplate: &v1.PodTemplate{ + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + SchedulerName: "volcano", // Device class default + }, + }, + }, + }, + }, + })) + + // Sub-test: Task value overrides device class default + t.Run("task overrides device class", func(t *testing.T) { + podSpec := &v1.PodSpec{SchedulerName: "default-scheduler"} // Task-specific value + podSpec, err := applyAcceleratorDeviceClassPodTemplate( + ctx, + podSpec, + &core.ExtendedResources{ + GpuAccelerator: &core.GPUAccelerator{ + DeviceClass: core.GPUAccelerator_NVIDIA_GPU, + }, + }, + "primary", + "primary-init", + ) + assert.NoError(t, err) + // Task value should win (BASE semantics) + assert.Equal(t, "default-scheduler", podSpec.SchedulerName) + }) + + // Sub-test: Device class default applies when task doesn't set value + t.Run("device class applies when task unset", func(t *testing.T) { + podSpec := &v1.PodSpec{} // Task doesn't set schedulerName + podSpec, err := applyAcceleratorDeviceClassPodTemplate( + ctx, + podSpec, + &core.ExtendedResources{ + GpuAccelerator: &core.GPUAccelerator{ + DeviceClass: 
core.GPUAccelerator_NVIDIA_GPU, + }, + }, + "primary", + "primary-init", + ) + assert.NoError(t, err) + // Device class default should apply + assert.Equal(t, "volcano", podSpec.SchedulerName) + }) + }) + + t.Run("merge semantics validation", func(t *testing.T) { + // Single comprehensive test validating slice append and map merge behaviors + overrideToleration := v1.Toleration{ + Key: "tpu-topology", + Operator: v1.TolerationOpExists, + Effect: v1.TaintEffectNoSchedule, + } + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + AcceleratorDeviceClasses: map[string]config.AcceleratorDeviceClassConfig{ + "GOOGLE_TPU": { + ResourceName: "google.com/tpu", + PodTemplate: &v1.PodTemplate{ + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Tolerations: []v1.Toleration{overrideToleration}, + NodeSelector: map[string]string{ + "new-key": "new-value", + "existing-key": "default-value", + }, + }, + }, + }, + }, + }, + })) + + existingToleration := v1.Toleration{ + Key: "interruptible", + Value: "true", + Operator: v1.TolerationOpEqual, + Effect: v1.TaintEffectNoSchedule, + } + podSpec := &v1.PodSpec{ + Tolerations: []v1.Toleration{existingToleration}, + NodeSelector: map[string]string{ + "existing-key": "task-value", + "keep-key": "keep-value", + }, + } + + podSpec, err := applyAcceleratorDeviceClassPodTemplate( + ctx, + podSpec, + &core.ExtendedResources{ + GpuAccelerator: &core.GPUAccelerator{ + DeviceClass: core.GPUAccelerator_GOOGLE_TPU, + }, + }, + "primary", + "primary-init", + ) + assert.NoError(t, err) + + // Validate slice append: tolerations from both device class and task + assert.Len(t, podSpec.Tolerations, 2) + assert.Contains(t, podSpec.Tolerations, existingToleration) + assert.Contains(t, podSpec.Tolerations, overrideToleration) + + // Validate map merge: task values win for conflicts, device class adds new keys + assert.Len(t, podSpec.NodeSelector, 3) + assert.Equal(t, "task-value", podSpec.NodeSelector["existing-key"]) // Task wins + 
assert.Equal(t, "keep-value", podSpec.NodeSelector["keep-key"]) // Task only + assert.Equal(t, "new-value", podSpec.NodeSelector["new-key"]) // Device class adds + }) + + t.Run("default container template affects all containers", func(t *testing.T) { + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + AcceleratorDeviceClasses: map[string]config.AcceleratorDeviceClassConfig{ + "NVIDIA_GPU": { + ResourceName: "nvidia.com/gpu", + PodTemplate: &v1.PodTemplate{ + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "default", + Env: []v1.EnvVar{ + {Name: "NVIDIA_VISIBLE_DEVICES", Value: "all"}, + {Name: "NCCL_DEBUG", Value: "INFO"}, + }, + }, + }, + }, + }, + }, + }, + }, + })) + + podSpec := &v1.PodSpec{ + Containers: []v1.Container{ + {Name: "main", Image: "app:1.0"}, + {Name: "sidecar", Image: "monitor:1.0"}, + }, + } + + podSpec, err := applyAcceleratorDeviceClassPodTemplate( + ctx, + podSpec, + &core.ExtendedResources{ + GpuAccelerator: &core.GPUAccelerator{ + DeviceClass: core.GPUAccelerator_NVIDIA_GPU, + }, + }, + "main", + "main-init", + ) + assert.NoError(t, err) + + // Both containers should have the env vars from the default template + assert.Len(t, podSpec.Containers, 2) + for i, container := range podSpec.Containers { + assert.Contains(t, container.Env, v1.EnvVar{Name: "NVIDIA_VISIBLE_DEVICES", Value: "all"}, + "Container %d (%s) missing env var", i, container.Name) + assert.Contains(t, container.Env, v1.EnvVar{Name: "NCCL_DEBUG", Value: "INFO"}, + "Container %d (%s) missing env var", i, container.Name) + } + }) + + t.Run("primary container template affects only primary container", func(t *testing.T) { + gpuLimit := resource.MustParse("8") + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + AcceleratorDeviceClasses: map[string]config.AcceleratorDeviceClassConfig{ + "NVIDIA_GPU": { + ResourceName: "nvidia.com/gpu", + PodTemplate: &v1.PodTemplate{ + Template: v1.PodTemplateSpec{ 
+ Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "primary", + Env: []v1.EnvVar{ + {Name: "CUDA_VISIBLE_DEVICES", Value: "all"}, + }, + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + v1.ResourceName("nvidia.com/gpu"): gpuLimit, + }, + }, + }, + }, + }, + }, + }, + }, + }, + })) + + podSpec := &v1.PodSpec{ + Containers: []v1.Container{ + {Name: "workload", Image: "gpu-app:1.0"}, + {Name: "metrics", Image: "prometheus:1.0"}, + }, + } + + podSpec, err := applyAcceleratorDeviceClassPodTemplate( + ctx, + podSpec, + &core.ExtendedResources{ + GpuAccelerator: &core.GPUAccelerator{ + DeviceClass: core.GPUAccelerator_NVIDIA_GPU, + }, + }, + "workload", + "workload-init", + ) + assert.NoError(t, err) + + // Primary container should have the template applied + assert.Contains(t, podSpec.Containers[0].Env, v1.EnvVar{Name: "CUDA_VISIBLE_DEVICES", Value: "all"}) + gpuResource := podSpec.Containers[0].Resources.Limits[v1.ResourceName("nvidia.com/gpu")] + assert.Equal(t, "8", gpuResource.String()) + + // Sidecar should NOT have the template + assert.NotContains(t, podSpec.Containers[1].Env, v1.EnvVar{Name: "CUDA_VISIBLE_DEVICES", Value: "all"}) + assert.Empty(t, podSpec.Containers[1].Resources.Limits) + }) + + t.Run("both default and primary templates merge correctly", func(t *testing.T) { + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + AcceleratorDeviceClasses: map[string]config.AcceleratorDeviceClassConfig{ + "GOOGLE_TPU": { + ResourceName: "google.com/tpu", + PodTemplate: &v1.PodTemplate{ + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "default", + Env: []v1.EnvVar{ + {Name: "BASE_VAR", Value: "from-default"}, + {Name: "SHARED_VAR", Value: "shared"}, + }, + ImagePullPolicy: v1.PullAlways, + }, + { + Name: "primary", + Env: []v1.EnvVar{ + {Name: "BASE_VAR", Value: "from-primary"}, + {Name: "PRIMARY_ONLY", Value: "primary-value"}, + }, + }, + }, + }, + }, + }, + }, + }, + })) + + 
podSpec := &v1.PodSpec{ + Containers: []v1.Container{ + {Name: "trainer", Image: "tpu-trainer:1.0"}, + {Name: "logger", Image: "log-collector:1.0"}, + }, + } + + podSpec, err := applyAcceleratorDeviceClassPodTemplate( + ctx, + podSpec, + &core.ExtendedResources{ + GpuAccelerator: &core.GPUAccelerator{ + DeviceClass: core.GPUAccelerator_GOOGLE_TPU, + }, + }, + "trainer", + "trainer-init", + ) + assert.NoError(t, err) + + // Primary container: primary template overrides default for BASE_VAR + primaryContainer := podSpec.Containers[0] + assert.Equal(t, v1.PullAlways, primaryContainer.ImagePullPolicy) // from default + assert.Contains(t, primaryContainer.Env, v1.EnvVar{Name: "BASE_VAR", Value: "from-primary"}) // primary wins + assert.Contains(t, primaryContainer.Env, v1.EnvVar{Name: "SHARED_VAR", Value: "shared"}) // from default + assert.Contains(t, primaryContainer.Env, v1.EnvVar{Name: "PRIMARY_ONLY", Value: "primary-value"}) // from primary + + // Non-primary container: only gets default template + sidecarContainer := podSpec.Containers[1] + assert.Equal(t, v1.PullAlways, sidecarContainer.ImagePullPolicy) + assert.Contains(t, sidecarContainer.Env, v1.EnvVar{Name: "BASE_VAR", Value: "from-default"}) + assert.Contains(t, sidecarContainer.Env, v1.EnvVar{Name: "SHARED_VAR", Value: "shared"}) + assert.NotContains(t, sidecarContainer.Env, v1.EnvVar{Name: "PRIMARY_ONLY", Value: "primary-value"}) + }) +} + +func updatePod(t *testing.T) { + taskExecutionMetadata := dummyTaskExecutionMetadata(&v1.ResourceRequirements{ + Limits: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("1024m"), + v1.ResourceEphemeralStorage: resource.MustParse("100M"), + }, + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("1024m"), + v1.ResourceEphemeralStorage: resource.MustParse("100M"), + }, + }, nil, "", nil) + + pod := &v1.Pod{ + Spec: v1.PodSpec{ + Tolerations: []v1.Toleration{ + { + Key: "my toleration key", + Value: "my toleration value", + }, + }, + NodeSelector: 
map[string]string{ + "user": "also configured", + }, + }, + } + UpdatePod(taskExecutionMetadata, []v1.ResourceRequirements{}, &pod.Spec) + assert.Equal(t, v1.RestartPolicyNever, pod.Spec.RestartPolicy) + for _, tol := range pod.Spec.Tolerations { + if tol.Key == "x/flyte" { + assert.Equal(t, tol.Value, "interruptible") + assert.Equal(t, tol.Operator, v1.TolerationOperator("Equal")) + assert.Equal(t, tol.Effect, v1.TaintEffect("NoSchedule")) + } else if tol.Key == "my toleration key" { + assert.Equal(t, tol.Value, "my toleration value") + } else { + t.Fatalf("unexpected toleration [%+v]", tol) + } + } + assert.Equal(t, "service-account", pod.Spec.ServiceAccountName) + assert.Equal(t, "flyte-scheduler", pod.Spec.SchedulerName) + assert.Len(t, pod.Spec.Tolerations, 2) + assert.EqualValues(t, map[string]string{ + "x/interruptible": "true", + "user": "also configured", + }, pod.Spec.NodeSelector) + assert.EqualValues( + t, + []v1.NodeSelectorTerm{ + v1.NodeSelectorTerm{ + MatchExpressions: []v1.NodeSelectorRequirement{ + v1.NodeSelectorRequirement{ + Key: "x/interruptible", + Operator: v1.NodeSelectorOpIn, + Values: []string{"true"}, + }, + }, + }, + }, + pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms, + ) +} + +func TestUpdatePodWithDefaultAffinityAndInterruptibleNodeSelectorRequirement(t *testing.T) { + taskExecutionMetadata := dummyTaskExecutionMetadata(&v1.ResourceRequirements{}, nil, "", nil) + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + DefaultAffinity: &v1.Affinity{ + NodeAffinity: &v1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{ + v1.NodeSelectorTerm{ + MatchExpressions: []v1.NodeSelectorRequirement{ + v1.NodeSelectorRequirement{ + Key: "default node affinity", + Operator: v1.NodeSelectorOpIn, + Values: []string{"exists"}, + }, + }, + }, + }, + }, + }, + }, + InterruptibleNodeSelectorRequirement: 
&v1.NodeSelectorRequirement{ + Key: "x/interruptible", + Operator: v1.NodeSelectorOpIn, + Values: []string{"true"}, + }, + })) + for i := 0; i < 3; i++ { + podSpec := v1.PodSpec{} + UpdatePod(taskExecutionMetadata, []v1.ResourceRequirements{}, &podSpec) + assert.EqualValues( + t, + []v1.NodeSelectorTerm{ + v1.NodeSelectorTerm{ + MatchExpressions: []v1.NodeSelectorRequirement{ + v1.NodeSelectorRequirement{ + Key: "default node affinity", + Operator: v1.NodeSelectorOpIn, + Values: []string{"exists"}, + }, + v1.NodeSelectorRequirement{ + Key: "x/interruptible", + Operator: v1.NodeSelectorOpIn, + Values: []string{"true"}, + }, + }, + }, + }, + podSpec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms, + ) + } +} + +func toK8sPodInterruptible(t *testing.T) { + ctx := context.TODO() + + x := dummyExecContext(dummyTaskTemplate(), &v1.ResourceRequirements{ + Limits: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("1024m"), + v1.ResourceEphemeralStorage: resource.MustParse("100M"), + ResourceNvidiaGPU: resource.MustParse("1"), + }, + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("1024m"), + v1.ResourceEphemeralStorage: resource.MustParse("100M"), + }, + }, nil, "", nil) + + p, _, _, err := ToK8sPodSpec(ctx, x) + assert.NoError(t, err) + assert.Len(t, p.Tolerations, 2) + assert.Equal(t, "x/flyte", p.Tolerations[1].Key) + assert.Equal(t, "interruptible", p.Tolerations[1].Value) + assert.Equal(t, 2, len(p.NodeSelector)) + assert.Equal(t, "true", p.NodeSelector["x/interruptible"]) + assert.EqualValues( + t, + []v1.NodeSelectorTerm{ + v1.NodeSelectorTerm{ + MatchExpressions: []v1.NodeSelectorRequirement{ + v1.NodeSelectorRequirement{ + Key: "x/interruptible", + Operator: v1.NodeSelectorOpIn, + Values: []string{"true"}, + }, + }, + }, + }, + p.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms, + ) +} + +func TestToK8sPod(t *testing.T) { + ctx := context.TODO() + + tolGPU := 
v1.Toleration{ + Key: "flyte/gpu", + Value: "dedicated", + Operator: v1.TolerationOpEqual, + Effect: v1.TaintEffectNoSchedule, + } + + tolEphemeralStorage := v1.Toleration{ + Key: "ephemeral-storage", + Value: "dedicated", + Operator: v1.TolerationOpExists, + Effect: v1.TaintEffectNoSchedule, + } + + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + ResourceTolerations: map[v1.ResourceName][]v1.Toleration{ + v1.ResourceEphemeralStorage: {tolEphemeralStorage}, + ResourceNvidiaGPU: {tolGPU}, + }, + DefaultCPURequest: resource.MustParse("1024m"), + DefaultMemoryRequest: resource.MustParse("1024Mi"), + })) + + op := &pluginsIOMock.OutputFilePaths{} + op.On("GetOutputPrefixPath").Return(storage.DataReference("")) + op.On("GetRawOutputPrefix").Return(storage.DataReference("")) + + t.Run("WithGPU", func(t *testing.T) { + x := dummyExecContext(dummyTaskTemplate(), &v1.ResourceRequirements{ + Limits: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("1024m"), + v1.ResourceEphemeralStorage: resource.MustParse("100M"), + ResourceNvidiaGPU: resource.MustParse("1"), + }, + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("1024m"), + v1.ResourceEphemeralStorage: resource.MustParse("100M"), + }, + }, nil, "", nil) + + p, _, _, err := ToK8sPodSpec(ctx, x) + assert.NoError(t, err) + assert.Equal(t, len(p.Tolerations), 2) + }) + + t.Run("NoGPU", func(t *testing.T) { + x := dummyExecContext(dummyTaskTemplate(), &v1.ResourceRequirements{ + Limits: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("1024m"), + v1.ResourceEphemeralStorage: resource.MustParse("100M"), + }, + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("1024m"), + v1.ResourceEphemeralStorage: resource.MustParse("100M"), + }, + }, nil, "", nil) + + p, _, _, err := ToK8sPodSpec(ctx, x) + assert.NoError(t, err) + assert.Equal(t, len(p.Tolerations), 1) + assert.Equal(t, "some-acceptable-name", p.Containers[0].Name) + }) + + t.Run("Default toleration, selector, 
scheduler", func(t *testing.T) { + x := dummyExecContext(dummyTaskTemplate(), &v1.ResourceRequirements{ + Limits: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("1024m"), + v1.ResourceEphemeralStorage: resource.MustParse("100M"), + }, + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("1024m"), + v1.ResourceEphemeralStorage: resource.MustParse("100M"), + }, + }, nil, "", nil) + + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + DefaultNodeSelector: map[string]string{ + "nodeId": "123", + }, + SchedulerName: "myScheduler", + DefaultCPURequest: resource.MustParse("1024m"), + DefaultMemoryRequest: resource.MustParse("1024Mi"), + })) + + p, _, _, err := ToK8sPodSpec(ctx, x) + assert.NoError(t, err) + assert.Equal(t, 1, len(p.NodeSelector)) + assert.Equal(t, "myScheduler", p.SchedulerName) + assert.Equal(t, "some-acceptable-name", p.Containers[0].Name) + assert.Nil(t, p.SecurityContext) + }) + + t.Run("default-pod-sec-ctx", func(t *testing.T) { + v := int64(1000) + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + DefaultPodSecurityContext: &v1.PodSecurityContext{ + RunAsGroup: &v, + }, + })) + + x := dummyExecContext(dummyTaskTemplate(), &v1.ResourceRequirements{}, nil, "", nil) + p, _, _, err := ToK8sPodSpec(ctx, x) + assert.NoError(t, err) + assert.NotNil(t, p.SecurityContext) + assert.Equal(t, *p.SecurityContext.RunAsGroup, v) + }) + + t.Run("enableHostNetwork", func(t *testing.T) { + enabled := true + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + EnableHostNetworkingPod: &enabled, + })) + x := dummyExecContext(dummyTaskTemplate(), &v1.ResourceRequirements{}, nil, "", nil) + p, _, _, err := ToK8sPodSpec(ctx, x) + assert.NoError(t, err) + assert.True(t, p.HostNetwork) + }) + + t.Run("explicitDisableHostNetwork", func(t *testing.T) { + enabled := false + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + EnableHostNetworkingPod: &enabled, + })) + x := 
dummyExecContext(dummyTaskTemplate(), &v1.ResourceRequirements{}, nil, "", nil) + p, _, _, err := ToK8sPodSpec(ctx, x) + assert.NoError(t, err) + assert.False(t, p.HostNetwork) + }) + + t.Run("skipSettingHostNetwork", func(t *testing.T) { + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{})) + x := dummyExecContext(dummyTaskTemplate(), &v1.ResourceRequirements{}, nil, "", nil) + p, _, _, err := ToK8sPodSpec(ctx, x) + assert.NoError(t, err) + assert.False(t, p.HostNetwork) + }) + + t.Run("default-pod-dns-config", func(t *testing.T) { + val1 := "1" + val2 := "1" + val3 := "3" + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + DefaultPodDNSConfig: &v1.PodDNSConfig{ + Nameservers: []string{"8.8.8.8", "8.8.4.4"}, + Options: []v1.PodDNSConfigOption{ + { + Name: "ndots", + Value: &val1, + }, + { + Name: "single-request-reopen", + }, + { + Name: "timeout", + Value: &val2, + }, + { + Name: "attempts", + Value: &val3, + }, + }, + Searches: []string{"ns1.svc.cluster-domain.example", "my.dns.search.suffix"}, + }, + })) + + x := dummyExecContext(dummyTaskTemplate(), &v1.ResourceRequirements{}, nil, "", nil) + p, _, _, err := ToK8sPodSpec(ctx, x) + assert.NoError(t, err) + assert.NotNil(t, p.DNSConfig) + assert.Equal(t, []string{"8.8.8.8", "8.8.4.4"}, p.DNSConfig.Nameservers) + assert.Equal(t, "ndots", p.DNSConfig.Options[0].Name) + assert.Equal(t, val1, *p.DNSConfig.Options[0].Value) + assert.Equal(t, "single-request-reopen", p.DNSConfig.Options[1].Name) + assert.Equal(t, "timeout", p.DNSConfig.Options[2].Name) + assert.Equal(t, val2, *p.DNSConfig.Options[2].Value) + assert.Equal(t, "attempts", p.DNSConfig.Options[3].Name) + assert.Equal(t, val3, *p.DNSConfig.Options[3].Value) + assert.Equal(t, []string{"ns1.svc.cluster-domain.example", "my.dns.search.suffix"}, p.DNSConfig.Searches) + }) + + t.Run("environmentVariables", func(t *testing.T) { + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + DefaultEnvVars: 
map[string]string{ + "foo": "bar", + }, + })) + x := dummyExecContext(dummyTaskTemplate(), &v1.ResourceRequirements{}, nil, "", nil) + p, _, _, err := ToK8sPodSpec(ctx, x) + assert.NoError(t, err) + for _, c := range p.Containers { + uniqueVariableNames := make(map[string]string) + for _, envVar := range c.Env { + if _, ok := uniqueVariableNames[envVar.Name]; ok { + t.Errorf("duplicate environment variable %s", envVar.Name) + } + uniqueVariableNames[envVar.Name] = envVar.Value + } + } + }) + + // TODO @pvditt + //t.Run("AcceleratedInputsEnabled", func(t *testing.T) { + // cfg := propellerCfg.GetConfig() + // cfg.AcceleratedInputs.Enabled = true + // cfg.AcceleratedInputs.VolumePath = "/test/path" + // cfg.AcceleratedInputs.LocalPathPrefix = "/test/local" + // defer func() { cfg.AcceleratedInputs.Enabled = false }() + // x := dummyExecContext(dummyTaskTemplate(), &v1.ResourceRequirements{}, nil, "", nil) + // + // p, _, _, err := ToK8sPodSpec(ctx, x) + // + // assert.NoError(t, err) + // if assert.Len(t, p.Volumes, 1) { + // vol := p.Volumes[0] + // assert.Equal(t, "union-persistent-data-volume", vol.Name) + // assert.Equal(t, cfg.AcceleratedInputs.VolumePath, vol.HostPath.Path) + // } + // if assert.Len(t, p.Containers, 1) && assert.Len(t, p.Containers[0].VolumeMounts, 1) { + // mount := p.Containers[0].VolumeMounts[0] + // assert.Equal(t, "union-persistent-data-volume", mount.Name) + // assert.Equal(t, cfg.AcceleratedInputs.LocalPathPrefix, mount.MountPath) + // assert.True(t, mount.ReadOnly) + // } + //}) +} + +func TestToK8sPodContainerImage(t *testing.T) { + t.Run("Override container image", func(t *testing.T) { + taskContext := dummyExecContext(dummyTaskTemplate(), &v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("1024m"), + }}, nil, "foo:latest", nil) + p, _, _, err := ToK8sPodSpec(context.TODO(), taskContext) + assert.NoError(t, err) + assert.Equal(t, "foo:latest", p.Containers[0].Image) + }) +} + +func 
TestPodTemplateOverride(t *testing.T) { + metadata := &core.K8SObjectMetadata{ + Labels: map[string]string{ + "l": "a", + }, + Annotations: map[string]string{ + "a": "b", + }, + } + + podSpec := v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "foo", + Image: "foo:latest", + Args: []string{"foo", "bar"}, + }, + }, + } + + podSpecStruct, err := utils.MarshalObjToStruct(podSpec) + assert.NoError(t, err) + + t.Run("Override pod template", func(t *testing.T) { + taskContext := dummyExecContext(dummyTaskTemplate(), &v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("1024m"), + }}, nil, "", &core.K8SPod{ + PrimaryContainerName: "foo", + PodSpec: podSpecStruct, + Metadata: metadata, + }) + p, m, _, err := ToK8sPodSpec(context.TODO(), taskContext) + assert.NoError(t, err) + assert.Equal(t, "a", m.Labels["l"]) + assert.Equal(t, "b", m.Annotations["a"]) + assert.Equal(t, "foo:latest", p.Containers[0].Image) + assert.Equal(t, "foo", p.Containers[0].Name) + }) +} + +func TestToK8sPodExtendedResources(t *testing.T) { + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + GpuDeviceNodeLabel: "gpu-node-label", + GpuPartitionSizeNodeLabel: "gpu-partition-size", + GpuResourceName: ResourceNvidiaGPU, + })) + + fixtures := []struct { + name string + resources *v1.ResourceRequirements + extendedResourcesBase *core.ExtendedResources + extendedResourcesOverride *core.ExtendedResources + expectedNsr []v1.NodeSelectorTerm + expectedTol []v1.Toleration + }{ + { + "without overrides", + &v1.ResourceRequirements{ + Limits: v1.ResourceList{ + ResourceNvidiaGPU: resource.MustParse("1"), + }, + }, + &core.ExtendedResources{ + GpuAccelerator: &core.GPUAccelerator{ + Device: "nvidia-tesla-t4", + }, + }, + nil, + []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{ + v1.NodeSelectorRequirement{ + Key: "gpu-node-label", + Operator: v1.NodeSelectorOpIn, + Values: []string{"nvidia-tesla-t4"}, + }, + }, + }, + }, + 
[]v1.Toleration{ + { + Key: "gpu-node-label", + Value: "nvidia-tesla-t4", + Operator: v1.TolerationOpEqual, + Effect: v1.TaintEffectNoSchedule, + }, + }, + }, + { + "with overrides", + &v1.ResourceRequirements{ + Limits: v1.ResourceList{ + ResourceNvidiaGPU: resource.MustParse("1"), + }, + }, + &core.ExtendedResources{ + GpuAccelerator: &core.GPUAccelerator{ + Device: "nvidia-tesla-t4", + }, + }, + &core.ExtendedResources{ + GpuAccelerator: &core.GPUAccelerator{ + Device: "nvidia-tesla-a100", + PartitionSizeValue: &core.GPUAccelerator_PartitionSize{ + PartitionSize: "1g.5gb", + }, + }, + }, + []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{ + v1.NodeSelectorRequirement{ + Key: "gpu-node-label", + Operator: v1.NodeSelectorOpIn, + Values: []string{"nvidia-tesla-a100"}, + }, + v1.NodeSelectorRequirement{ + Key: "gpu-partition-size", + Operator: v1.NodeSelectorOpIn, + Values: []string{"1g.5gb"}, + }, + }, + }, + }, + []v1.Toleration{ + { + Key: "gpu-node-label", + Value: "nvidia-tesla-a100", + Operator: v1.TolerationOpEqual, + Effect: v1.TaintEffectNoSchedule, + }, + { + Key: "gpu-partition-size", + Value: "1g.5gb", + Operator: v1.TolerationOpEqual, + Effect: v1.TaintEffectNoSchedule, + }, + }, + }, + } + + for _, f := range fixtures { + t.Run(f.name, func(t *testing.T) { + taskTemplate := dummyTaskTemplate() + taskTemplate.ExtendedResources = f.extendedResourcesBase + taskContext := dummyExecContext(taskTemplate, f.resources, f.extendedResourcesOverride, "", nil) + p, _, _, err := ToK8sPodSpec(context.TODO(), taskContext) + assert.NoError(t, err) + + assert.EqualValues( + t, + f.expectedNsr, + p.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms, + ) + assert.EqualValues( + t, + f.expectedTol, + p.Tolerations, + ) + }) + } +} + +// TestToK8sPodDeviceClass validates the complete three-way merge order through ToK8sPodSpec: +// Task PodSpec > Device Class PodTemplate > Base PodTemplate +func 
TestToK8sPodDeviceClass(t *testing.T) { + t.Run("device class template merge", func(t *testing.T) { + // 1. Configure base PodTemplate in DefaultPodTemplateStore + basePodTemplate := v1.PodTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "base-template", + Namespace: "test-namespace", + }, + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + SchedulerName: "base-scheduler", + DNSPolicy: v1.DNSClusterFirst, + Tolerations: []v1.Toleration{ + {Key: "base-tol", Operator: v1.TolerationOpExists, Effect: v1.TaintEffectNoSchedule}, + }, + NodeSelector: map[string]string{ + "base-key": "base-value", + }, + }, + }, + } + DefaultPodTemplateStore.Store(&basePodTemplate) + + // 2. Configure device class PodTemplate for AMAZON_NEURON + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + AcceleratorDeviceClasses: map[string]config.AcceleratorDeviceClassConfig{ + "AMAZON_NEURON": { + ResourceName: "aws.amazon.com/neuron", + PodTemplate: &v1.PodTemplate{ + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + SchedulerName: "device-scheduler", // Overrides base + PriorityClassName: "device-priority", + Tolerations: []v1.Toleration{ + {Key: "device-tol", Operator: v1.TolerationOpExists, Effect: v1.TaintEffectNoSchedule}, + }, + NodeSelector: map[string]string{ + "device-key": "device-value", + }, + }, + }, + }, + }, + }, + })) + + // 3. 
Configure task-level K8SPod override + taskPodSpec := v1.PodSpec{ + PriorityClassName: "task-priority", + Tolerations: []v1.Toleration{ + {Key: "task-tol", Operator: v1.TolerationOpExists, Effect: v1.TaintEffectNoSchedule}, + }, + NodeSelector: map[string]string{ + "task-key": "task-value", + }, + Containers: []v1.Container{ + { + Name: "primary-container", + }, + }, + } + taskPodSpecStruct, err := utils.MarshalObjToStruct(taskPodSpec) + assert.NoError(t, err) + + taskTemplate := dummyTaskTemplate() + taskTemplate.Metadata = &core.TaskMetadata{ + PodTemplateName: "base-template", + } + taskTemplate.ExtendedResources = &core.ExtendedResources{ + GpuAccelerator: &core.GPUAccelerator{ + DeviceClass: core.GPUAccelerator_AMAZON_NEURON, + }, + } + + k8sPod := &core.K8SPod{ + PodSpec: taskPodSpecStruct, + PrimaryContainerName: "primary-container", + } + + taskContext := dummyExecContext(taskTemplate, &v1.ResourceRequirements{}, nil, "", k8sPod) + podSpec, _, _, err := ToK8sPodSpec(context.TODO(), taskContext) + assert.NoError(t, err) + + // 4. 
Validate merge order: Task > Device Class > Base Template + + // Scalars: Device class scalar overrides base + assert.Equal(t, "device-scheduler", podSpec.SchedulerName, + "Device class SchedulerName should override base") + + // Device class scalar overridden by task + assert.Equal(t, "task-priority", podSpec.PriorityClassName, + "Task PriorityClassName should override device class") + + // Base scalar preserved (not in task or device class) + assert.Equal(t, v1.DNSClusterFirst, podSpec.DNSPolicy, + "Base DNSPolicy should be preserved") + + // Slices: All tolerations should be appended + tolerationKeys := make([]string, 0) + for _, tol := range podSpec.Tolerations { + tolerationKeys = append(tolerationKeys, tol.Key) + } + assert.Contains(t, tolerationKeys, "base-tol", + "Base toleration should be present") + assert.Contains(t, tolerationKeys, "device-tol", + "Device class toleration should be present") + assert.Contains(t, tolerationKeys, "task-tol", + "Task toleration should be present") + + // Maps: All node selector entries should be merged + assert.Equal(t, "base-value", podSpec.NodeSelector["base-key"], + "Base node selector should be present") + assert.Equal(t, "device-value", podSpec.NodeSelector["device-key"], + "Device class node selector should be present") + assert.Equal(t, "task-value", podSpec.NodeSelector["task-key"], + "Task node selector should be present") + }) +} + +func TestDemystifyPending(t *testing.T) { + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + CreateContainerErrorGracePeriod: config1.Duration{ + Duration: time.Minute * 3, + }, + CreateContainerConfigErrorGracePeriod: config1.Duration{ + Duration: time.Minute * 4, + }, + ImagePullBackoffGracePeriod: config1.Duration{ + Duration: time.Minute * 3, + }, + PodPendingTimeout: config1.Duration{ + Duration: 0, + }, + })) + + t.Run("PodNotScheduled", func(t *testing.T) { + s := v1.PodStatus{ + Phase: v1.PodPending, + Conditions: []v1.PodCondition{ + { + Type: 
v1.PodScheduled, + Status: v1.ConditionFalse, + }, + }, + } + taskStatus, err := DemystifyPending(s, pluginsCore.TaskInfo{}) + assert.NoError(t, err) + assert.Equal(t, pluginsCore.PhaseQueued, taskStatus.Phase()) + }) + + t.Run("PodUnschedulable", func(t *testing.T) { + s := v1.PodStatus{ + Phase: v1.PodPending, + Conditions: []v1.PodCondition{ + { + Type: v1.PodReasonUnschedulable, + Status: v1.ConditionFalse, + }, + }, + } + taskStatus, err := DemystifyPending(s, pluginsCore.TaskInfo{}) + assert.NoError(t, err) + assert.Equal(t, pluginsCore.PhaseQueued, taskStatus.Phase()) + }) + + t.Run("PodNotScheduled", func(t *testing.T) { + s := v1.PodStatus{ + Phase: v1.PodPending, + Conditions: []v1.PodCondition{ + { + Type: v1.PodScheduled, + Status: v1.ConditionTrue, + }, + }, + } + taskStatus, err := DemystifyPending(s, pluginsCore.TaskInfo{}) + assert.NoError(t, err) + assert.Equal(t, pluginsCore.PhaseQueued, taskStatus.Phase()) + }) + + t.Run("PodUnschedulable", func(t *testing.T) { + s := v1.PodStatus{ + Phase: v1.PodPending, + Conditions: []v1.PodCondition{ + { + Type: v1.PodReasonUnschedulable, + Status: v1.ConditionUnknown, + }, + }, + } + taskStatus, err := DemystifyPending(s, pluginsCore.TaskInfo{}) + assert.NoError(t, err) + assert.Equal(t, pluginsCore.PhaseQueued, taskStatus.Phase()) + }) + + s := v1.PodStatus{ + Phase: v1.PodPending, + Conditions: []v1.PodCondition{ + { + Type: v1.PodReady, + Status: v1.ConditionFalse, + }, + { + Type: v1.PodReasonUnschedulable, + Status: v1.ConditionUnknown, + }, + { + Type: v1.PodScheduled, + Status: v1.ConditionTrue, + }, + }, + } + + t.Run("ContainerCreating", func(t *testing.T) { + s.ContainerStatuses = []v1.ContainerStatus{ + { + Ready: false, + State: v1.ContainerState{ + Waiting: &v1.ContainerStateWaiting{ + Reason: "ContainerCreating", + Message: "this is not an error", + }, + }, + }, + } + taskStatus, err := DemystifyPending(s, pluginsCore.TaskInfo{}) + assert.NoError(t, err) + assert.Equal(t, 
pluginsCore.PhaseInitializing, taskStatus.Phase()) + }) + + t.Run("ErrImagePull", func(t *testing.T) { + s.ContainerStatuses = []v1.ContainerStatus{ + { + Ready: false, + State: v1.ContainerState{ + Waiting: &v1.ContainerStateWaiting{ + Reason: "ErrImagePull", + Message: "this is not an error", + }, + }, + }, + } + taskStatus, err := DemystifyPending(s, pluginsCore.TaskInfo{}) + assert.NoError(t, err) + assert.Equal(t, pluginsCore.PhaseInitializing, taskStatus.Phase()) + }) + + t.Run("PodInitializing", func(t *testing.T) { + s.ContainerStatuses = []v1.ContainerStatus{ + { + Ready: false, + State: v1.ContainerState{ + Waiting: &v1.ContainerStateWaiting{ + Reason: "PodInitializing", + Message: "this is not an error", + }, + }, + }, + } + taskStatus, err := DemystifyPending(s, pluginsCore.TaskInfo{}) + assert.NoError(t, err) + assert.Equal(t, pluginsCore.PhaseInitializing, taskStatus.Phase()) + }) + + t.Run("ImagePullBackOffWithinGracePeriod", func(t *testing.T) { + s2 := *s.DeepCopy() + s2.Conditions[0].LastTransitionTime = metav1.Now() + s2.ContainerStatuses = []v1.ContainerStatus{ + { + Ready: false, + State: v1.ContainerState{ + Waiting: &v1.ContainerStateWaiting{ + Reason: "ImagePullBackOff", + Message: "this is an error", + }, + }, + }, + } + taskStatus, err := DemystifyPending(s2, pluginsCore.TaskInfo{}) + assert.NoError(t, err) + assert.Equal(t, pluginsCore.PhaseInitializing, taskStatus.Phase()) + }) + + t.Run("ImagePullBackOffOutsideGracePeriod", func(t *testing.T) { + s2 := *s.DeepCopy() + s2.Conditions[0].LastTransitionTime.Time = metav1.Now().Add(-config.GetK8sPluginConfig().ImagePullBackoffGracePeriod.Duration) + s2.ContainerStatuses = []v1.ContainerStatus{ + { + Ready: false, + State: v1.ContainerState{ + Waiting: &v1.ContainerStateWaiting{ + Reason: "ImagePullBackOff", + Message: "this is an error", + }, + }, + }, + } + taskStatus, err := DemystifyPending(s2, pluginsCore.TaskInfo{}) + assert.NoError(t, err) + assert.Equal(t, 
pluginsCore.PhaseRetryableFailure, taskStatus.Phase()) + assert.True(t, taskStatus.CleanupOnFailure()) + }) + + t.Run("InvalidImageName", func(t *testing.T) { + s.ContainerStatuses = []v1.ContainerStatus{ + { + Ready: false, + State: v1.ContainerState{ + Waiting: &v1.ContainerStateWaiting{ + Reason: "InvalidImageName", + Message: "this is an error", + }, + }, + }, + } + taskStatus, err := DemystifyPending(s, pluginsCore.TaskInfo{}) + assert.NoError(t, err) + assert.Equal(t, pluginsCore.PhasePermanentFailure, taskStatus.Phase()) + assert.True(t, taskStatus.CleanupOnFailure()) + }) + + t.Run("RegistryUnavailable", func(t *testing.T) { + s.ContainerStatuses = []v1.ContainerStatus{ + { + Ready: false, + State: v1.ContainerState{ + Waiting: &v1.ContainerStateWaiting{ + Reason: "RegistryUnavailable", + Message: "this is an error", + }, + }, + }, + } + taskStatus, err := DemystifyPending(s, pluginsCore.TaskInfo{}) + assert.NoError(t, err) + assert.Equal(t, pluginsCore.PhaseRetryableFailure, taskStatus.Phase()) + assert.True(t, taskStatus.CleanupOnFailure()) + }) + + t.Run("RandomError", func(t *testing.T) { + s.ContainerStatuses = []v1.ContainerStatus{ + { + Ready: false, + State: v1.ContainerState{ + Waiting: &v1.ContainerStateWaiting{ + Reason: "RandomError", + Message: "this is an error", + }, + }, + }, + } + taskStatus, err := DemystifyPending(s, pluginsCore.TaskInfo{}) + assert.NoError(t, err) + assert.Equal(t, pluginsCore.PhaseRetryableFailure, taskStatus.Phase()) + assert.True(t, taskStatus.CleanupOnFailure()) + }) + + t.Run("CreateContainerConfigErrorWithinGracePeriod", func(t *testing.T) { + s2 := *s.DeepCopy() + s2.Conditions[0].LastTransitionTime = metav1.Now() + s2.ContainerStatuses = []v1.ContainerStatus{ + { + Ready: false, + State: v1.ContainerState{ + Waiting: &v1.ContainerStateWaiting{ + Reason: "CreateContainerConfigError", + Message: "this is a transient error", + }, + }, + }, + } + taskStatus, err := DemystifyPending(s2, pluginsCore.TaskInfo{}) + 
assert.NoError(t, err) + assert.Equal(t, pluginsCore.PhaseInitializing, taskStatus.Phase()) + }) + + t.Run("CreateContainerConfigErrorOutsideGracePeriod", func(t *testing.T) { + s2 := *s.DeepCopy() + s2.Conditions[0].LastTransitionTime.Time = metav1.Now().Add(-config.GetK8sPluginConfig().CreateContainerConfigErrorGracePeriod.Duration) + s2.ContainerStatuses = []v1.ContainerStatus{ + { + Ready: false, + State: v1.ContainerState{ + Waiting: &v1.ContainerStateWaiting{ + Reason: "CreateContainerConfigError", + Message: "this a permanent error", + }, + }, + }, + } + taskStatus, err := DemystifyPending(s2, pluginsCore.TaskInfo{}) + assert.NoError(t, err) + assert.Equal(t, pluginsCore.PhasePermanentFailure, taskStatus.Phase()) + assert.True(t, taskStatus.CleanupOnFailure()) + }) + + t.Run("CreateContainerErrorWithinGracePeriod", func(t *testing.T) { + s2 := *s.DeepCopy() + s2.Conditions[0].LastTransitionTime = metav1.Now() + s2.ContainerStatuses = []v1.ContainerStatus{ + { + Ready: false, + State: v1.ContainerState{ + Waiting: &v1.ContainerStateWaiting{ + Reason: "CreateContainerError", + Message: "this is a transient error", + }, + }, + }, + } + taskStatus, err := DemystifyPending(s2, pluginsCore.TaskInfo{}) + assert.NoError(t, err) + assert.Equal(t, pluginsCore.PhaseInitializing, taskStatus.Phase()) + }) + + t.Run("CreateContainerErrorOutsideGracePeriod", func(t *testing.T) { + s2 := *s.DeepCopy() + s2.Conditions[0].LastTransitionTime.Time = metav1.Now().Add(-config.GetK8sPluginConfig().CreateContainerErrorGracePeriod.Duration) + s2.ContainerStatuses = []v1.ContainerStatus{ + { + Ready: false, + State: v1.ContainerState{ + Waiting: &v1.ContainerStateWaiting{ + Reason: "CreateContainerError", + Message: "this a permanent error", + }, + }, + }, + } + taskStatus, err := DemystifyPending(s2, pluginsCore.TaskInfo{}) + assert.NoError(t, err) + assert.Equal(t, pluginsCore.PhasePermanentFailure, taskStatus.Phase()) + assert.True(t, taskStatus.CleanupOnFailure()) + }) +} + +func 
TestDemystifyPendingTimeout(t *testing.T) { + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + CreateContainerErrorGracePeriod: config1.Duration{ + Duration: time.Minute * 3, + }, + ImagePullBackoffGracePeriod: config1.Duration{ + Duration: time.Minute * 3, + }, + PodPendingTimeout: config1.Duration{ + Duration: 10, + }, + })) + + s := v1.PodStatus{ + Phase: v1.PodPending, + Conditions: []v1.PodCondition{ + { + Type: v1.PodScheduled, + Status: v1.ConditionFalse, + }, + }, + } + s.Conditions[0].LastTransitionTime.Time = metav1.Now().Add(-config.GetK8sPluginConfig().PodPendingTimeout.Duration) + + t.Run("PodPendingExceedsTimeout", func(t *testing.T) { + taskStatus, err := DemystifyPending(s, pluginsCore.TaskInfo{}) + assert.NoError(t, err) + assert.Equal(t, pluginsCore.PhaseRetryableFailure, taskStatus.Phase()) + assert.Equal(t, "PodPendingTimeout", taskStatus.Err().Code) + assert.Equal(t, core.ExecutionError_SYSTEM, taskStatus.Err().Kind) + assert.True(t, taskStatus.CleanupOnFailure()) + }) +} + +func TestDemystifySuccess(t *testing.T) { + t.Run("OOMKilled", func(t *testing.T) { + phaseInfo, err := DemystifySuccess(v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + { + State: v1.ContainerState{ + Terminated: &v1.ContainerStateTerminated{ + Reason: OOMKilled, + }, + }, + }, + }, + }, pluginsCore.TaskInfo{}) + assert.Nil(t, err) + assert.Equal(t, pluginsCore.PhaseRetryableFailure, phaseInfo.Phase()) + assert.Equal(t, "OOMKilled", phaseInfo.Err().Code) + }) + + t.Run("InitContainer OOMKilled", func(t *testing.T) { + phaseInfo, err := DemystifySuccess(v1.PodStatus{ + InitContainerStatuses: []v1.ContainerStatus{ + { + State: v1.ContainerState{ + Terminated: &v1.ContainerStateTerminated{ + Reason: OOMKilled, + }, + }, + }, + }, + }, pluginsCore.TaskInfo{}) + assert.Nil(t, err) + assert.Equal(t, pluginsCore.PhaseRetryableFailure, phaseInfo.Phase()) + assert.Equal(t, "OOMKilled", phaseInfo.Err().Code) + }) + + t.Run("success", func(t 
*testing.T) { + phaseInfo, err := DemystifySuccess(v1.PodStatus{}, pluginsCore.TaskInfo{}) + assert.Nil(t, err) + assert.Equal(t, pluginsCore.PhaseSuccess, phaseInfo.Phase()) + }) +} + +func TestDemystifyFailure(t *testing.T) { + ctx := context.TODO() + + t.Run("unknown-error", func(t *testing.T) { + phaseInfo, err := DemystifyFailure(ctx, v1.PodStatus{}, pluginsCore.TaskInfo{}, "") + assert.Nil(t, err) + assert.Equal(t, pluginsCore.PhaseRetryableFailure, phaseInfo.Phase()) + assert.Equal(t, "Interrupted", phaseInfo.Err().Code) + assert.Equal(t, core.ExecutionError_SYSTEM, phaseInfo.Err().Kind) + }) + + t.Run("known-error", func(t *testing.T) { + phaseInfo, err := DemystifyFailure(ctx, v1.PodStatus{Reason: "hello"}, pluginsCore.TaskInfo{}, "") + assert.Nil(t, err) + assert.Equal(t, pluginsCore.PhaseRetryableFailure, phaseInfo.Phase()) + assert.Equal(t, "hello", phaseInfo.Err().Code) + assert.Equal(t, core.ExecutionError_USER, phaseInfo.Err().Kind) + }) + + t.Run("OOMKilled", func(t *testing.T) { + phaseInfo, err := DemystifyFailure(ctx, v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + { + State: v1.ContainerState{ + Terminated: &v1.ContainerStateTerminated{ + Reason: OOMKilled, + ExitCode: 137, + }, + }, + }, + }, + }, pluginsCore.TaskInfo{}, "") + assert.Nil(t, err) + assert.Equal(t, pluginsCore.PhaseRetryableFailure, phaseInfo.Phase()) + assert.Equal(t, "OOMKilled", phaseInfo.Err().Code) + assert.Equal(t, core.ExecutionError_USER, phaseInfo.Err().Kind) + }) + + t.Run("SIGKILL non-primary container", func(t *testing.T) { + phaseInfo, err := DemystifyFailure(ctx, v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + { + LastTerminationState: v1.ContainerState{ + Terminated: &v1.ContainerStateTerminated{ + Reason: "some reason", + ExitCode: SIGKILL, + }, + }, + Name: "non-primary-container", + }, + }, + }, pluginsCore.TaskInfo{}, "primary-container") + assert.Nil(t, err) + assert.Equal(t, pluginsCore.PhaseRetryableFailure, phaseInfo.Phase()) + 
assert.Equal(t, "Interrupted", phaseInfo.Err().Code) + assert.Equal(t, core.ExecutionError_USER, phaseInfo.Err().Kind) + }) + + t.Run("SIGKILL primary container", func(t *testing.T) { + phaseInfo, err := DemystifyFailure(ctx, v1.PodStatus{ + ContainerStatuses: []v1.ContainerStatus{ + { + LastTerminationState: v1.ContainerState{ + Terminated: &v1.ContainerStateTerminated{ + Reason: "some reason", + ExitCode: SIGKILL, + }, + }, + Name: "primary-container", + }, + }, + }, pluginsCore.TaskInfo{}, "primary-container") + assert.Nil(t, err) + assert.Equal(t, pluginsCore.PhaseRetryableFailure, phaseInfo.Phase()) + assert.Equal(t, "Interrupted", phaseInfo.Err().Code) + assert.Equal(t, core.ExecutionError_SYSTEM, phaseInfo.Err().Kind) + }) + + t.Run("GKE node preemption", func(t *testing.T) { + for _, reason := range []string{ + "Terminated", + "Shutdown", + "NodeShutdown", + } { + t.Run(reason, func(t *testing.T) { + message := "Test pod status message" + phaseInfo, err := DemystifyFailure(ctx, v1.PodStatus{ + Message: message, + Reason: reason, + // Can't always rely on GCP returining container statuses when node is preempted + ContainerStatuses: []v1.ContainerStatus{}, + }, pluginsCore.TaskInfo{}, "") + assert.Nil(t, err) + assert.Equal(t, pluginsCore.PhaseRetryableFailure, phaseInfo.Phase()) + assert.Equal(t, "Interrupted", phaseInfo.Err().GetCode()) + assert.Equal(t, core.ExecutionError_SYSTEM, phaseInfo.Err().GetKind()) + assert.Equal(t, message, phaseInfo.Err().GetMessage()) + }) + } + }) + + t.Run("Kubelet admission denies pod due to missing node label", func(t *testing.T) { + for _, reason := range []string{ + "NodeAffinity", + } { + t.Run(reason, func(t *testing.T) { + message := "Pod was rejected: Predicate NodeAffinity failed: node(s) didn't match Pod's node affinity/selector" + phaseInfo, err := DemystifyFailure(ctx, v1.PodStatus{ + Message: message, + Reason: reason, + Phase: v1.PodFailed, + // Can't always rely on GCP returining container statuses when node is 
preempted + ContainerStatuses: []v1.ContainerStatus{}, + }, pluginsCore.TaskInfo{}, "") + assert.Nil(t, err) + assert.Equal(t, pluginsCore.PhaseRetryableFailure, phaseInfo.Phase()) + assert.Equal(t, "Interrupted", phaseInfo.Err().GetCode()) + assert.Equal(t, core.ExecutionError_SYSTEM, phaseInfo.Err().GetKind()) + assert.Equal(t, message, phaseInfo.Err().GetMessage()) + }) + } + }) +} + +func TestDemystifyPending_testcases(t *testing.T) { + + tests := []struct { + name string + filename string + isErr bool + errCode string + message string + }{ + {"ImagePullBackOff", "imagepull-failurepod.json", false, "ContainersNotReady|ImagePullBackOff", "Grace period [3m0s] exceeded|containers with unready status: [fdf98e4ed2b524dc3bf7-get-flyte-id-task-0]|Back-off pulling image \"image\""}, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + testFile := filepath.Join("testdata", tt.filename) + data, err := ioutil.ReadFile(testFile) + assert.NoError(t, err, "failed to read file %s", testFile) + pod := &v1.Pod{} + if assert.NoError(t, json.Unmarshal(data, pod), "failed to unmarshal json in %s. 
Expected of type v1.Pod", testFile) { + p, err := DemystifyPending(pod.Status, pluginsCore.TaskInfo{}) + if tt.isErr { + assert.Error(t, err, "Error expected from method") + } else { + assert.NoError(t, err, "Error not expected") + assert.NotNil(t, p) + assert.Equal(t, p.Phase(), pluginsCore.PhaseRetryableFailure) + if assert.NotNil(t, p.Err()) { + assert.Equal(t, p.Err().Code, tt.errCode) + assert.Equal(t, p.Err().Message, tt.message) + } + } + } + }) + } +} + +func TestDeterminePrimaryContainerPhase(t *testing.T) { + ctx := context.TODO() + primaryContainerName := "primary" + secondaryContainer := v1.ContainerStatus{ + Name: "secondary", + State: v1.ContainerState{ + Terminated: &v1.ContainerStateTerminated{ + ExitCode: 0, + }, + }, + } + var info = &pluginsCore.TaskInfo{} + t.Run("primary container waiting", func(t *testing.T) { + phaseInfo := DeterminePrimaryContainerPhase(ctx, primaryContainerName, []v1.ContainerStatus{ + secondaryContainer, { + Name: primaryContainerName, + State: v1.ContainerState{ + Waiting: &v1.ContainerStateWaiting{ + Reason: "just dawdling", + }, + }, + }, + }, info) + assert.Equal(t, pluginsCore.PhaseRunning, phaseInfo.Phase()) + }) + t.Run("primary container running", func(t *testing.T) { + phaseInfo := DeterminePrimaryContainerPhase(ctx, primaryContainerName, []v1.ContainerStatus{ + secondaryContainer, { + Name: primaryContainerName, + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{ + StartedAt: metav1.Now(), + }, + }, + }, + }, info) + assert.Equal(t, pluginsCore.PhaseRunning, phaseInfo.Phase()) + }) + t.Run("primary container failed", func(t *testing.T) { + phaseInfo := DeterminePrimaryContainerPhase(ctx, primaryContainerName, []v1.ContainerStatus{ + secondaryContainer, { + Name: primaryContainerName, + State: v1.ContainerState{ + Terminated: &v1.ContainerStateTerminated{ + ExitCode: 1, + Reason: "foo", + Message: "foo failed", + }, + }, + }, + }, info) + assert.Equal(t, pluginsCore.PhaseRetryableFailure, 
phaseInfo.Phase()) + assert.Equal(t, "foo", phaseInfo.Err().Code) + assert.Equal(t, core.ExecutionError_USER, phaseInfo.Err().Kind) + assert.Equal(t, "\r\n[primary] terminated with exit code (1). Reason [foo]. Message: \nfoo failed.", phaseInfo.Err().Message) + }) + t.Run("primary container failed - SIGKILL", func(t *testing.T) { + phaseInfo := DeterminePrimaryContainerPhase(ctx, primaryContainerName, []v1.ContainerStatus{ + secondaryContainer, { + Name: primaryContainerName, + State: v1.ContainerState{ + Terminated: &v1.ContainerStateTerminated{ + ExitCode: 137, + Reason: "foo", + Message: "foo failed", + }, + }, + }, + }, info) + assert.Equal(t, pluginsCore.PhaseRetryableFailure, phaseInfo.Phase()) + assert.Equal(t, "foo", phaseInfo.Err().Code) + assert.Equal(t, core.ExecutionError_SYSTEM, phaseInfo.Err().Kind) + assert.Equal(t, "\r\n[primary] terminated with exit code (137). Reason [foo]. Message: \nfoo failed.", phaseInfo.Err().Message) + }) + t.Run("primary container failed - SIGKILL unsigned", func(t *testing.T) { + phaseInfo := DeterminePrimaryContainerPhase(ctx, primaryContainerName, []v1.ContainerStatus{ + secondaryContainer, { + Name: primaryContainerName, + State: v1.ContainerState{ + Terminated: &v1.ContainerStateTerminated{ + ExitCode: 247, + Reason: "foo", + Message: "foo failed", + }, + }, + }, + }, info) + assert.Equal(t, pluginsCore.PhaseRetryableFailure, phaseInfo.Phase()) + assert.Equal(t, "foo", phaseInfo.Err().Code) + assert.Equal(t, core.ExecutionError_SYSTEM, phaseInfo.Err().Kind) + assert.Equal(t, "\r\n[primary] terminated with exit code (247). Reason [foo]. 
Message: \nfoo failed.", phaseInfo.Err().Message) + }) + t.Run("primary container succeeded", func(t *testing.T) { + phaseInfo := DeterminePrimaryContainerPhase(ctx, primaryContainerName, []v1.ContainerStatus{ + secondaryContainer, { + Name: primaryContainerName, + State: v1.ContainerState{ + Terminated: &v1.ContainerStateTerminated{ + ExitCode: 0, + }, + }, + }, + }, info) + assert.Equal(t, pluginsCore.PhaseSuccess, phaseInfo.Phase()) + }) + t.Run("missing primary container", func(t *testing.T) { + phaseInfo := DeterminePrimaryContainerPhase(ctx, primaryContainerName, []v1.ContainerStatus{ + secondaryContainer, + }, info) + assert.Equal(t, pluginsCore.PhasePermanentFailure, phaseInfo.Phase()) + assert.Equal(t, PrimaryContainerNotFound, phaseInfo.Err().Code) + assert.Equal(t, "Primary container [primary] not found in pod's container statuses", phaseInfo.Err().Message) + }) + t.Run("primary container failed with OOMKilled", func(t *testing.T) { + phaseInfo := DeterminePrimaryContainerPhase(ctx, primaryContainerName, []v1.ContainerStatus{ + secondaryContainer, { + Name: primaryContainerName, + State: v1.ContainerState{ + Terminated: &v1.ContainerStateTerminated{ + ExitCode: 0, + Reason: OOMKilled, + Message: "foo failed", + }, + }, + }, + }, info) + assert.Equal(t, pluginsCore.PhaseRetryableFailure, phaseInfo.Phase()) + assert.Equal(t, core.ExecutionError_USER, phaseInfo.Err().Kind) + assert.Equal(t, OOMKilled, phaseInfo.Err().Code) + assert.Equal(t, "\r\n[primary] terminated with exit code (0). Reason [OOMKilled]. 
Message: \nfoo failed.", phaseInfo.Err().Message) + }) + t.Run("primary container failed with OOMKilled - SIGKILL", func(t *testing.T) { + phaseInfo := DeterminePrimaryContainerPhase(ctx, primaryContainerName, []v1.ContainerStatus{ + secondaryContainer, { + Name: primaryContainerName, + State: v1.ContainerState{ + Terminated: &v1.ContainerStateTerminated{ + ExitCode: 137, + Reason: OOMKilled, + Message: "foo failed", + }, + }, + }, + }, info) + assert.Equal(t, pluginsCore.PhaseRetryableFailure, phaseInfo.Phase()) + assert.Equal(t, core.ExecutionError_USER, phaseInfo.Err().Kind) + assert.Equal(t, OOMKilled, phaseInfo.Err().Code) + assert.Equal(t, "\r\n[primary] terminated with exit code (137). Reason [OOMKilled]. Message: \nfoo failed.", phaseInfo.Err().Message) + }) +} + +func TestGetPodTemplate(t *testing.T) { + ctx := context.TODO() + + podTemplate := v1.PodTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + Namespace: "bar", + }, + } + + t.Run("PodTemplateDoesNotExist", func(t *testing.T) { + // initialize TaskExecutionContext + task := &core.TaskTemplate{ + Type: "test", + } + + taskReader := &pluginsCoreMock.TaskReader{} + taskReader.On("Read", mock.Anything).Return(task, nil) + + tCtx := &pluginsCoreMock.TaskExecutionContext{} + tCtx.OnTaskExecutionMetadata().Return(dummyTaskExecutionMetadata(&v1.ResourceRequirements{}, nil, "", nil)) + tCtx.OnTaskReader().Return(taskReader) + + // initialize PodTemplateStore + store := NewPodTemplateStore() + store.SetDefaultNamespace(podTemplate.Namespace) + + // validate base PodTemplate + basePodTemplate, err := getBasePodTemplate(ctx, tCtx, store) + assert.Nil(t, err) + assert.Nil(t, basePodTemplate) + }) + + t.Run("PodTemplateFromTaskTemplateNameExists", func(t *testing.T) { + // initialize TaskExecutionContext + task := &core.TaskTemplate{ + Metadata: &core.TaskMetadata{ + PodTemplateName: "foo", + }, + Type: "test", + } + + taskReader := &pluginsCoreMock.TaskReader{} + taskReader.On("Read", 
mock.Anything).Return(task, nil) + + tCtx := &pluginsCoreMock.TaskExecutionContext{} + tCtx.OnTaskExecutionMetadata().Return(dummyTaskExecutionMetadata(&v1.ResourceRequirements{}, nil, "", nil)) + tCtx.OnTaskReader().Return(taskReader) + + // initialize PodTemplateStore + store := NewPodTemplateStore() + store.SetDefaultNamespace(podTemplate.Namespace) + store.Store(&podTemplate) + + // validate base PodTemplate + basePodTemplate, err := getBasePodTemplate(ctx, tCtx, store) + assert.Nil(t, err) + assert.True(t, reflect.DeepEqual(podTemplate, *basePodTemplate)) + }) + + t.Run("PodTemplateFromTaskTemplateNameDoesNotExist", func(t *testing.T) { + // initialize TaskExecutionContext + task := &core.TaskTemplate{ + Type: "test", + Metadata: &core.TaskMetadata{ + PodTemplateName: "foo", + }, + } + + taskReader := &pluginsCoreMock.TaskReader{} + taskReader.On("Read", mock.Anything).Return(task, nil) + + tCtx := &pluginsCoreMock.TaskExecutionContext{} + tCtx.OnTaskExecutionMetadata().Return(dummyTaskExecutionMetadata(&v1.ResourceRequirements{}, nil, "", nil)) + tCtx.OnTaskReader().Return(taskReader) + + // initialize PodTemplateStore + store := NewPodTemplateStore() + store.SetDefaultNamespace(podTemplate.Namespace) + + // validate base PodTemplate + basePodTemplate, err := getBasePodTemplate(ctx, tCtx, store) + assert.NotNil(t, err) + assert.Nil(t, basePodTemplate) + }) + + t.Run("PodTemplateFromDefaultPodTemplate", func(t *testing.T) { + // set default PodTemplate name configuration + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + DefaultPodTemplateName: "foo", + })) + + // initialize TaskExecutionContext + task := &core.TaskTemplate{ + Type: "test", + } + + taskReader := &pluginsCoreMock.TaskReader{} + taskReader.On("Read", mock.Anything).Return(task, nil) + + tCtx := &pluginsCoreMock.TaskExecutionContext{} + tCtx.OnTaskExecutionMetadata().Return(dummyTaskExecutionMetadata(&v1.ResourceRequirements{}, nil, "", nil)) + 
tCtx.OnTaskReader().Return(taskReader) + + // initialize PodTemplateStore + store := NewPodTemplateStore() + store.SetDefaultNamespace(podTemplate.Namespace) + store.Store(&podTemplate) + + // validate base PodTemplate + basePodTemplate, err := getBasePodTemplate(ctx, tCtx, store) + assert.Nil(t, err) + assert.True(t, reflect.DeepEqual(podTemplate, *basePodTemplate)) + }) +} + +func TestMergeWithBasePodTemplate(t *testing.T) { + podSpec := v1.PodSpec{ + Containers: []v1.Container{ + v1.Container{ + Name: "foo", + }, + v1.Container{ + Name: "bar", + }, + }, + } + + objectMeta := metav1.ObjectMeta{ + Labels: map[string]string{ + "fooKey": "barValue", + }, + } + + t.Run("BasePodTemplateDoesNotExist", func(t *testing.T) { + task := &core.TaskTemplate{ + Type: "test", + } + + taskReader := &pluginsCoreMock.TaskReader{} + taskReader.On("Read", mock.Anything).Return(task, nil) + + tCtx := &pluginsCoreMock.TaskExecutionContext{} + tCtx.OnTaskExecutionMetadata().Return(dummyTaskExecutionMetadata(&v1.ResourceRequirements{}, nil, "", nil)) + tCtx.OnTaskReader().Return(taskReader) + + resultPodSpec, resultObjectMeta, err := MergeWithBasePodTemplate(context.TODO(), tCtx, &podSpec, &objectMeta, "foo", "foo-init") + assert.Nil(t, err) + assert.True(t, reflect.DeepEqual(podSpec, *resultPodSpec)) + assert.True(t, reflect.DeepEqual(objectMeta, *resultObjectMeta)) + }) + + t.Run("BasePodTemplateExists", func(t *testing.T) { + primaryContainerTemplate := v1.Container{ + Name: primaryContainerTemplateName, + TerminationMessagePath: "/dev/primary-termination-log", + } + + primaryInitContainerTemplate := v1.Container{ + Name: primaryInitContainerTemplateName, + TerminationMessagePath: "/dev/primary-init-termination-log", + } + + podTemplate := v1.PodTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "fooTemplate", + Namespace: "test-namespace", + }, + Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: map[string]string{ + "fooKey": "bazValue", + "barKey": 
"bazValue", + }, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + primaryContainerTemplate, + }, + InitContainers: []v1.Container{ + primaryInitContainerTemplate, + }, + }, + }, + } + + DefaultPodTemplateStore.Store(&podTemplate) + + task := &core.TaskTemplate{ + Metadata: &core.TaskMetadata{ + PodTemplateName: "fooTemplate", + }, + Target: &core.TaskTemplate_Container{ + Container: &core.Container{ + Command: []string{"command"}, + Args: []string{"{{.Input}}"}, + }, + }, + Type: "test", + } + + taskReader := &pluginsCoreMock.TaskReader{} + taskReader.On("Read", mock.Anything).Return(task, nil) + + tCtx := &pluginsCoreMock.TaskExecutionContext{} + tCtx.OnTaskExecutionMetadata().Return(dummyTaskExecutionMetadata(&v1.ResourceRequirements{}, nil, "", nil)) + tCtx.OnTaskReader().Return(taskReader) + + resultPodSpec, resultObjectMeta, err := MergeWithBasePodTemplate(context.TODO(), tCtx, &podSpec, &objectMeta, "foo", "foo-init") + assert.Nil(t, err) + + // test that template podSpec is merged + primaryContainer := resultPodSpec.Containers[0] + assert.Equal(t, podSpec.Containers[0].Name, primaryContainer.Name) + assert.Equal(t, primaryContainerTemplate.TerminationMessagePath, primaryContainer.TerminationMessagePath) + + // test that template object metadata is copied + assert.Contains(t, resultObjectMeta.Labels, "fooKey") + assert.Equal(t, resultObjectMeta.Labels["fooKey"], "barValue") + assert.Contains(t, resultObjectMeta.Labels, "barKey") + assert.Equal(t, resultObjectMeta.Labels["barKey"], "bazValue") + }) +} + +func TestMergeBasePodSpecsOntoTemplate(t *testing.T) { + + baseContainer1 := v1.Container{ + Name: "task-1", + Image: "task-image", + } + + baseContainer2 := v1.Container{ + Name: "task-2", + Image: "task-image", + } + + initContainer1 := v1.Container{ + Name: "task-init-1", + Image: "task-init-image", + } + + initContainer2 := v1.Container{ + Name: "task-init-2", + Image: "task-init-image", + } + + tests := []struct { + name string + templatePodSpec 
*v1.PodSpec + basePodSpec *v1.PodSpec + primaryContainerName string + primaryInitContainerName string + expectedResult *v1.PodSpec + expectedError error + }{ + { + name: "nil template", + templatePodSpec: nil, + basePodSpec: &v1.PodSpec{}, + expectedError: errors.New("neither the templatePodSpec or the basePodSpec can be nil"), + }, + { + name: "nil base", + templatePodSpec: &v1.PodSpec{}, + basePodSpec: nil, + expectedError: errors.New("neither the templatePodSpec or the basePodSpec can be nil"), + }, + { + name: "nil template and base", + templatePodSpec: nil, + basePodSpec: nil, + expectedError: errors.New("neither the templatePodSpec or the basePodSpec can be nil"), + }, + { + name: "template and base with no overlap", + templatePodSpec: &v1.PodSpec{ + SchedulerName: "templateScheduler", + }, + basePodSpec: &v1.PodSpec{ + ServiceAccountName: "baseServiceAccount", + }, + expectedResult: &v1.PodSpec{ + SchedulerName: "templateScheduler", + ServiceAccountName: "baseServiceAccount", + }, + }, + { + name: "template and base with overlap", + templatePodSpec: &v1.PodSpec{ + SchedulerName: "templateScheduler", + }, + basePodSpec: &v1.PodSpec{ + SchedulerName: "baseScheduler", + ServiceAccountName: "baseServiceAccount", + }, + expectedResult: &v1.PodSpec{ + SchedulerName: "baseScheduler", + ServiceAccountName: "baseServiceAccount", + }, + }, + { + name: "template with default containers and base with no containers", + templatePodSpec: &v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "default", + Image: "default-image", + }, + }, + InitContainers: []v1.Container{ + { + Name: "default-init", + Image: "default-init-image", + }, + }, + }, + basePodSpec: &v1.PodSpec{ + SchedulerName: "baseScheduler", + }, + expectedResult: &v1.PodSpec{ + SchedulerName: "baseScheduler", + }, + }, + { + name: "template with no default containers and base containers", + templatePodSpec: &v1.PodSpec{}, + basePodSpec: &v1.PodSpec{ + Containers: []v1.Container{baseContainer1}, + 
InitContainers: []v1.Container{initContainer1}, + SchedulerName: "baseScheduler", + }, + expectedResult: &v1.PodSpec{ + Containers: []v1.Container{baseContainer1}, + InitContainers: []v1.Container{initContainer1}, + SchedulerName: "baseScheduler", + }, + }, + { + name: "template and base with matching containers", + templatePodSpec: &v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "task-1", + Image: "default-task-image", + TerminationMessagePath: "/dev/template-termination-log", + }, + }, + InitContainers: []v1.Container{ + { + Name: "task-init-1", + Image: "default-task-init-image", + TerminationMessagePath: "/dev/template-init-termination-log", + }, + }, + }, + primaryContainerName: "task-1", + primaryInitContainerName: "task-init-1", + basePodSpec: &v1.PodSpec{ + Containers: []v1.Container{baseContainer1}, + InitContainers: []v1.Container{initContainer1}, + SchedulerName: "baseScheduler", + }, + expectedResult: &v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "task-1", + Image: "task-image", + TerminationMessagePath: "/dev/template-termination-log", + }, + }, + InitContainers: []v1.Container{ + { + Name: "task-init-1", + Image: "task-init-image", + TerminationMessagePath: "/dev/template-init-termination-log", + }, + }, + SchedulerName: "baseScheduler", + }, + }, + { + name: "template and base with no matching containers", + templatePodSpec: &v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "not-matching", + Image: "default-task-image", + TerminationMessagePath: "/dev/template-termination-log", + }, + }, + InitContainers: []v1.Container{ + { + Name: "not-matching-init", + Image: "default-task-init-image", + TerminationMessagePath: "/dev/template-init-termination-log", + }, + }, + }, + basePodSpec: &v1.PodSpec{ + Containers: []v1.Container{baseContainer1}, + InitContainers: []v1.Container{initContainer1}, + SchedulerName: "baseScheduler", + }, + expectedResult: &v1.PodSpec{ + Containers: []v1.Container{baseContainer1}, + InitContainers: 
[]v1.Container{initContainer1}, + SchedulerName: "baseScheduler", + }, + }, + { + name: "template with default containers and base with containers", + templatePodSpec: &v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "default", + Image: "default-task-image", + TerminationMessagePath: "/dev/template-termination-log", + }, + }, + InitContainers: []v1.Container{ + { + Name: "default-init", + Image: "default-task-init-image", + TerminationMessagePath: "/dev/template-init-termination-log", + }, + }, + }, + basePodSpec: &v1.PodSpec{ + Containers: []v1.Container{baseContainer1, baseContainer2}, + InitContainers: []v1.Container{initContainer1, initContainer2}, + SchedulerName: "baseScheduler", + }, + expectedResult: &v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "task-1", + Image: "task-image", + TerminationMessagePath: "/dev/template-termination-log", + }, + { + Name: "task-2", + Image: "task-image", + TerminationMessagePath: "/dev/template-termination-log", + }, + }, + InitContainers: []v1.Container{ + { + Name: "task-init-1", + Image: "task-init-image", + TerminationMessagePath: "/dev/template-init-termination-log", + }, + { + Name: "task-init-2", + Image: "task-init-image", + TerminationMessagePath: "/dev/template-init-termination-log", + }, + }, + SchedulerName: "baseScheduler", + }, + }, + { + name: "template with primary containers and base with containers", + templatePodSpec: &v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "primary", + Image: "default-task-image", + TerminationMessagePath: "/dev/template-termination-log", + }, + }, + InitContainers: []v1.Container{ + { + Name: "primary-init", + Image: "default-task-init-image", + TerminationMessagePath: "/dev/template-init-termination-log", + }, + }, + }, + basePodSpec: &v1.PodSpec{ + Containers: []v1.Container{baseContainer1, baseContainer2}, + InitContainers: []v1.Container{initContainer1, initContainer2}, + SchedulerName: "baseScheduler", + }, + expectedResult: &v1.PodSpec{ + Containers: 
[]v1.Container{ + { + Name: "task-1", + Image: "task-image", + TerminationMessagePath: "/dev/template-termination-log", + }, + baseContainer2, + }, + InitContainers: []v1.Container{ + { + Name: "task-init-1", + Image: "task-init-image", + TerminationMessagePath: "/dev/template-init-termination-log", + }, + initContainer2, + }, + SchedulerName: "baseScheduler", + }, + primaryContainerName: "task-1", + primaryInitContainerName: "task-init-1", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, mergeErr := MergeBasePodSpecOntoTemplate(tt.templatePodSpec, tt.basePodSpec, tt.primaryContainerName, tt.primaryInitContainerName) + assert.Equal(t, tt.expectedResult, result) + assert.Equal(t, tt.expectedError, mergeErr) + }) + } +} + +func TestMergeOverlayPodSpecOntoBase(t *testing.T) { + + tests := []struct { + name string + basePodSpec *v1.PodSpec + overlayPodSpec *v1.PodSpec + expectedResult *v1.PodSpec + expectedError error + }{ + { + name: "nil overlay", + basePodSpec: &v1.PodSpec{}, + overlayPodSpec: nil, + expectedError: errors.New("neither the basePodSpec or the overlayPodSpec can be nil"), + }, + { + name: "nil base", + basePodSpec: nil, + overlayPodSpec: &v1.PodSpec{}, + expectedError: errors.New("neither the basePodSpec or the overlayPodSpec can be nil"), + }, + { + name: "nil base and overlay", + basePodSpec: nil, + overlayPodSpec: nil, + expectedError: errors.New("neither the basePodSpec or the overlayPodSpec can be nil"), + }, + { + name: "base and overlay no overlap", + basePodSpec: &v1.PodSpec{ + SchedulerName: "baseScheduler", + }, + overlayPodSpec: &v1.PodSpec{ + ServiceAccountName: "overlayServiceAccount", + }, + expectedResult: &v1.PodSpec{ + SchedulerName: "baseScheduler", + ServiceAccountName: "overlayServiceAccount", + }, + }, + { + name: "template and base with overlap", + basePodSpec: &v1.PodSpec{ + SchedulerName: "baseScheduler", + }, + overlayPodSpec: &v1.PodSpec{ + SchedulerName: "overlayScheduler", + 
ServiceAccountName: "overlayServiceAccount", + }, + expectedResult: &v1.PodSpec{ + SchedulerName: "overlayScheduler", + ServiceAccountName: "overlayServiceAccount", + }, + }, + { + name: "template and base with matching containers", + basePodSpec: &v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "task-1", + Image: "task-image", + }, + }, + InitContainers: []v1.Container{ + { + Name: "task-init-1", + Image: "task-init-image", + }, + }, + }, + overlayPodSpec: &v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "task-1", + Image: "overlay-image", + }, + }, + InitContainers: []v1.Container{ + { + Name: "task-init-1", + Image: "overlay-init-image", + }, + }, + }, + expectedResult: &v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "task-1", + Image: "overlay-image", + }, + }, + InitContainers: []v1.Container{ + { + Name: "task-init-1", + Image: "overlay-init-image", + }, + }, + }, + }, + { + name: "base and overlay with no matching containers", + basePodSpec: &v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "task-1", + Image: "task-image", + }, + }, + InitContainers: []v1.Container{ + { + Name: "task-init-1", + Image: "task-init-image", + }, + }, + }, + overlayPodSpec: &v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "overlay-1", + Image: "overlay-image", + }, + }, + InitContainers: []v1.Container{ + { + Name: "overlay-init-1", + Image: "overlay-init-image", + }, + }, + }, + expectedResult: &v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "task-1", + Image: "task-image", + }, + }, + InitContainers: []v1.Container{ + { + Name: "task-init-1", + Image: "task-init-image", + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, mergeErr := MergeOverlayPodSpecOntoBase(tt.basePodSpec, tt.overlayPodSpec) + assert.Equal(t, tt.expectedResult, result) + assert.Equal(t, tt.expectedError, mergeErr) + }) + } +} + +func TestAddFlyteCustomizationsToContainer_SetConsoleUrl(t *testing.T) { + tests := []struct { 
+ name string + includeConsoleURL bool + consoleURL string + expectedEnvVar *v1.EnvVar + }{ + { + name: "do not include console url and console url is not set", + includeConsoleURL: false, + consoleURL: "", + expectedEnvVar: nil, + }, + { + name: "include console url but console url is not set", + includeConsoleURL: false, + consoleURL: "", + expectedEnvVar: nil, + }, + { + name: "do not include console url but console url is set", + includeConsoleURL: false, + consoleURL: "gopher://flyte:65535/console", + expectedEnvVar: nil, + }, + { + name: "include console url and console url is set", + includeConsoleURL: true, + consoleURL: "gopher://flyte:65535/console", + expectedEnvVar: &v1.EnvVar{ + Name: flyteExecutionURL, + Value: "gopher://flyte:65535/console/projects/p2/domains/d2/executions/n2/nodeId/unique_node_id/nodes", + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + container := &v1.Container{ + Command: []string{ + "{{ .Input }}", + }, + Args: []string{ + "{{ .OutputPrefix }}", + }, + } + templateParameters := getTemplateParametersForTest(&v1.ResourceRequirements{}, &v1.ResourceRequirements{}, tt.includeConsoleURL, tt.consoleURL) + err := AddFlyteCustomizationsToContainer(context.TODO(), templateParameters, ResourceCustomizationModeAssignResources, container, nil) + assert.NoError(t, err) + if tt.expectedEnvVar == nil { + // Confirm that there is no env var FLYTE_EXECUTION_URL set + for _, envVar := range container.Env { + assert.NotEqual(t, "FLYTE_EXECUTION_URL", envVar.Name) + } + } + if tt.expectedEnvVar != nil { + // Assert that the env var FLYTE_EXECUTION_URL is set if its value is non-nil + for _, envVar := range container.Env { + if envVar.Name == tt.expectedEnvVar.Name { + assert.Equal(t, tt.expectedEnvVar.Value, envVar.Value) + return + } + } + t.Fail() + } + }) + } +} + +func TestAddTolerationsForExtendedResources(t *testing.T) { + gpuResourceName := v1.ResourceName("nvidia.com/gpu") + addTolerationResourceName := 
v1.ResourceName("foo/bar") + noTolerationResourceName := v1.ResourceName("foo/baz") + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + GpuResourceName: gpuResourceName, + AddTolerationsForExtendedResources: []string{ + gpuResourceName.String(), + addTolerationResourceName.String(), + }, + })) + + podSpec := &v1.PodSpec{ + Containers: []v1.Container{ + v1.Container{ + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + gpuResourceName: resource.MustParse("1"), + addTolerationResourceName: resource.MustParse("1"), + noTolerationResourceName: resource.MustParse("1"), + }, + }, + }, + }, + Tolerations: []v1.Toleration{ + { + Key: "foo", + Operator: v1.TolerationOpExists, + Effect: v1.TaintEffectNoSchedule, + }, + }, + } + + podSpec = AddTolerationsForExtendedResources(podSpec) + fmt.Printf("%v\n", podSpec.Tolerations) + assert.Equal(t, 3, len(podSpec.Tolerations)) + assert.Equal(t, addTolerationResourceName.String(), podSpec.Tolerations[1].Key) + assert.Equal(t, v1.TolerationOpExists, podSpec.Tolerations[1].Operator) + assert.Equal(t, v1.TaintEffectNoSchedule, podSpec.Tolerations[1].Effect) + assert.Equal(t, gpuResourceName.String(), podSpec.Tolerations[2].Key) + assert.Equal(t, v1.TolerationOpExists, podSpec.Tolerations[2].Operator) + assert.Equal(t, v1.TaintEffectNoSchedule, podSpec.Tolerations[2].Effect) + + podSpec = &v1.PodSpec{ + InitContainers: []v1.Container{ + v1.Container{ + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + gpuResourceName: resource.MustParse("1"), + addTolerationResourceName: resource.MustParse("1"), + noTolerationResourceName: resource.MustParse("1"), + }, + }, + }, + }, + Tolerations: []v1.Toleration{ + { + Key: "foo", + Operator: v1.TolerationOpExists, + Effect: v1.TaintEffectNoSchedule, + }, + }, + } + + podSpec = AddTolerationsForExtendedResources(podSpec) + assert.Equal(t, 3, len(podSpec.Tolerations)) + assert.Equal(t, addTolerationResourceName.String(), podSpec.Tolerations[1].Key) 
+ assert.Equal(t, v1.TolerationOpExists, podSpec.Tolerations[1].Operator) + assert.Equal(t, v1.TaintEffectNoSchedule, podSpec.Tolerations[1].Effect) + assert.Equal(t, gpuResourceName.String(), podSpec.Tolerations[2].Key) + assert.Equal(t, v1.TolerationOpExists, podSpec.Tolerations[2].Operator) + assert.Equal(t, v1.TaintEffectNoSchedule, podSpec.Tolerations[2].Effect) + + podSpec = &v1.PodSpec{ + Containers: []v1.Container{ + v1.Container{ + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + gpuResourceName: resource.MustParse("1"), + addTolerationResourceName: resource.MustParse("1"), + noTolerationResourceName: resource.MustParse("1"), + }, + }, + }, + }, + Tolerations: []v1.Toleration{ + { + Key: "foo", + Operator: v1.TolerationOpExists, + Effect: v1.TaintEffectNoSchedule, + }, + { + Key: gpuResourceName.String(), + Operator: v1.TolerationOpExists, + Effect: v1.TaintEffectNoSchedule, + }, + }, + } + + podSpec = AddTolerationsForExtendedResources(podSpec) + assert.Equal(t, 3, len(podSpec.Tolerations)) + assert.Equal(t, gpuResourceName.String(), podSpec.Tolerations[1].Key) + assert.Equal(t, v1.TolerationOpExists, podSpec.Tolerations[1].Operator) + assert.Equal(t, v1.TaintEffectNoSchedule, podSpec.Tolerations[1].Effect) + assert.Equal(t, addTolerationResourceName.String(), podSpec.Tolerations[2].Key) + assert.Equal(t, v1.TolerationOpExists, podSpec.Tolerations[2].Operator) + assert.Equal(t, v1.TaintEffectNoSchedule, podSpec.Tolerations[2].Effect) +} + +func TestApplyExtendedResourcesOverridesSharedMemory(t *testing.T) { + SharedMemory := &core.ExtendedResources{ + SharedMemory: &core.SharedMemory{ + MountName: "flyte-shared-memory", + MountPath: "/dev/shm", + }, + } + + newSharedMemory := &core.ExtendedResources{ + SharedMemory: &core.SharedMemory{ + MountName: "flyte-shared-memory-v2", + MountPath: "/dev/shm", + }, + } + + t.Run("base is nil", func(t *testing.T) { + final := ApplyExtendedResourcesOverrides(nil, SharedMemory) + assert.EqualValues( + 
t, + SharedMemory.GetSharedMemory(), + final.GetSharedMemory(), + ) + }) + + t.Run("overrides is nil", func(t *testing.T) { + final := ApplyExtendedResourcesOverrides(SharedMemory, nil) + assert.EqualValues( + t, + SharedMemory.GetSharedMemory(), + final.GetSharedMemory(), + ) + }) + + t.Run("merging", func(t *testing.T) { + final := ApplyExtendedResourcesOverrides(SharedMemory, newSharedMemory) + assert.EqualValues( + t, + newSharedMemory.GetSharedMemory(), + final.GetSharedMemory(), + ) + }) +} + +func TestApplySharedMemoryErrors(t *testing.T) { + + type test struct { + name string + podSpec *v1.PodSpec + primaryContainerName string + sharedVolume *core.SharedMemory + errorMsg string + } + + tests := []test{ + { + name: "No mount name", + podSpec: nil, + primaryContainerName: "primary", + sharedVolume: &core.SharedMemory{MountPath: "/dev/shm"}, + errorMsg: "mount name is not set", + }, + { + name: "No mount path name", + podSpec: nil, + primaryContainerName: "primary", + sharedVolume: &core.SharedMemory{MountName: "flyte-shared-memory"}, + errorMsg: "mount path is not set", + }, + { + name: "No primary container", + podSpec: &v1.PodSpec{ + Containers: []v1.Container{{ + Name: "secondary", + }}, + }, + primaryContainerName: "primary", + sharedVolume: &core.SharedMemory{MountName: "flyte-shared-memory", MountPath: "/dev/shm"}, + errorMsg: "Unable to find primary container", + }, + + { + name: "Volume already exists in spec", + podSpec: &v1.PodSpec{ + Containers: []v1.Container{{ + Name: "primary", + }}, + Volumes: []v1.Volume{{ + Name: "flyte-shared-memory", + }}, + }, + primaryContainerName: "primary", + sharedVolume: &core.SharedMemory{MountName: "flyte-shared-memory", MountPath: "/dev/shm"}, + errorMsg: "A volume is already named flyte-shared-memory in pod spec", + }, + { + name: "Volume already in container", + podSpec: &v1.PodSpec{ + Containers: []v1.Container{{ + Name: "primary", + VolumeMounts: []v1.VolumeMount{{ + Name: "flyte-shared-memory", + MountPath: 
"/dev/shm", + }}, + }}, + }, + primaryContainerName: "primary", + sharedVolume: &core.SharedMemory{MountName: "flyte-shared-memory", MountPath: "/dev/shm"}, + errorMsg: "A volume is already named flyte-shared-memory in container", + }, + { + name: "Mount path already in container", + podSpec: &v1.PodSpec{ + Containers: []v1.Container{{ + Name: "primary", + VolumeMounts: []v1.VolumeMount{{ + Name: "flyte-shared-memory-v2", + MountPath: "/dev/shm", + }}, + }}, + }, + primaryContainerName: "primary", + sharedVolume: &core.SharedMemory{MountName: "flyte-shared-memory", MountPath: "/dev/shm"}, + errorMsg: "/dev/shm is already mounted in container", + }, + { + name: "Mount path already in container", + podSpec: &v1.PodSpec{ + Containers: []v1.Container{{ + Name: "primary", + }}, + }, + primaryContainerName: "primary", + sharedVolume: &core.SharedMemory{MountName: "flyte-shared-memory", MountPath: "/dev/shm", SizeLimit: "bad-name"}, + errorMsg: "Unable to parse size limit: bad-name", + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + err := ApplySharedMemory(test.podSpec, test.primaryContainerName, test.sharedVolume) + assert.Errorf(t, err, test.errorMsg) + }) + } +} + +func TestApplySharedMemory(t *testing.T) { + + type test struct { + name string + podSpec *v1.PodSpec + primaryContainerName string + sharedVolume *core.SharedMemory + } + + tests := []test{ + { + name: "No size limit works", + podSpec: &v1.PodSpec{ + Containers: []v1.Container{{ + Name: "primary", + }}, + }, + primaryContainerName: "primary", + sharedVolume: &core.SharedMemory{MountName: "flyte-shared-memory", MountPath: "/dev/shm"}, + }, + { + name: "With size limits works", + podSpec: &v1.PodSpec{ + Containers: []v1.Container{{ + Name: "primary", + }}, + }, + primaryContainerName: "primary", + sharedVolume: &core.SharedMemory{MountName: "flyte-shared-memory", MountPath: "/dev/shm", SizeLimit: "2Gi"}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t 
*testing.T) { + err := ApplySharedMemory(test.podSpec, test.primaryContainerName, test.sharedVolume) + assert.NoError(t, err) + + assert.Len(t, test.podSpec.Volumes, 1) + assert.Len(t, test.podSpec.Containers[0].VolumeMounts, 1) + + assert.Equal( + t, + test.podSpec.Containers[0].VolumeMounts[0], + v1.VolumeMount{ + Name: test.sharedVolume.GetMountName(), + MountPath: test.sharedVolume.GetMountPath(), + }, + ) + + var quantity resource.Quantity + if test.sharedVolume.GetSizeLimit() != "" { + quantity, err = resource.ParseQuantity(test.sharedVolume.GetSizeLimit()) + assert.NoError(t, err) + } + + assert.Equal( + t, + test.podSpec.Volumes[0], + v1.Volume{ + Name: test.sharedVolume.GetMountName(), + VolumeSource: v1.VolumeSource{ + EmptyDir: &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumMemory, SizeLimit: &quantity}, + }, + }, + ) + + }) + } +} diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/pod_template_store.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/pod_template_store.go new file mode 100644 index 0000000000..102a9c534d --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/pod_template_store.go @@ -0,0 +1,92 @@ +package flytek8s + +import ( + "context" + "sync" + + v1 "k8s.io/api/core/v1" + "k8s.io/client-go/tools/cache" + + "github.com/flyteorg/flyte/v2/flytestdlib/logger" +) + +var DefaultPodTemplateStore PodTemplateStore = NewPodTemplateStore() + +// PodTemplateStore maintains a thread-safe mapping of active PodTemplates with their associated +// namespaces. +type PodTemplateStore struct { + *sync.Map + defaultNamespace string +} + +// NewPodTemplateStore initializes a new PodTemplateStore +func NewPodTemplateStore() PodTemplateStore { + return PodTemplateStore{ + Map: &sync.Map{}, + } +} + +// Delete removes the specified PodTemplate from the store. 
+func (p *PodTemplateStore) Delete(podTemplate *v1.PodTemplate) { + if value, ok := p.Load(podTemplate.Name); ok { + podTemplates := value.(*sync.Map) + podTemplates.Delete(podTemplate.Namespace) + logger.Debugf(context.Background(), "deleted PodTemplate '%s:%s' from store", podTemplate.Namespace, podTemplate.Name) + + // we specifically are not deleting empty maps from the store because this may introduce race + // conditions where a PodTemplate is being added to the 2nd dimension map while the top level map + // is concurrently being deleted. + } +} + +// LoadOrDefault returns the PodTemplate with the specified name in the given namespace. If one +// does not exist it attempts to retrieve the one associated with the defaultNamespace. +func (p *PodTemplateStore) LoadOrDefault(namespace string, podTemplateName string) *v1.PodTemplate { + if value, ok := p.Load(podTemplateName); ok { + podTemplates := value.(*sync.Map) + if podTemplate, ok := podTemplates.Load(namespace); ok { + return podTemplate.(*v1.PodTemplate) + } + + if podTemplate, ok := podTemplates.Load(p.defaultNamespace); ok { + return podTemplate.(*v1.PodTemplate) + } + } + + return nil +} + +// SetDefaultNamespace sets the default namespace for the PodTemplateStore. +func (p *PodTemplateStore) SetDefaultNamespace(namespace string) { + p.defaultNamespace = namespace +} + +// Store loads the specified PodTemplate into the store. +func (p *PodTemplateStore) Store(podTemplate *v1.PodTemplate) { + value, _ := p.LoadOrStore(podTemplate.Name, &sync.Map{}) + podTemplates := value.(*sync.Map) + podTemplates.Store(podTemplate.Namespace, podTemplate) + logger.Debugf(context.Background(), "registered PodTemplate '%s:%s' in store", podTemplate.Namespace, podTemplate.Name) +} + +// GetPodTemplateUpdatesHandler returns a new ResourceEventHandler which adds / removes +// PodTemplates to / from the provided PodTemplateStore. 
+func GetPodTemplateUpdatesHandler(store *PodTemplateStore) cache.ResourceEventHandler { + return cache.ResourceEventHandlerFuncs{ + AddFunc: func(obj interface{}) { + if podTemplate, ok := obj.(*v1.PodTemplate); ok { + store.Store(podTemplate) + } + }, + UpdateFunc: func(old, new interface{}) { + if podTemplate, ok := new.(*v1.PodTemplate); ok { + store.Store(podTemplate) + } + }, + DeleteFunc: func(obj interface{}) { + if podTemplate, ok := obj.(*v1.PodTemplate); ok { + store.Delete(podTemplate) + } + }, + } +} diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/pod_template_store_test.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/pod_template_store_test.go new file mode 100644 index 0000000000..1760a1014a --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/pod_template_store_test.go @@ -0,0 +1,102 @@ +package flytek8s + +import ( + "context" + "reflect" + "testing" + "time" + + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes/fake" +) + +const ( + namespace = "foo" +) + +func TestPodTemplateStore(t *testing.T) { + ctx := context.TODO() + + podTemplate := &v1.PodTemplate{ + ObjectMeta: metav1.ObjectMeta{ + Name: "defaultPodTemplate", + Namespace: "defaultNamespace", + }, + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + v1.Container{ + Command: []string{"flytepropeller"}, + Args: []string{"--config", "/etc/flyte/config/*.yaml"}, + }, + }, + }, + }, + } + + store := NewPodTemplateStore() + store.SetDefaultNamespace(podTemplate.Namespace) + + kubeClient := fake.NewSimpleClientset() + informerFactory := informers.NewSharedInformerFactoryWithOptions(kubeClient, 30*time.Second) + + updateHandler := GetPodTemplateUpdatesHandler(&store) + _, err := informerFactory.Core().V1().PodTemplates().Informer().AddEventHandler(updateHandler) + assert.NoError(t, err) + go 
informerFactory.Start(ctx.Done()) + + // create the podTemplate + _, err = kubeClient.CoreV1().PodTemplates(podTemplate.Namespace).Create(ctx, podTemplate, metav1.CreateOptions{}) + assert.NoError(t, err) + + time.Sleep(50 * time.Millisecond) + createPodTemplate := store.LoadOrDefault(podTemplate.Namespace, podTemplate.Name) + assert.NotNil(t, createPodTemplate) + assert.True(t, reflect.DeepEqual(podTemplate, createPodTemplate)) + + // non-default namespace podTemplate does not exist + newNamespacePodTemplate := podTemplate.DeepCopy() + newNamespacePodTemplate.Namespace = namespace + + nonDefaultNamespacePodTemplate := store.LoadOrDefault(newNamespacePodTemplate.Namespace, newNamespacePodTemplate.Name) + assert.NotNil(t, nonDefaultNamespacePodTemplate) + assert.True(t, reflect.DeepEqual(podTemplate, nonDefaultNamespacePodTemplate)) + + // non-default name podTemplate does not exist + newNamePodTemplate := podTemplate.DeepCopy() + newNamePodTemplate.Name = namespace + + nonDefaultNamePodTemplate := store.LoadOrDefault(newNamePodTemplate.Namespace, newNamePodTemplate.Name) + assert.Nil(t, nonDefaultNamePodTemplate) + + // non-default namespace podTemplate exists + _, err = kubeClient.CoreV1().PodTemplates(newNamespacePodTemplate.Namespace).Create(ctx, newNamespacePodTemplate, metav1.CreateOptions{}) + assert.NoError(t, err) + + time.Sleep(50 * time.Millisecond) + createNewNamespacePodTemplate := store.LoadOrDefault(newNamespacePodTemplate.Namespace, newNamespacePodTemplate.Name) + assert.NotNil(t, createNewNamespacePodTemplate) + assert.True(t, reflect.DeepEqual(newNamespacePodTemplate, createNewNamespacePodTemplate)) + + // update the podTemplate + updatedPodTemplate := podTemplate.DeepCopy() + updatedPodTemplate.Template.Spec.RestartPolicy = v1.RestartPolicyNever + _, err = kubeClient.CoreV1().PodTemplates(podTemplate.Namespace).Update(ctx, updatedPodTemplate, metav1.UpdateOptions{}) + assert.NoError(t, err) + + time.Sleep(50 * time.Millisecond) + updatePodTemplate 
:= store.LoadOrDefault(podTemplate.Namespace, podTemplate.Name) + assert.NotNil(t, updatePodTemplate) + assert.True(t, reflect.DeepEqual(updatedPodTemplate, updatePodTemplate)) + + // delete the podTemplate in the namespace + err = kubeClient.CoreV1().PodTemplates(podTemplate.Namespace).Delete(ctx, podTemplate.Name, metav1.DeleteOptions{}) + assert.NoError(t, err) + + time.Sleep(50 * time.Millisecond) + deletePodTemplate := store.LoadOrDefault(podTemplate.Namespace, podTemplate.Name) + assert.Nil(t, deletePodTemplate) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/resourcecustomizationmode_enumer.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/resourcecustomizationmode_enumer.go new file mode 100644 index 0000000000..bf25f1bf06 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/resourcecustomizationmode_enumer.go @@ -0,0 +1,50 @@ +// Code generated by "enumer -type=ResourceCustomizationMode -trimprefix=ResourceCustomizationMode"; DO NOT EDIT. + +package flytek8s + +import ( + "fmt" +) + +const _ResourceCustomizationModeName = "AssignResourcesMergeExistingResourcesEnsureExistingResourcesInRange" + +var _ResourceCustomizationModeIndex = [...]uint8{0, 15, 37, 67} + +func (i ResourceCustomizationMode) String() string { + if i < 0 || i >= ResourceCustomizationMode(len(_ResourceCustomizationModeIndex)-1) { + return fmt.Sprintf("ResourceCustomizationMode(%d)", i) + } + return _ResourceCustomizationModeName[_ResourceCustomizationModeIndex[i]:_ResourceCustomizationModeIndex[i+1]] +} + +var _ResourceCustomizationModeValues = []ResourceCustomizationMode{0, 1, 2} + +var _ResourceCustomizationModeNameToValueMap = map[string]ResourceCustomizationMode{ + _ResourceCustomizationModeName[0:15]: 0, + _ResourceCustomizationModeName[15:37]: 1, + _ResourceCustomizationModeName[37:67]: 2, +} + +// ResourceCustomizationModeString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. 
+func ResourceCustomizationModeString(s string) (ResourceCustomizationMode, error) { + if val, ok := _ResourceCustomizationModeNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to ResourceCustomizationMode values", s) +} + +// ResourceCustomizationModeValues returns all values of the enum +func ResourceCustomizationModeValues() []ResourceCustomizationMode { + return _ResourceCustomizationModeValues +} + +// IsAResourceCustomizationMode returns "true" if the value is listed in the enum definition. "false" otherwise +func (i ResourceCustomizationMode) IsAResourceCustomizationMode() bool { + for _, v := range _ResourceCustomizationModeValues { + if i == v { + return true + } + } + return false +} diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/testdata/config.yaml b/flyteplugins/go/tasks/pluginmachinery/flytek8s/testdata/config.yaml new file mode 100644 index 0000000000..a34968682b --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/testdata/config.yaml @@ -0,0 +1,53 @@ +# Sample plugins config +plugins: + # All k8s plugins default configuration + k8s: + scheduler-name: flyte-scheduler + default-cpus: 1024m + default-memory: 1024Mi + default-annotations: + - annotationKey1: annotationValue1 + - annotationKey2: annotationValue2 + default-labels: + - label1: labelValue1 + - label2: labelValue2 + resource-tolerations: + nvidia.com/gpu: + key: flyte/gpu + value: dedicated + operator: Equal + effect: NoSchedule + storage: + - key: storage + value: special + operator: Equal + effect: PreferNoSchedule + interruptible-node-selector: + - x/interruptible: "true" + interruptible-tolerations: + - key: x/flyte + value: interruptible + operator: Equal + effect: NoSchedule + interruptible-node-selector-requirement: + key: x/interruptible + operator: In + values: + - "true" + non-interruptible-node-selector-requirement: + key: x/interruptible + operator: DoesNotExist + default-env-vars: + - 
AWS_METADATA_SERVICE_TIMEOUT: 5 + - AWS_METADATA_SERVICE_NUM_ATTEMPTS: 20 + - FLYTE_AWS_ENDPOINT: "http://minio.flyte:9000" + - FLYTE_AWS_ACCESS_KEY_ID: minio + - FLYTE_AWS_SECRET_ACCESS_KEY: miniostorage + default-node-selector: + user: 'default' + default-pod-security-context: + runAsUser: 1000 + runAsGroup: 3000 + fsGroup: 2000 + default-security-context: + allowPrivilegeEscalation: false diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/testdata/imagepull-failurepod.json b/flyteplugins/go/tasks/pluginmachinery/flytek8s/testdata/imagepull-failurepod.json new file mode 100644 index 0000000000..ab39951e02 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/testdata/imagepull-failurepod.json @@ -0,0 +1,400 @@ +{ + "apiVersion": "v1", + "kind": "Pod", + "metadata": { + "annotations": { + "cluster-autoscaler.kubernetes.io/safe-to-evict": "false", + "flyte.flyte.net/deployment": "production", + "iam.amazonaws.com/role": "role", + "flyte.net/iamwait-inject": "injected" + }, + "creationTimestamp": "2020-04-06T21:15:03Z", + "labels": { + "app": "flyte-user-service", + "environment": "staging", + "execution-id": "fdf98e4ed2b524dc3bf7", + "interruptible": "false", + "flyte.net/iamwait-gojson-tag": "8a3b1cb9dbb132b1d973b7a8ce9da8220429e8c0", + "node-id": "get-flyte-id-task", + "task-name": "common-library-utils-get-flyte-id", + "version": "flyte-version", + "workflow-name": "compositionworkflow" + }, + "name": "fdf98e4ed2b524dc3bf7-get-flyte-id-task-0", + "namespace": "project", + "ownerReferences": [ + { + "apiVersion": "flyte.flyte.com/v1alpha1", + "blockOwnerDeletion": true, + "controller": true, + "kind": "flyteworkflow", + "name": "fdf98e4ed2b524dc3bf7", + "uid": "ae02751f-784b-11ea-96a3-0e75025c25bf" + } + ], + "resourceVersion": "185246404", + "selfLink": "/api/v1/namespaces/project/pods/fdf98e4ed2b524dc3bf7-get-flyte-id-task-0", + "uid": "ae7a8c54-784b-11ea-92a3-1298c81fec7f" + }, + "spec": { + "containers": [ + { + "args": [ + 
"service_venv", + "pyflyte-execute", + "--task-module", + "common.library.utils", + "--task-name", + "get_flyte_id", + "--inputs", + "s3://flyte/metadata/propeller/production/project-fdf98e4ed2b524dc3bf7/get-flyte-id-task/data/inputs.pb", + "--output-prefix", + "s3://flyte/metadata/propeller/production/project-fdf98e4ed2b524dc3bf7/get-flyte-id-task/data/0" + ], + "env": [ + { + "name": "ENABLE_FLYTE2", + "value": "1" + }, + { + "name": "FLYTE_AWS_S3_SHARD_FORMATTER", + "value": "s3://project/{}/development" + }, + { + "name": "FLYTE_AWS_S3_SHARD_STRING_LENGTH", + "value": "2" + }, + { + "name": "FLYTE_INTERNAL_DOMAIN", + "value": "development" + }, + { + "name": "FLYTE_INTERNAL_PROJECT", + "value": "project" + }, + { + "name": "FLYTE_INTERNAL_VERSION", + "value": "3c6869db7101c619908f2b568fa851a9df0016c2" + }, + { + "name": "FLYTE_INTERNAL_IMAGE", + "value": "image" + }, + { + "name": "FLYTE_PLATFORM_URL", + "value": "flyte.flyte.net" + }, + { + "name": "FLYTE_SDK_PYTHON_VENV", + "value": "service_venv" + }, + { + "name": "FLYTE_SDK_EXECUTION_ENGINE", + "value": "flyte" + }, + { + "name": "FLYTE_SDK_TYPE_ENGINES", + "value": "flyte_modelbuilder.api.internal.flyte2_shims.type_engine.Flyte1to2TypeEngine" + }, + { + "name": "FLYTE_SDK_LOCAL_SANDBOX", + "value": "/tmp/modelbuilder/" + }, + { + "name": "FLYTE_SDK_WORKFLOW_PACKAGES", + "value": "common.workflows,fare.workflows,multimode.workflows,multimode.workflows.disco,pisco.workflows,pisco.workflows.anchor,pisco.workflows.csv_report,pisco.workflows.elasticity,primetime.workflows,tolls.workflows" + }, + { + "name": "FLYTE_SDK_NAME_FORMAT", + "value": "{name}" + }, + { + "name": "FLYTE_SDK_TASK_NAME_FORMAT", + "value": "{module}.{name}" + }, + { + "name": "PYTHONPATH", + "value": ":/srv/service/current:/srv/service/current:/srv/service/current" + }, + { + "name": "SERVICE_NAME", + "value": "project" + }, + { + "name": "SERVICE_REPO_NAME", + "value": "project" + }, + { + "name": "SERVICE_INSTANCE", + "value": 
"development" + }, + { + "name": "APPLICATION_ENV", + "value": "development" + }, + { + "name": "IMAGE_VERSION", + "value": "3c6869db7101c619908f2b568fa851a9df0016c2" + }, + { + "name": "FLYTE_SPARK_EXECUTION_ENGINE", + "value": "kubernetes" + }, + { + "name": "FLYTE_PLATFORM", + "value": "production" + }, + { + "name": "FLYTE_INTERNAL_CONFIGURATION_PATH", + "value": "flytekit.config" + }, + { + "name": "FLYTE_INTERNAL_NAME" + }, + { + "name": "FLYTE_INTERNAL_EXECUTION_WORKFLOW", + "value": "project:development:CompositionWorkflow" + }, + { + "name": "FLYTE_INTERNAL_EXECUTION_ID", + "value": "fdf98e4ed2b524dc3bf7" + }, + { + "name": "FLYTE_INTERNAL_EXECUTION_PROJECT", + "value": "project" + }, + { + "name": "FLYTE_INTERNAL_EXECUTION_DOMAIN", + "value": "development" + }, + { + "name": "FLYTE_INTERNAL_TASK_PROJECT", + "value": "project" + }, + { + "name": "FLYTE_INTERNAL_TASK_DOMAIN", + "value": "development" + }, + { + "name": "FLYTE_INTERNAL_TASK_NAME", + "value": "common.library.utils.get_flyte_id" + }, + { + "name": "FLYTE_INTERNAL_TASK_VERSION", + "value": "3c6869db7101c619908f2b568fa851a9df0016c2" + }, + { + "name": "FLYTE_INTERNAL_PROJECT", + "value": "project" + }, + { + "name": "FLYTE_INTERNAL_DOMAIN", + "value": "development" + }, + { + "name": "FLYTE_INTERNAL_NAME", + "value": "common.library.utils.get_flyte_id" + }, + { + "name": "FLYTE_INTERNAL_VERSION", + "value": "3c6869db7101c619908f2b568fa851a9df0016c2" + }, + { + "name": "AWS_RETRY_MODE", + "value": "standard" + }, + { + "name": "AWS_METADATA_SERVICE_TIMEOUT", + "value": "5" + }, + { + "name": "AWS_METADATA_SERVICE_NUM_ATTEMPTS", + "value": "20" + }, + { + "name": "EMIT_CONTAINER_METRICS", + "value": "true" + }, + { + "name": "FLYTE_STATSD_HOST", + "value": "stats.statsagent" + }, + { + "name": "FLYTE_CREDENTIALS_AUTH_MODE", + "value": "basic" + }, + { + "name": "FLYTE_CREDENTIALS_AUTHORIZATION_METADATA_KEY", + "value": "flyte-authorization" + }, + { + "name": "FLYTE_CREDENTIALS_SCOPE", + "value": 
"svc" + } + ], + "image": "image", + "imagePullPolicy": "IfNotPresent", + "lifecycle": { + "preStop": { + "exec": { + "command": [ + "k8s-shutdown" + ] + } + } + }, + "name": "fdf98e4ed2b524dc3bf7-get-flyte-id-task-0", + "resources": { + "limits": { + "cpu": "2", + "memory": "2Gi" + }, + "requests": { + "cpu": "2", + "memory": "2Gi" + } + }, + "terminationMessagePath": "/dev/termination-log", + "terminationMessagePolicy": "File", + "volumeMounts": [ + { + "mountPath": "/var/run/secrets/kubernetes.io/serviceaccount", + "name": "default-token-rr2ws", + "readOnly": true + } + ] + } + ], + "dnsPolicy": "ClusterFirst", + "enableServiceLinks": true, + "imagePullSecrets": [ + ], + "initContainers": [ + { + "command": [ + "iamwait", + "-timeout=120s" + ], + "image": "iamwait:8a3b1cb9dbb132b1d973b7a8ce9da8220429e8c0", + "imagePullPolicy": "IfNotPresent", + "name": "iamwait-gojson", + "resources": { + "limits": { + "cpu": "1", + "memory": "1Gi" + }, + "requests": { + "cpu": "20m", + "memory": "10Mi" + } + }, + "terminationMessagePath": "/dev/termination-log", + "terminationMessagePolicy": "File" + } + ], + "nodeName": "ip-10-44-170-4.ec2.internal", + "priority": 0, + "restartPolicy": "Never", + "schedulerName": "default-scheduler", + "securityContext": {}, + "serviceAccount": "default", + "serviceAccountName": "default", + "terminationGracePeriodSeconds": 64, + "tolerations": [ + { + "effect": "NoExecute", + "key": "node.kubernetes.io/not-ready", + "operator": "Exists", + "tolerationSeconds": 300 + }, + { + "effect": "NoExecute", + "key": "node.kubernetes.io/unreachable", + "operator": "Exists", + "tolerationSeconds": 300 + } + ], + "volumes": [ + { + "name": "default-token-rr2ws", + "secret": { + "defaultMode": 420, + "secretName": "default-token-rr2ws" + } + } + ] + }, + "status": { + "conditions": [ + { + "lastProbeTime": null, + "lastTransitionTime": "2020-04-06T21:15:07Z", + "status": "True", + "type": "Initialized" + }, + { + "lastProbeTime": null, + 
"lastTransitionTime": "2020-04-06T21:15:03Z", + "message": "containers with unready status: [fdf98e4ed2b524dc3bf7-get-flyte-id-task-0]", + "reason": "ContainersNotReady", + "status": "False", + "type": "Ready" + }, + { + "lastProbeTime": null, + "lastTransitionTime": "2020-04-06T21:15:03Z", + "message": "containers with unready status: [fdf98e4ed2b524dc3bf7-get-flyte-id-task-0]", + "reason": "ContainersNotReady", + "status": "False", + "type": "ContainersReady" + }, + { + "lastProbeTime": null, + "lastTransitionTime": "2020-04-06T21:15:03Z", + "status": "True", + "type": "PodScheduled" + } + ], + "containerStatuses": [ + { + "image": "image", + "imageID": "", + "lastState": {}, + "name": "fdf98e4ed2b524dc3bf7-get-flyte-id-task-0", + "ready": false, + "restartCount": 0, + "state": { + "waiting": { + "message": "Back-off pulling image \"image\"", + "reason": "ImagePullBackOff" + } + } + } + ], + "hostIP": "10.44.170.4", + "initContainerStatuses": [ + { + "containerID": "x", + "image": "image", + "imageID": "image1", + "lastState": {}, + "name": "iamwait-gojson", + "ready": true, + "restartCount": 0, + "state": { + "terminated": { + "containerID": "x", + "exitCode": 0, + "finishedAt": "2020-04-06T21:15:06Z", + "reason": "Completed", + "startedAt": "2020-04-06T21:15:06Z" + } + } + } + ], + "phase": "Pending", + "podIP": "10.44.137.175", + "qosClass": "Guaranteed", + "startTime": "2020-04-06T21:15:03Z" + } +} diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/utils.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/utils.go new file mode 100644 index 0000000000..a8b6c93448 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/utils.go @@ -0,0 +1,157 @@ +package flytek8s + +import ( + "strings" + + "github.com/pkg/errors" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + + pluginmachinery_core "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + 
"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/flytek8s/config" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +func ToK8sEnvVar(env []*core.KeyValuePair) []v1.EnvVar { + envVars := make([]v1.EnvVar, 0, len(env)) + for _, kv := range env { + envVars = append(envVars, v1.EnvVar{Name: kv.Key, Value: kv.Value}) + } + return envVars +} + +// TODO we should modify the container resources to contain a map of enum values? +// Also we should probably create tolerations / taints, but we could do that as a post process +func ToK8sResourceList(resources []*core.Resources_ResourceEntry) (v1.ResourceList, error) { + k8sResources := make(v1.ResourceList, len(resources)) + for _, r := range resources { + rVal := r.Value + v, err := resource.ParseQuantity(rVal) + if err != nil { + return nil, errors.Wrap(err, "Failed to parse resource as a valid quantity.") + } + switch r.Name { + case core.Resources_CPU: + if !v.IsZero() { + k8sResources[v1.ResourceCPU] = v + } + case core.Resources_MEMORY: + if !v.IsZero() { + k8sResources[v1.ResourceMemory] = v + } + case core.Resources_GPU: + if !v.IsZero() { + k8sResources[resourceGPU] = v + } + case core.Resources_EPHEMERAL_STORAGE: + if !v.IsZero() { + k8sResources[v1.ResourceEphemeralStorage] = v + } + } + } + return k8sResources, nil +} + +func ToK8sResourceRequirements(resources *core.Resources) (*v1.ResourceRequirements, error) { + res := &v1.ResourceRequirements{} + if resources == nil { + return res, nil + } + req, err := ToK8sResourceList(resources.Requests) + if err != nil { + return res, err + } + lim, err := ToK8sResourceList(resources.Limits) + if err != nil { + return res, err + } + res.Limits = lim + res.Requests = req + return res, nil +} + +// ApplyK8sResourceOverrides ensures that both resource requests and limits are set. +// This is required because we run user executions in namespaces bound with a project quota and the Kubernetes scheduler will reject requests omitting these. 
+// This function is called by plugins that don't necessarily construct a default flyte container (container and k8s pod tasks) +// and therefore don't already receive the ApplyResourceOverrides treatment and subsequent validation which handles adding sensible defaults for requests and limits. +func ApplyK8sResourceOverrides(teMetadata pluginmachinery_core.TaskExecutionMetadata, resources *v1.ResourceRequirements) v1.ResourceRequirements { + platformResources := teMetadata.GetPlatformResources() + if platformResources == nil { + platformResources = &v1.ResourceRequirements{} + } + + return ApplyResourceOverrides(*resources, *platformResources, assignIfUnset) +} + +func GetServiceAccountNameFromTaskExecutionMetadata(taskExecutionMetadata pluginmachinery_core.TaskExecutionMetadata) string { + var serviceAccount string + securityContext := taskExecutionMetadata.GetSecurityContext() + if securityContext.GetRunAs() != nil { + serviceAccount = securityContext.GetRunAs().GetK8SServiceAccount() + } + + // TO BE DEPRECATED + if len(serviceAccount) == 0 { + serviceAccount = taskExecutionMetadata.GetK8sServiceAccount() + } + + return serviceAccount +} + +// getNormalizedAcceleratorDevice returns the normalized name for the given device. +// This should map to the node label that the corresponding nodes are provisioned with. +// Falls back to the original device name if the device is not configured. +func GetNormalizedAcceleratorDevice(device string) string { + cfg := config.GetK8sPluginConfig() + if normalized, ok := cfg.AcceleratorDevices[strings.ToUpper(device)]; ok { + return normalized + } + return device +} + +// getAcceleratorResourceName returns the Kubernetes resource name for the given device class. +// Falls back to the legacy GpuResourceName if the device class is not configured. 
+func getAcceleratorResourceName(accelerator *core.GPUAccelerator) v1.ResourceName { + // Use the shared helper function to get the accelerator config + accelConfig := getAcceleratorConfig(accelerator) + return accelConfig.ResourceName +} + +// getAllAcceleratorResourceNames returns the Kubernetes resource names for all accelerator devices. +func getAllAcceleratorResourceNames() map[v1.ResourceName]struct{} { + cfg := config.GetK8sPluginConfig() + acceleratorResourceNames := make(map[v1.ResourceName]struct{}) + + // Add the legacy GPU resource name for backward compatibility + acceleratorResourceNames[cfg.GpuResourceName] = struct{}{} + + // Add resource names from all configured accelerator device classes + for _, deviceClassConfig := range cfg.AcceleratorDeviceClasses { + if deviceClassConfig.ResourceName != "" { + acceleratorResourceNames[deviceClassConfig.ResourceName] = struct{}{} + } + } + return acceleratorResourceNames +} + +// podRequiresAccelerator returns true if any container in the pod requires any accelerator devices. +func podRequiresAccelerator(podSpec *v1.PodSpec) bool { + acceleratorResourceNames := getAllAcceleratorResourceNames() + for _, cnt := range podSpec.Containers { + for resourceName := range acceleratorResourceNames { + if _, ok := cnt.Resources.Limits[resourceName]; ok { + return true + } + } + } + return false +} + +// getConfiguredDeviceClasses returns a list of configured device class names for logging purposes. 
+func getConfiguredDeviceClasses(deviceClasses map[string]config.AcceleratorDeviceClassConfig) []string { + classes := make([]string, 0, len(deviceClasses)) + for k := range deviceClasses { + classes = append(classes, k) + } + return classes +} diff --git a/flyteplugins/go/tasks/pluginmachinery/flytek8s/utils_test.go b/flyteplugins/go/tasks/pluginmachinery/flytek8s/utils_test.go new file mode 100644 index 0000000000..02818c6a98 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/flytek8s/utils_test.go @@ -0,0 +1,529 @@ +package flytek8s + +import ( + "testing" + + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core/mocks" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/flytek8s/config" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +func TestToK8sEnvVar(t *testing.T) { + e := ToK8sEnvVar([]*core.KeyValuePair{ + {Key: "k1", Value: "v1"}, + {Key: "k2", Value: "v2"}, + }) + + assert.NotEmpty(t, e) + assert.Equal(t, []v1.EnvVar{ + {Name: "k1", Value: "v1"}, + {Name: "k2", Value: "v2"}, + }, e) + + e = ToK8sEnvVar(nil) + assert.Empty(t, e) +} + +func TestToK8sResourceList(t *testing.T) { + { + r, err := ToK8sResourceList([]*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "250m"}, + {Name: core.Resources_GPU, Value: "1"}, + {Name: core.Resources_MEMORY, Value: "1024Mi"}, + {Name: core.Resources_EPHEMERAL_STORAGE, Value: "1024Mi"}, + }) + + assert.NoError(t, err) + assert.NotEmpty(t, r) + assert.NotNil(t, r[v1.ResourceCPU]) + assert.Equal(t, resource.MustParse("250m"), r[v1.ResourceCPU]) + assert.Equal(t, resource.MustParse("1"), r[resourceGPU]) + assert.Equal(t, resource.MustParse("1024Mi"), r[v1.ResourceMemory]) + assert.Equal(t, resource.MustParse("1024Mi"), r[v1.ResourceEphemeralStorage]) + } + { + r, err := ToK8sResourceList([]*core.Resources_ResourceEntry{}) + assert.NoError(t, 
err) + assert.Empty(t, r) + } + { + _, err := ToK8sResourceList([]*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "250x"}, + }) + assert.Error(t, err) + } + +} + +func TestToK8sResourceRequirements(t *testing.T) { + + { + r, err := ToK8sResourceRequirements(nil) + assert.NoError(t, err) + assert.NotNil(t, r) + assert.Empty(t, r.Limits) + assert.Empty(t, r.Requests) + } + { + r, err := ToK8sResourceRequirements(&core.Resources{ + Requests: nil, + Limits: nil, + }) + assert.NoError(t, err) + assert.NotNil(t, r) + assert.Empty(t, r.Limits) + assert.Empty(t, r.Requests) + } + { + r, err := ToK8sResourceRequirements(&core.Resources{ + Requests: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "250m"}, + }, + Limits: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "1024m"}, + }, + }) + assert.NoError(t, err) + assert.NotNil(t, r) + assert.Equal(t, resource.MustParse("250m"), r.Requests[v1.ResourceCPU]) + assert.Equal(t, resource.MustParse("1024m"), r.Limits[v1.ResourceCPU]) + } + { + _, err := ToK8sResourceRequirements(&core.Resources{ + Requests: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "blah"}, + }, + Limits: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "1024m"}, + }, + }) + assert.Error(t, err) + } + { + _, err := ToK8sResourceRequirements(&core.Resources{ + Requests: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "250m"}, + }, + Limits: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "blah"}, + }, + }) + assert.Error(t, err) + } +} + +func TestGetServiceAccountNameFromTaskExecutionMetadata(t *testing.T) { + mockTaskExecMetadata := mocks.TaskExecutionMetadata{} + mockTaskExecMetadata.OnGetSecurityContext().Return(core.SecurityContext{ + RunAs: &core.Identity{K8SServiceAccount: "service-account"}, + }) + result := GetServiceAccountNameFromTaskExecutionMetadata(&mockTaskExecMetadata) + assert.Equal(t, 
"service-account", result) +} + +func TestGetServiceAccountNameFromServiceAccount(t *testing.T) { + mockTaskExecMetadata := mocks.TaskExecutionMetadata{} + mockTaskExecMetadata.OnGetSecurityContext().Return(core.SecurityContext{}) + mockTaskExecMetadata.OnGetK8sServiceAccount().Return("service-account") + result := GetServiceAccountNameFromTaskExecutionMetadata(&mockTaskExecMetadata) + assert.Equal(t, "service-account", result) +} + +func TestGetNormalizedAcceleratorDevice(t *testing.T) { + // Setup config with AcceleratorDevices mapping + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + AcceleratorDevices: map[string]string{ + "A100": "nvidia-tesla-a100", + "H100": "nvidia-h100", + "T4": "nvidia-tesla-t4", + "V5E": "tpu-v5-lite-podslice", + "V5P": "tpu-v5p-slice", + "INF2": "aws-neuron-inf2", + "TRN1": "aws-neuron-trn1", + "MI300X": "amd-mi300x", + "MI250X": "amd-mi250x", + "DL1": "habana-gaudi-dl1", + }, + })) + + t.Run("NVIDIA GPU normalization", func(t *testing.T) { + assert.Equal(t, "nvidia-tesla-a100", GetNormalizedAcceleratorDevice("A100")) + assert.Equal(t, "nvidia-h100", GetNormalizedAcceleratorDevice("H100")) + assert.Equal(t, "nvidia-tesla-t4", GetNormalizedAcceleratorDevice("T4")) + }) + + t.Run("Google TPU normalization", func(t *testing.T) { + assert.Equal(t, "tpu-v5-lite-podslice", GetNormalizedAcceleratorDevice("V5E")) + assert.Equal(t, "tpu-v5p-slice", GetNormalizedAcceleratorDevice("V5P")) + }) + + t.Run("AWS Neuron normalization", func(t *testing.T) { + assert.Equal(t, "aws-neuron-inf2", GetNormalizedAcceleratorDevice("INF2")) + assert.Equal(t, "aws-neuron-trn1", GetNormalizedAcceleratorDevice("TRN1")) + }) + + t.Run("AMD GPU normalization", func(t *testing.T) { + assert.Equal(t, "amd-mi300x", GetNormalizedAcceleratorDevice("MI300X")) + assert.Equal(t, "amd-mi250x", GetNormalizedAcceleratorDevice("MI250X")) + }) + + t.Run("Habana Gaudi normalization", func(t *testing.T) { + assert.Equal(t, "habana-gaudi-dl1", 
GetNormalizedAcceleratorDevice("DL1")) + }) + + t.Run("case insensitivity", func(t *testing.T) { + assert.Equal(t, "nvidia-tesla-a100", GetNormalizedAcceleratorDevice("a100")) + assert.Equal(t, "nvidia-tesla-a100", GetNormalizedAcceleratorDevice("A100")) + assert.Equal(t, "nvidia-h100", GetNormalizedAcceleratorDevice("h100")) + assert.Equal(t, "tpu-v5-lite-podslice", GetNormalizedAcceleratorDevice("v5e")) + assert.Equal(t, "aws-neuron-inf2", GetNormalizedAcceleratorDevice("inf2")) + }) + + t.Run("unmapped device fallback", func(t *testing.T) { + assert.Equal(t, "custom-device", GetNormalizedAcceleratorDevice("custom-device")) + assert.Equal(t, "unknown-gpu", GetNormalizedAcceleratorDevice("unknown-gpu")) + }) + + t.Run("empty device", func(t *testing.T) { + assert.Equal(t, "", GetNormalizedAcceleratorDevice("")) + }) +} + +func TestGetAcceleratorResourceName(t *testing.T) { + t.Run("returns device class specific resource name", func(t *testing.T) { + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + GpuResourceName: "nvidia.com/gpu", + AcceleratorDeviceClasses: map[string]config.AcceleratorDeviceClassConfig{ + "NVIDIA_GPU": { + ResourceName: "nvidia.com/gpu", + }, + "GOOGLE_TPU": { + ResourceName: "google.com/tpu", + }, + "AMAZON_NEURON": { + ResourceName: "aws.amazon.com/neuron", + }, + "AMD_GPU": { + ResourceName: "amd.com/gpu", + }, + }, + })) + + // Test NVIDIA GPU + result := getAcceleratorResourceName(&core.GPUAccelerator{ + DeviceClass: core.GPUAccelerator_NVIDIA_GPU, + }) + assert.Equal(t, v1.ResourceName("nvidia.com/gpu"), result) + + // Test Google TPU + result = getAcceleratorResourceName(&core.GPUAccelerator{ + DeviceClass: core.GPUAccelerator_GOOGLE_TPU, + }) + assert.Equal(t, v1.ResourceName("google.com/tpu"), result) + + // Test Amazon Neuron + result = getAcceleratorResourceName(&core.GPUAccelerator{ + DeviceClass: core.GPUAccelerator_AMAZON_NEURON, + }) + assert.Equal(t, v1.ResourceName("aws.amazon.com/neuron"), result) + + // 
Test AMD GPU + result = getAcceleratorResourceName(&core.GPUAccelerator{ + DeviceClass: core.GPUAccelerator_AMD_GPU, + }) + assert.Equal(t, v1.ResourceName("amd.com/gpu"), result) + }) + + t.Run("falls back to legacy GpuResourceName when device class not configured", func(t *testing.T) { + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + GpuResourceName: "nvidia.com/gpu", + AcceleratorDeviceClasses: map[string]config.AcceleratorDeviceClassConfig{}, + })) + + result := getAcceleratorResourceName(&core.GPUAccelerator{ + DeviceClass: core.GPUAccelerator_NVIDIA_GPU, + }) + assert.Equal(t, v1.ResourceName("nvidia.com/gpu"), result) + }) + + t.Run("falls back to legacy GpuResourceName when device class config not found", func(t *testing.T) { + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + GpuResourceName: "custom.gpu.resource", + AcceleratorDeviceClasses: map[string]config.AcceleratorDeviceClassConfig{ + "GOOGLE_TPU": { + ResourceName: "google.com/tpu", + }, + }, + })) + + // Use NVIDIA_GPU (default, value 0) but it's not in the config, so should fallback + result := getAcceleratorResourceName(&core.GPUAccelerator{ + DeviceClass: core.GPUAccelerator_NVIDIA_GPU, + }) + assert.Equal(t, v1.ResourceName("custom.gpu.resource"), result) + }) + + t.Run("falls back to legacy GpuResourceName when accelerator is nil", func(t *testing.T) { + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + GpuResourceName: "nvidia.com/gpu", + AcceleratorDeviceClasses: map[string]config.AcceleratorDeviceClassConfig{}, + })) + + result := getAcceleratorResourceName(nil) + assert.Equal(t, v1.ResourceName("nvidia.com/gpu"), result) + }) + + t.Run("uses device class config even when resource name is empty string", func(t *testing.T) { + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + GpuResourceName: "nvidia.com/gpu", + AcceleratorDeviceClasses: map[string]config.AcceleratorDeviceClassConfig{ + "NVIDIA_GPU": { + 
ResourceName: "", // Empty - should fallback + }, + }, + })) + + result := getAcceleratorResourceName(&core.GPUAccelerator{ + DeviceClass: core.GPUAccelerator_NVIDIA_GPU, + }) + // Should fallback to global GpuResourceName when device class resource name is empty + assert.Equal(t, v1.ResourceName("nvidia.com/gpu"), result) + }) +} + +func TestGetAllAcceleratorResourceNames(t *testing.T) { + t.Run("returns all configured accelerator resource names", func(t *testing.T) { + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + GpuResourceName: "nvidia.com/gpu", + AcceleratorDeviceClasses: map[string]config.AcceleratorDeviceClassConfig{ + "NVIDIA_GPU": { + ResourceName: "nvidia.com/gpu", + }, + "GOOGLE_TPU": { + ResourceName: "google.com/tpu", + }, + "AMAZON_NEURON": { + ResourceName: "aws.amazon.com/neuron", + }, + "AMD_GPU": { + ResourceName: "amd.com/gpu", + }, + "HABANA_GAUDI": { + ResourceName: "habana.ai/gaudi", + }, + }, + })) + + result := getAllAcceleratorResourceNames() + + // Should include legacy GpuResourceName + assert.Contains(t, result, v1.ResourceName("nvidia.com/gpu")) + // Should include all configured accelerator resource names + assert.Contains(t, result, v1.ResourceName("google.com/tpu")) + assert.Contains(t, result, v1.ResourceName("aws.amazon.com/neuron")) + assert.Contains(t, result, v1.ResourceName("amd.com/gpu")) + assert.Contains(t, result, v1.ResourceName("habana.ai/gaudi")) + + // Should have exactly 5 unique resource names (nvidia.com/gpu appears in both legacy and new map) + assert.Equal(t, 5, len(result)) + }) + + t.Run("includes legacy GpuResourceName for backward compatibility", func(t *testing.T) { + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + GpuResourceName: "custom.gpu.resource", + AcceleratorDeviceClasses: map[string]config.AcceleratorDeviceClassConfig{}, + })) + + result := getAllAcceleratorResourceNames() + + assert.Contains(t, result, v1.ResourceName("custom.gpu.resource")) + 
assert.Equal(t, 1, len(result)) + }) + + t.Run("ensures uniqueness when legacy and new map overlap", func(t *testing.T) { + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + GpuResourceName: "nvidia.com/gpu", + AcceleratorDeviceClasses: map[string]config.AcceleratorDeviceClassConfig{ + "NVIDIA_GPU": { + ResourceName: "nvidia.com/gpu", + }, + "GOOGLE_TPU": { + ResourceName: "google.com/tpu", + }, + }, + })) + + result := getAllAcceleratorResourceNames() + + // Should deduplicate nvidia.com/gpu + assert.Contains(t, result, v1.ResourceName("nvidia.com/gpu")) + assert.Contains(t, result, v1.ResourceName("google.com/tpu")) + assert.Equal(t, 2, len(result)) + }) +} + +func TestPodRequiresAccelerator(t *testing.T) { + // Setup config with multiple accelerator types + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + GpuResourceName: "nvidia.com/gpu", + AcceleratorDeviceClasses: map[string]config.AcceleratorDeviceClassConfig{ + "NVIDIA_GPU": { + ResourceName: "nvidia.com/gpu", + }, + "GOOGLE_TPU": { + ResourceName: "google.com/tpu", + }, + "AMAZON_NEURON": { + ResourceName: "aws.amazon.com/neuron", + }, + "AMD_GPU": { + ResourceName: "amd.com/gpu", + }, + "HABANA_GAUDI": { + ResourceName: "habana.ai/gaudi", + }, + }, + })) + + t.Run("pod with NVIDIA GPU resources returns true", func(t *testing.T) { + podSpec := &v1.PodSpec{ + Containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + "nvidia.com/gpu": resource.MustParse("1"), + }, + }, + }, + }, + } + assert.True(t, podRequiresAccelerator(podSpec)) + }) + + t.Run("pod with Google TPU resources returns true", func(t *testing.T) { + podSpec := &v1.PodSpec{ + Containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + "google.com/tpu": resource.MustParse("4"), + }, + }, + }, + }, + } + assert.True(t, podRequiresAccelerator(podSpec)) + }) + + t.Run("pod with AWS Neuron resources returns true", func(t 
*testing.T) { + podSpec := &v1.PodSpec{ + Containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + "aws.amazon.com/neuron": resource.MustParse("2"), + }, + }, + }, + }, + } + assert.True(t, podRequiresAccelerator(podSpec)) + }) + + t.Run("pod with AMD GPU resources returns true", func(t *testing.T) { + podSpec := &v1.PodSpec{ + Containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + "amd.com/gpu": resource.MustParse("1"), + }, + }, + }, + }, + } + assert.True(t, podRequiresAccelerator(podSpec)) + }) + + t.Run("pod with Habana Gaudi resources returns true", func(t *testing.T) { + podSpec := &v1.PodSpec{ + Containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + "habana.ai/gaudi": resource.MustParse("1"), + }, + }, + }, + }, + } + assert.True(t, podRequiresAccelerator(podSpec)) + }) + + t.Run("pod with no accelerator resources returns false", func(t *testing.T) { + podSpec := &v1.PodSpec{ + Containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("1"), + v1.ResourceMemory: resource.MustParse("1Gi"), + }, + }, + }, + }, + } + assert.False(t, podRequiresAccelerator(podSpec)) + }) + + t.Run("pod with only CPU and memory returns false", func(t *testing.T) { + podSpec := &v1.PodSpec{ + Containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("500m"), + v1.ResourceMemory: resource.MustParse("512Mi"), + }, + Limits: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("1"), + v1.ResourceMemory: resource.MustParse("1Gi"), + }, + }, + }, + }, + } + assert.False(t, podRequiresAccelerator(podSpec)) + }) + + t.Run("multiple containers with one having accelerator returns true", func(t *testing.T) { + podSpec := &v1.PodSpec{ + Containers: []v1.Container{ + { + Resources: v1.ResourceRequirements{ + 
Limits: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("1"), + }, + }, + }, + { + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + "nvidia.com/gpu": resource.MustParse("1"), + }, + }, + }, + }, + } + assert.True(t, podRequiresAccelerator(podSpec)) + }) + + t.Run("empty pod spec returns false", func(t *testing.T) { + podSpec := &v1.PodSpec{} + assert.False(t, podRequiresAccelerator(podSpec)) + }) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/google/config.go b/flyteplugins/go/tasks/pluginmachinery/google/config.go new file mode 100644 index 0000000000..ecd154e0d5 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/google/config.go @@ -0,0 +1,23 @@ +package google + +type TokenSourceFactoryType = string + +const ( + TokenSourceTypeDefault = "default" + TokenSourceTypeGkeTaskWorkloadIdentity = "gke-task-workload-identity" // #nosec +) + +type TokenSourceFactoryConfig struct { + // Type is type of TokenSourceFactory, possible values are 'default' or 'gke-task-workload-identity'. 
+ // - 'default' uses default credentials, see https://cloud.google.com/iam/docs/service-accounts#default + Type TokenSourceFactoryType `json:"type" pflag:",Defines type of TokenSourceFactory, possible values are 'default' and 'gke-task-workload-identity'"` + + // Configuration for GKE task workload identity token source factory + GkeTaskWorkloadIdentityTokenSourceFactoryConfig GkeTaskWorkloadIdentityTokenSourceFactoryConfig `json:"gke-task-workload-identity" pflag:"Extra configuration for GKE task workload identity token source factory"` +} + +func GetDefaultConfig() TokenSourceFactoryConfig { + return TokenSourceFactoryConfig{ + Type: "default", + } +} diff --git a/flyteplugins/go/tasks/pluginmachinery/google/default_token_source_factory.go b/flyteplugins/go/tasks/pluginmachinery/google/default_token_source_factory.go new file mode 100644 index 0000000000..358202f605 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/google/default_token_source_factory.go @@ -0,0 +1,21 @@ +package google + +import ( + "context" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" +) + +type defaultTokenSource struct{} + +func (m *defaultTokenSource) GetTokenSource( + ctx context.Context, + identity Identity, +) (oauth2.TokenSource, error) { + return google.DefaultTokenSource(ctx) +} + +func NewDefaultTokenSourceFactory() (TokenSourceFactory, error) { + return &defaultTokenSource{}, nil +} diff --git a/flyteplugins/go/tasks/pluginmachinery/google/gke_task_workload_identity_token_source_factory.go b/flyteplugins/go/tasks/pluginmachinery/google/gke_task_workload_identity_token_source_factory.go new file mode 100644 index 0000000000..2f3c2355d7 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/google/gke_task_workload_identity_token_source_factory.go @@ -0,0 +1,112 @@ +package google + +import ( + "context" + + "github.com/pkg/errors" + "golang.org/x/oauth2" + "google.golang.org/api/impersonate" + "google.golang.org/grpc/credentials/oauth" + metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + + pluginmachinery "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/k8s" +) + +const ( + gcpServiceAccountAnnotationKey = "iam.gke.io/gcp-service-account" + workflowIdentityDocURL = "https://cloud.google.com/kubernetes-engine/docs/how-to/workload-identity" +) + +var impersonationScopes = []string{"https://www.googleapis.com/auth/bigquery"} + +type GkeTaskWorkloadIdentityTokenSourceFactoryConfig struct { + RemoteClusterConfig pluginmachinery.ClusterConfig `json:"remoteClusterConfig" pflag:"Configuration of remote GKE cluster"` +} + +type gkeTaskWorkloadIdentityTokenSourceFactory struct { + kubeClient kubernetes.Interface +} + +func (m *gkeTaskWorkloadIdentityTokenSourceFactory) getGcpServiceAccount( + ctx context.Context, + identity Identity, +) (string, error) { + if identity.K8sServiceAccount == "" { + identity.K8sServiceAccount = "default" + } + serviceAccount, err := m.kubeClient.CoreV1().ServiceAccounts(identity.K8sNamespace).Get( + ctx, + identity.K8sServiceAccount, + metav1.GetOptions{}, + ) + if err != nil { + return "", errors.Wrapf(err, "failed to retrieve task k8s service account") + } + + for key, value := range serviceAccount.Annotations { + if key == gcpServiceAccountAnnotationKey { + return value, nil + } + } + + return "", errors.Errorf( + "[%v] annotation doesn't exist on k8s service account [%v/%v], read more at %v", + gcpServiceAccountAnnotationKey, + identity.K8sNamespace, + identity.K8sServiceAccount, + workflowIdentityDocURL) +} + +func (m *gkeTaskWorkloadIdentityTokenSourceFactory) GetTokenSource( + ctx context.Context, + identity Identity, +) (oauth2.TokenSource, error) { + gcpServiceAccount, err := m.getGcpServiceAccount(ctx, identity) + if err != nil { + return oauth.TokenSource{}, err + } + + return impersonate.CredentialsTokenSource(ctx, impersonate.CredentialsConfig{ + TargetPrincipal: gcpServiceAccount, + Scopes: 
impersonationScopes, + }) +} + +func getKubeClient( + config *GkeTaskWorkloadIdentityTokenSourceFactoryConfig, +) (*kubernetes.Clientset, error) { + var kubeCfg *rest.Config + var err error + if config.RemoteClusterConfig.Enabled { + kubeCfg, err = pluginmachinery.KubeClientConfig( + config.RemoteClusterConfig.Endpoint, + config.RemoteClusterConfig.Auth, + ) + if err != nil { + return nil, errors.Wrapf(err, "Error building kubeconfig") + } + } else { + kubeCfg, err = rest.InClusterConfig() + if err != nil { + return nil, errors.Wrapf(err, "Cannot get InCluster kubeconfig") + } + } + + kubeClient, err := kubernetes.NewForConfig(kubeCfg) + if err != nil { + return nil, errors.Wrapf(err, "Error building kubernetes clientset") + } + return kubeClient, err +} + +func NewGkeTaskWorkloadIdentityTokenSourceFactory( + config *GkeTaskWorkloadIdentityTokenSourceFactoryConfig, +) (TokenSourceFactory, error) { + kubeClient, err := getKubeClient(config) + if err != nil { + return nil, err + } + return &gkeTaskWorkloadIdentityTokenSourceFactory{kubeClient: kubeClient}, nil +} diff --git a/flyteplugins/go/tasks/pluginmachinery/google/gke_task_workload_identity_token_source_factory_test.go b/flyteplugins/go/tasks/pluginmachinery/google/gke_task_workload_identity_token_source_factory_test.go new file mode 100644 index 0000000000..ae88eb5451 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/google/gke_task_workload_identity_token_source_factory_test.go @@ -0,0 +1,64 @@ +package google + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/fake" +) + +func TestGetGcpServiceAccount(t *testing.T) { + ctx := context.TODO() + + t.Run("get GCP service account", func(t *testing.T) { + kubeClient := fake.NewSimpleClientset(&corev1.ServiceAccount{ + ObjectMeta: v1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + Annotations: map[string]string{ + 
"owner": "abc", + "iam.gke.io/gcp-service-account": "gcp-service-account", + }, + }}) + ts := gkeTaskWorkloadIdentityTokenSourceFactory{kubeClient: kubeClient} + gcpServiceAccount, err := ts.getGcpServiceAccount(ctx, Identity{ + K8sNamespace: "namespace", + K8sServiceAccount: "name", + }) + + assert.NoError(t, err) + assert.Equal(t, "gcp-service-account", gcpServiceAccount) + }) + + t.Run("no GCP service account", func(t *testing.T) { + kubeClient := fake.NewSimpleClientset() + ts := gkeTaskWorkloadIdentityTokenSourceFactory{kubeClient: kubeClient} + _, err := ts.getGcpServiceAccount(ctx, Identity{ + K8sNamespace: "namespace", + K8sServiceAccount: "name", + }) + + assert.ErrorContains(t, err, "failed to retrieve task k8s service account") + }) + + t.Run("no GCP service account annotation", func(t *testing.T) { + kubeClient := fake.NewSimpleClientset(&corev1.ServiceAccount{ + ObjectMeta: v1.ObjectMeta{ + Name: "name", + Namespace: "namespace", + Annotations: map[string]string{ + "owner": "abc", + }, + }}) + ts := gkeTaskWorkloadIdentityTokenSourceFactory{kubeClient: kubeClient} + _, err := ts.getGcpServiceAccount(ctx, Identity{ + K8sNamespace: "namespace", + K8sServiceAccount: "name", + }) + + assert.ErrorContains(t, err, "annotation doesn't exist on k8s service account") + }) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/google/token_source_factory.go b/flyteplugins/go/tasks/pluginmachinery/google/token_source_factory.go new file mode 100644 index 0000000000..18cd1b0a7d --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/google/token_source_factory.go @@ -0,0 +1,33 @@ +package google + +import ( + "context" + + "github.com/pkg/errors" + "golang.org/x/oauth2" +) + +type Identity struct { + K8sNamespace string + K8sServiceAccount string +} + +type TokenSourceFactory interface { + GetTokenSource(ctx context.Context, identity Identity) (oauth2.TokenSource, error) +} + +func NewTokenSourceFactory(config TokenSourceFactoryConfig) (TokenSourceFactory, error) 
{ + switch config.Type { + case TokenSourceTypeDefault: + return NewDefaultTokenSourceFactory() + case TokenSourceTypeGkeTaskWorkloadIdentity: + return NewGkeTaskWorkloadIdentityTokenSourceFactory( + &config.GkeTaskWorkloadIdentityTokenSourceFactoryConfig, + ) + } + + return nil, errors.Errorf( + "unknown token source type [%v], possible values are: 'default' and 'gke-task-workload-identity'", + config.Type, + ) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/internal/webapi/allocation_token.go b/flyteplugins/go/tasks/pluginmachinery/internal/webapi/allocation_token.go new file mode 100644 index 0000000000..29368e7106 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/internal/webapi/allocation_token.go @@ -0,0 +1,91 @@ +package webapi + +import ( + "context" + "fmt" + + "k8s.io/utils/clock" + + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/webapi" + "github.com/flyteorg/flyte/v2/flytestdlib/logger" +) + +type tokenAllocator struct { + clock clock.Clock +} + +func newTokenAllocator(c clock.Clock) tokenAllocator { + return tokenAllocator{ + clock: c, + } +} + +func (a tokenAllocator) allocateToken(ctx context.Context, p webapi.AsyncPlugin, tCtx core.TaskExecutionContext, state *State, metrics Metrics) ( + newState *State, phaseInfo core.PhaseInfo, err error) { + if len(p.GetConfig().ResourceQuotas) == 0 { + // No quota, return success + return &State{ + AllocationTokenRequestStartTime: a.clock.Now(), + Phase: PhaseAllocationTokenAcquired, + }, core.PhaseInfoQueued(a.clock.Now(), 0, "No allocation token required"), nil + } + + ns, constraints, err := p.ResourceRequirements(ctx, tCtx) + if err != nil { + logger.Errorf(ctx, "Failed to calculate resource requirements for task. 
Error: %v", err) + return nil, core.PhaseInfo{}, err + } + + token := tCtx.TaskExecutionMetadata().GetTaskExecutionID().GetGeneratedName() + allocationStatus, err := tCtx.ResourceManager().AllocateResource(ctx, ns, token, constraints) + if err != nil { + logger.Errorf(ctx, "Failed to allocate resources for task. Error: %v", err) + return nil, core.PhaseInfo{}, err + } + + switch allocationStatus { + case core.AllocationStatusGranted: + metrics.AllocationGranted.Inc(ctx) + metrics.ResourceWaitTime.Observe(float64(a.clock.Since(state.AllocationTokenRequestStartTime).Milliseconds())) + return &State{ + AllocationTokenRequestStartTime: a.clock.Now(), + Phase: PhaseAllocationTokenAcquired, + }, core.PhaseInfoQueued(a.clock.Now(), 0, "Allocation token required"), nil + case core.AllocationStatusNamespaceQuotaExceeded: + case core.AllocationStatusExhausted: + metrics.AllocationNotGranted.Inc(ctx) + logger.Infof(ctx, "Couldn't allocate token because allocation status is [%v].", allocationStatus.String()) + startTime := state.AllocationTokenRequestStartTime + if startTime.IsZero() { + startTime = a.clock.Now() + } + + return &State{ + AllocationTokenRequestStartTime: startTime, + Phase: PhaseNotStarted, + }, core.PhaseInfoWaitingForResourcesInfo( + a.clock.Now(), 0, "Quota for task has exceeded. Waiting for the resource.", nil), nil + } + + return nil, core.PhaseInfo{}, fmt.Errorf("allocation status undefined [%v]", allocationStatus) +} + +func (a tokenAllocator) releaseToken(ctx context.Context, p webapi.AsyncPlugin, tCtx core.TaskExecutionContext, metrics Metrics) error { + ns, _, err := p.ResourceRequirements(ctx, tCtx) + if err != nil { + logger.Errorf(ctx, "Failed to calculate resource requirements for task. 
Error: %v", err) + return err + } + + token := tCtx.TaskExecutionMetadata().GetTaskExecutionID().GetGeneratedName() + err = tCtx.ResourceManager().ReleaseResource(ctx, ns, token) + if err != nil { + metrics.ResourceReleaseFailed.Inc(ctx) + logger.Errorf(ctx, "Failed to release resources for task. Error: %v", err) + return err + } + + metrics.ResourceReleased.Inc(ctx) + return nil +} diff --git a/flyteplugins/go/tasks/pluginmachinery/internal/webapi/allocation_token_test.go b/flyteplugins/go/tasks/pluginmachinery/internal/webapi/allocation_token_test.go new file mode 100644 index 0000000000..4001045452 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/internal/webapi/allocation_token_test.go @@ -0,0 +1,146 @@ +package webapi + +import ( + "context" + "testing" + "time" + + "github.com/go-test/deep" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + testing2 "k8s.io/utils/clock/testing" + + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + mocks2 "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core/mocks" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/webapi" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/webapi/mocks" + "github.com/flyteorg/flyte/v2/flytestdlib/contextutils" + "github.com/flyteorg/flyte/v2/flytestdlib/promutils" + "github.com/flyteorg/flyte/v2/flytestdlib/promutils/labeled" +) + +func init() { + labeled.SetMetricKeys(contextutils.NamespaceKey) +} + +func newPluginWithProperties(properties webapi.PluginConfig) *mocks.AsyncPlugin { + m := &mocks.AsyncPlugin{} + m.OnGetConfig().Return(properties) + return m +} + +func Test_allocateToken(t *testing.T) { + ctx := context.Background() + metrics := newMetrics(promutils.NewTestScope()) + + tNow := time.Now() + clck := testing2.NewFakeClock(tNow) + + tID := &mocks2.TaskExecutionID{} + tID.OnGetGeneratedName().Return("abc") + + tMeta := &mocks2.TaskExecutionMetadata{} + 
tMeta.OnGetTaskExecutionID().Return(tID) + + rm := &mocks2.ResourceManager{} + rm.OnAllocateResourceMatch(ctx, core.ResourceNamespace("ns"), "abc", mock.Anything).Return(core.AllocationStatusGranted, nil) + rm.OnAllocateResourceMatch(ctx, core.ResourceNamespace("ns"), "abc2", mock.Anything).Return(core.AllocationStatusExhausted, nil) + + tCtx := &mocks2.TaskExecutionContext{} + tCtx.OnTaskExecutionMetadata().Return(tMeta) + tCtx.OnResourceManager().Return(rm) + + state := &State{} + + p := newPluginWithProperties(webapi.PluginConfig{ + ResourceQuotas: map[core.ResourceNamespace]int{ + "ns": 1, + }, + }) + + t.Run("no quota", func(t *testing.T) { + p := newPluginWithProperties(webapi.PluginConfig{ResourceQuotas: nil}) + a := newTokenAllocator(clck) + gotNewState, _, err := a.allocateToken(ctx, p, nil, nil, metrics) + assert.NoError(t, err) + if diff := deep.Equal(gotNewState, &State{ + AllocationTokenRequestStartTime: tNow, + Phase: PhaseAllocationTokenAcquired, + }); len(diff) > 0 { + t.Errorf("allocateToken() gotNewState = %v, Diff: %v", gotNewState, diff) + } + }) + + t.Run("Allocation Successful", func(t *testing.T) { + p.OnResourceRequirements(ctx, tCtx).Return("ns", core.ResourceConstraintsSpec{}, nil) + a := newTokenAllocator(clck) + gotNewState, _, err := a.allocateToken(ctx, p, tCtx, state, metrics) + assert.NoError(t, err) + if diff := deep.Equal(gotNewState, &State{ + AllocationTokenRequestStartTime: tNow, + Phase: PhaseAllocationTokenAcquired, + }); len(diff) > 0 { + t.Errorf("allocateToken() gotNewState = %v, Diff: %v", gotNewState, diff) + } + }) + + t.Run("Allocation Failed", func(t *testing.T) { + tID := &mocks2.TaskExecutionID{} + tID.OnGetGeneratedName().Return("abc2") + + tMeta := &mocks2.TaskExecutionMetadata{} + tMeta.OnGetTaskExecutionID().Return(tID) + + rm := &mocks2.ResourceManager{} + rm.OnAllocateResourceMatch(ctx, core.ResourceNamespace("ns"), "abc", mock.Anything).Return(core.AllocationStatusGranted, nil) + 
rm.OnAllocateResourceMatch(ctx, core.ResourceNamespace("ns"), "abc2", mock.Anything).Return(core.AllocationStatusExhausted, nil) + + tCtx := &mocks2.TaskExecutionContext{} + tCtx.OnTaskExecutionMetadata().Return(tMeta) + tCtx.OnResourceManager().Return(rm) + + p.OnResourceRequirements(ctx, tCtx).Return("ns", core.ResourceConstraintsSpec{}, nil) + a := newTokenAllocator(clck) + gotNewState, _, err := a.allocateToken(ctx, p, tCtx, state, metrics) + assert.NoError(t, err) + if diff := deep.Equal(gotNewState, &State{ + AllocationTokenRequestStartTime: tNow, + Phase: PhaseNotStarted, + }); len(diff) > 0 { + t.Errorf("allocateToken() gotNewState = %v, Diff: %v", gotNewState, diff) + } + }) +} + +func Test_releaseToken(t *testing.T) { + ctx := context.Background() + metrics := newMetrics(promutils.NewTestScope()) + + tNow := time.Now() + clck := testing2.NewFakeClock(tNow) + + tID := &mocks2.TaskExecutionID{} + tID.OnGetGeneratedName().Return("abc") + + tMeta := &mocks2.TaskExecutionMetadata{} + tMeta.OnGetTaskExecutionID().Return(tID) + + rm := &mocks2.ResourceManager{} + rm.OnAllocateResourceMatch(ctx, core.ResourceNamespace("ns"), "abc", mock.Anything).Return(core.AllocationStatusGranted, nil) + rm.OnAllocateResourceMatch(ctx, core.ResourceNamespace("ns"), "abc2", mock.Anything).Return(core.AllocationStatusExhausted, nil) + rm.OnReleaseResource(ctx, core.ResourceNamespace("ns"), "abc").Return(nil) + + tCtx := &mocks2.TaskExecutionContext{} + tCtx.OnTaskExecutionMetadata().Return(tMeta) + tCtx.OnResourceManager().Return(rm) + + p := newPluginWithProperties(webapi.PluginConfig{ + ResourceQuotas: map[core.ResourceNamespace]int{ + "ns": 1, + }, + }) + p.OnResourceRequirements(ctx, tCtx).Return("ns", core.ResourceConstraintsSpec{}, nil) + + a := newTokenAllocator(clck) + assert.NoError(t, a.releaseToken(ctx, p, tCtx, metrics)) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/internal/webapi/cache.go b/flyteplugins/go/tasks/pluginmachinery/internal/webapi/cache.go new 
file mode 100644 index 0000000000..b92f2e1363 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/internal/webapi/cache.go @@ -0,0 +1,192 @@ +package webapi + +import ( + "context" + "time" + + "golang.org/x/time/rate" + "k8s.io/client-go/util/workqueue" + + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/errors" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/webapi" + "github.com/flyteorg/flyte/v2/flytestdlib/cache" + stdErrors "github.com/flyteorg/flyte/v2/flytestdlib/errors" + "github.com/flyteorg/flyte/v2/flytestdlib/logger" + "github.com/flyteorg/flyte/v2/flytestdlib/promutils" +) + +//go:generate mockery -all -case=underscore + +const ( + BadReturnCodeError stdErrors.ErrorCode = "RETURNED_UNKNOWN" +) + +// Client interface needed for resource cache to fetch latest updates for resources. +type Client interface { + // Get multiple resources that match all the keys. If the plugin hits any failure, it should stop and return + // the failure. This batch will not be processed further. + Get(ctx context.Context, tCtx webapi.GetContext) (latest webapi.Resource, err error) + + // Status checks the status of a given resource and translates it to a Flyte-understandable PhaseInfo. This API + // should avoid making any network calls and should run very efficiently. + Status(ctx context.Context, tCtx webapi.StatusContext) (phase core.PhaseInfo, err error) +} + +// A generic AutoRefresh cache that uses a client to fetch items' status. +type ResourceCache struct { + // AutoRefresh + cache.AutoRefresh + client Client + cfg webapi.CachingConfig +} + +// A wrapper for each item in the cache. 
+type CacheItem struct { + State + Resource webapi.Resource +} + +func (c CacheItem) IsTerminal() bool { + if c.Resource != nil { + if resource, ok := c.Resource.(interface{ IsTerminal() bool }); ok { + return resource.IsTerminal() + } + } + return c.State.Phase.IsTerminal() +} + +// This basically grab an updated status from Client and store it in the cache +// All other handling should be in the synchronous loop. +func (q *ResourceCache) SyncResource(ctx context.Context, batch cache.Batch) ( + updatedBatch []cache.ItemSyncResponse, err error) { + + resp := make([]cache.ItemSyncResponse, 0, len(batch)) + for _, resource := range batch { + // Cast the item back to the thing we want to work with. + cacheItem, ok := resource.GetItem().(CacheItem) + if !ok { + logger.Errorf(ctx, "Sync loop - Error casting cache object into CacheItem") + return nil, errors.Errorf(errors.CacheFailed, "Failed to cast [%v]", batch[0].GetID()) + } + + if len(resource.GetID()) == 0 { + logger.Warnf(ctx, "Sync loop - ResourceKey is blank for [%s] skipping", resource.GetID()) + resp = append(resp, cache.ItemSyncResponse{ + ID: resource.GetID(), + Item: resource.GetItem(), + Action: cache.Unchanged, + }) + + continue + } + + logger.Debugf(ctx, "Sync loop - processing resource with cache key [%s]", + resource.GetID()) + + if cacheItem.IsTerminal() { + logger.Debugf(ctx, "Sync loop - resource cache key [%v] in terminal state [%s]", + resource.GetID()) + resp = append(resp, cache.ItemSyncResponse{ + ID: resource.GetID(), + Item: cacheItem, + Action: cache.Unchanged, + }) + + continue + } + + if cacheItem.SyncFailureCount > q.cfg.MaxSystemFailures { + logger.Debugf(ctx, "Sync loop - Item with key [%v] has failed to sync [%v] time(s). More than the allowed [%v] time(s). 
Marking as failure.", + cacheItem.SyncFailureCount, q.cfg.MaxSystemFailures) + cacheItem.State.Phase = PhaseSystemFailure + resp = append(resp, cache.ItemSyncResponse{ + ID: resource.GetID(), + Item: cacheItem, + Action: cache.Update, + }) + + continue + } + + // Get an updated status + logger.Debugf(ctx, "Querying AsyncPlugin for %s", resource.GetID()) + newResource, err := q.client.Get(ctx, newPluginContext(cacheItem.ResourceMeta, cacheItem.Resource, "", nil)) + if err != nil { + logger.Infof(ctx, "Error retrieving resource [%s]. Error: %v", resource.GetID(), err) + cacheItem.SyncFailureCount++ + cacheItem.ErrorMessage = err.Error() + + // Make sure we don't return nil for the first argument, because that deletes it from the cache. + resp = append(resp, cache.ItemSyncResponse{ + ID: resource.GetID(), + Item: cacheItem, + Action: cache.Update, + }) + + continue + } + + cacheItem.Resource = newResource + + resp = append(resp, cache.ItemSyncResponse{ + ID: resource.GetID(), + Item: cacheItem, + Action: cache.Update, + }) + } + + return resp, nil +} + +// ToPluginPhase translates the more granular task phase into the webapi plugin phase. 
+func ToPluginPhase(s core.Phase) (Phase, error) { + switch s { + + case core.PhaseUndefined: + fallthrough + case core.PhaseNotReady: + return PhaseNotStarted, nil + case core.PhaseInitializing: + fallthrough + case core.PhaseWaitingForResources: + fallthrough + case core.PhaseQueued: + fallthrough + case core.PhaseRunning: + return PhaseResourcesCreated, nil + case core.PhaseSuccess: + return PhaseSucceeded, nil + case core.PhasePermanentFailure: + fallthrough + case core.PhaseRetryableFailure: + return PhaseUserFailure, nil + default: + return PhaseSystemFailure, errors.Errorf(BadReturnCodeError, "default fallthrough case") + } +} + +func NewResourceCache(ctx context.Context, name string, client Client, cfg webapi.CachingConfig, + rateCfg webapi.RateLimiterConfig, + scope promutils.Scope) (ResourceCache, error) { + + q := ResourceCache{ + client: client, + cfg: cfg, + } + + autoRefreshCache, err := cache.NewAutoRefreshCache(name, q.SyncResource, + workqueue.NewMaxOfRateLimiter( + workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second), + &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(rateCfg.QPS), rateCfg.Burst)}, + ), cfg.ResyncInterval.Duration, uint(cfg.Workers), uint(cfg.Size), + scope.NewSubScope("cache")) + + if err != nil { + logger.Errorf(ctx, "Could not create cache. 
Error: [%s]", err) + return q, errors.Wrapf(errors.CacheFailed, err, "Error creating AutoRefreshCache") + } + + q.AutoRefresh = autoRefreshCache + return q, nil +} diff --git a/flyteplugins/go/tasks/pluginmachinery/internal/webapi/cache_test.go b/flyteplugins/go/tasks/pluginmachinery/internal/webapi/cache_test.go new file mode 100644 index 0000000000..20ac1a8017 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/internal/webapi/cache_test.go @@ -0,0 +1,197 @@ +package webapi + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + mocks2 "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core/mocks" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/internal/webapi/mocks" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/webapi" + "github.com/flyteorg/flyte/v2/flytestdlib/cache" + cacheMocks "github.com/flyteorg/flyte/v2/flytestdlib/cache/mocks" + "github.com/flyteorg/flyte/v2/flytestdlib/promutils" +) + +func TestNewResourceCache(t *testing.T) { + t.Run("Simple", func(t *testing.T) { + c, err := NewResourceCache(context.Background(), "Cache1", &mocks.Client{}, webapi.CachingConfig{ + Size: 10, + }, webapi.RateLimiterConfig{QPS: 1, Burst: 1}, promutils.NewTestScope()) + assert.NoError(t, err) + assert.NotNil(t, c) + }) + + t.Run("Error", func(t *testing.T) { + _, err := NewResourceCache(context.Background(), "Cache1", &mocks.Client{}, webapi.CachingConfig{}, + webapi.RateLimiterConfig{}, + promutils.NewTestScope()) + assert.Error(t, err) + }) +} + +func TestResourceCache_SyncResource(t *testing.T) { + ctx := context.Background() + + t.Run("Terminal state return unchanged", func(t *testing.T) { + mockCache := &cacheMocks.AutoRefresh{} + mockClient := &mocks.Client{} + + q := ResourceCache{ + AutoRefresh: mockCache, + client: mockClient, + cfg: 
webapi.CachingConfig{ + MaxSystemFailures: 5, + }, + } + + state := State{ + Phase: PhaseSucceeded, + } + + cacheItem := CacheItem{ + State: state, + } + + iw := &cacheMocks.ItemWrapper{} + iw.EXPECT().GetItem().Return(cacheItem) + iw.EXPECT().GetID().Return("some-id") + + newCacheItem, err := q.SyncResource(ctx, []cache.ItemWrapper{iw}) + assert.NoError(t, err) + assert.Equal(t, cache.Unchanged, newCacheItem[0].Action) + assert.Equal(t, cacheItem, newCacheItem[0].Item) + }) + + t.Run("Retry limit exceeded", func(t *testing.T) { + mockCache := &cacheMocks.AutoRefresh{} + mockClient := &mocks.Client{} + + q := ResourceCache{ + AutoRefresh: mockCache, + client: mockClient, + cfg: webapi.CachingConfig{ + MaxSystemFailures: 2, + }, + } + + cacheItem := CacheItem{ + State: State{ + SyncFailureCount: 5, + ErrorMessage: "some error", + }, + } + + iw := &cacheMocks.ItemWrapper{} + iw.EXPECT().GetItem().Return(cacheItem) + iw.EXPECT().GetID().Return("some-id") + + newCacheItem, err := q.SyncResource(ctx, []cache.ItemWrapper{iw}) + assert.NoError(t, err) + assert.Equal(t, cache.Update, newCacheItem[0].Action) + cacheItem.State.Phase = PhaseSystemFailure + assert.Equal(t, cacheItem, newCacheItem[0].Item) + }) + + t.Run("move to success", func(t *testing.T) { + mockCache := &cacheMocks.AutoRefresh{} + mockClient := &mocks.Client{} + q := ResourceCache{ + AutoRefresh: mockCache, + client: mockClient, + cfg: webapi.CachingConfig{ + MaxSystemFailures: 5, + }, + } + + state := State{ + ResourceMeta: "123456", + Phase: PhaseResourcesCreated, + } + + cacheItem := CacheItem{ + State: state, + } + + mockClient.OnGet(ctx, newPluginContext("123456", nil, "", nil)).Return("newID", nil) + mockClient.OnStatusMatch(mock.Anything, "newID", mock.Anything).Return(core.PhaseInfoSuccess(nil), nil) + + iw := &cacheMocks.ItemWrapper{} + iw.EXPECT().GetItem().Return(cacheItem) + iw.EXPECT().GetID().Return("some-id") + + newCacheItem, err := q.SyncResource(ctx, []cache.ItemWrapper{iw}) + 
assert.NoError(t, err) + assert.Equal(t, cache.Update, newCacheItem[0].Action) + }) + + t.Run("Failing to retrieve latest", func(t *testing.T) { + mockCache := &cacheMocks.AutoRefresh{} + mockClient := &mocks.Client{} + mockSecretManager := &mocks2.SecretManager{} + mockSecretManager.OnGetMatch(mock.Anything, mock.Anything).Return("fake key", nil) + + q := ResourceCache{ + AutoRefresh: mockCache, + client: mockClient, + cfg: webapi.CachingConfig{ + MaxSystemFailures: 5, + }, + } + + state := State{ + ResourceMeta: "123456", + Phase: PhaseResourcesCreated, + } + + cacheItem := CacheItem{ + State: state, + } + + mockClient.OnGet(ctx, newPluginContext("123456", nil, "", nil)).Return("newID", fmt.Errorf("failed to retrieve resource")) + + iw := &cacheMocks.ItemWrapper{} + iw.EXPECT().GetItem().Return(cacheItem) + iw.EXPECT().GetID().Return("some-id") + + newCacheItem, err := q.SyncResource(ctx, []cache.ItemWrapper{iw}) + newExecutionState := newCacheItem[0].Item.(CacheItem) + assert.NoError(t, err) + assert.Equal(t, cache.Update, newCacheItem[0].Action) + assert.Equal(t, PhaseResourcesCreated, newExecutionState.Phase) + }) +} + +func TestToPluginPhase(t *testing.T) { + tests := []struct { + args core.Phase + want Phase + wantErr bool + }{ + {core.PhaseNotReady, PhaseNotStarted, false}, + {core.PhaseUndefined, PhaseNotStarted, false}, + {core.PhaseInitializing, PhaseResourcesCreated, false}, + {core.PhaseWaitingForResources, PhaseResourcesCreated, false}, + {core.PhaseQueued, PhaseResourcesCreated, false}, + {core.PhaseRunning, PhaseResourcesCreated, false}, + {core.PhaseSuccess, PhaseSucceeded, false}, + {core.PhasePermanentFailure, PhaseUserFailure, false}, + {core.PhaseRetryableFailure, PhaseUserFailure, false}, + } + for _, tt := range tests { + t.Run(tt.args.String(), func(t *testing.T) { + got, err := ToPluginPhase(tt.args) + if (err != nil) != tt.wantErr { + t.Errorf("ToPluginPhase() error = %v, wantErr %v", err, tt.wantErr) + return + } + if got != tt.want { + 
t.Errorf("ToPluginPhase() got = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/flyteplugins/go/tasks/pluginmachinery/internal/webapi/core.go b/flyteplugins/go/tasks/pluginmachinery/internal/webapi/core.go new file mode 100644 index 0000000000..6356fba21d --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/internal/webapi/core.go @@ -0,0 +1,224 @@ +package webapi + +import ( + "context" + "encoding/gob" + "fmt" + "time" + + "k8s.io/utils/clock" + + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/errors" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/webapi" + "github.com/flyteorg/flyte/v2/flytestdlib/cache" + stdErrs "github.com/flyteorg/flyte/v2/flytestdlib/errors" + "github.com/flyteorg/flyte/v2/flytestdlib/logger" +) + +const ( + pluginStateVersion = 1 + minCacheSize = 10 + maxCacheSize = 500000 + minWorkers = 1 + maxWorkers = 10000 + minSyncDuration = 5 * time.Second + maxSyncDuration = time.Hour + minBurst = 5 + maxBurst = 10000 + minQPS = 1 + maxQPS = 100000 +) + +type CorePlugin struct { + id string + p webapi.AsyncPlugin + cache cache.AutoRefresh + tokenAllocator tokenAllocator + metrics Metrics +} + +func (c CorePlugin) unmarshalState(ctx context.Context, stateReader core.PluginStateReader) (State, error) { + t := c.metrics.SucceededUnmarshalState.Start(ctx) + existingState := State{} + + // We assume here that the first time this function is called, the custom state we get back is whatever we passed in, + // namely the zero-value of our struct. + if _, err := stateReader.Get(&existingState); err != nil { + c.metrics.FailedUnmarshalState.Inc(ctx) + logger.Errorf(ctx, "AsyncPlugin [%v] failed to unmarshal custom state. 
Error: %v", + c.GetID(), err) + + return State{}, errors.Wrapf(errors.CorruptedPluginState, err, + "Failed to unmarshal custom state in Handle") + } + + t.Stop() + return existingState, nil +} + +func (c CorePlugin) GetID() string { + return c.id +} + +func (c CorePlugin) GetProperties() core.PluginProperties { + return core.PluginProperties{} +} + +func (c CorePlugin) Handle(ctx context.Context, tCtx core.TaskExecutionContext) (core.Transition, error) { + incomingState, err := c.unmarshalState(ctx, tCtx.PluginStateReader()) + if err != nil { + return core.UnknownTransition, err + } + + var nextState *State + var phaseInfo core.PhaseInfo + + switch incomingState.Phase { + case PhaseNotStarted: + if len(c.p.GetConfig().ResourceQuotas) > 0 { + nextState, phaseInfo, err = c.tokenAllocator.allocateToken(ctx, c.p, tCtx, &incomingState, c.metrics) + } else { + nextState, phaseInfo, err = launch(ctx, c.p, tCtx, c.cache, &incomingState) + } + case PhaseAllocationTokenAcquired: + nextState, phaseInfo, err = launch(ctx, c.p, tCtx, c.cache, &incomingState) + case PhaseResourcesCreated: + nextState, phaseInfo, err = monitor(ctx, tCtx, c.p, c.cache, &incomingState) + } + + if err != nil { + return core.UnknownTransition, err + } + + if err := tCtx.PluginStateWriter().Put(pluginStateVersion, nextState); err != nil { + return core.UnknownTransition, err + } + + return core.DoTransition(phaseInfo), nil +} + +func (c CorePlugin) Abort(ctx context.Context, tCtx core.TaskExecutionContext) error { + incomingState, err := c.unmarshalState(ctx, tCtx.PluginStateReader()) + if err != nil { + return err + } + + logger.Infof(ctx, "Attempting to abort resource [%v].", tCtx.TaskExecutionMetadata().GetTaskExecutionID().GetID()) + + err = c.p.Delete(ctx, newPluginContext(incomingState.ResourceMeta, nil, "Aborted", tCtx)) + if err != nil { + logger.Errorf(ctx, "Failed to abort some resources [%v]. 
Error: %v", + tCtx.TaskExecutionMetadata().GetTaskExecutionID().GetGeneratedName(), err) + return err + } + + return nil +} + +func (c CorePlugin) Finalize(ctx context.Context, tCtx core.TaskExecutionContext) error { + cacheItemID := tCtx.TaskExecutionMetadata().GetTaskExecutionID().GetGeneratedName() + err := c.cache.DeleteDelayed(cacheItemID) + if err != nil { + logger.Errorf(ctx, "Failed to delete resource [%v] from cache. Error: %v", cacheItemID, err) + return fmt.Errorf("failed to delete resource [%v] from cache. Error: %v", cacheItemID, err) + } + + if len(c.p.GetConfig().ResourceQuotas) == 0 { + // If there are no defined quotas, there is nothing to cleanup. + return nil + } + + logger.Infof(ctx, "Attempting to finalize resource [%v].", cacheItemID) + + return c.tokenAllocator.releaseToken(ctx, c.p, tCtx, c.metrics) +} + +func validateRangeInt(fieldName string, min, max, provided int) error { + if provided > max || provided < min { + return fmt.Errorf("%v is expected to be between %v and %v. Provided value is %v", + fieldName, min, max, provided) + } + + return nil +} + +func validateRangeFloat64(fieldName string, min, max, provided float64) error { + if provided > max || provided < min { + return fmt.Errorf("%v is expected to be between %v and %v. 
Provided value is %v", + fieldName, min, max, provided) + } + + return nil +} + +func validateConfig(cfg webapi.PluginConfig) error { + errs := stdErrs.ErrorCollection{} + errs.Append(validateRangeInt("cache size", minCacheSize, maxCacheSize, cfg.Caching.Size)) + errs.Append(validateRangeInt("workers count", minWorkers, maxWorkers, cfg.Caching.Workers)) + errs.Append(validateRangeFloat64("resync interval", minSyncDuration.Seconds(), maxSyncDuration.Seconds(), cfg.Caching.ResyncInterval.Seconds())) + errs.Append(validateRangeInt("read burst", minBurst, maxBurst, cfg.ReadRateLimiter.Burst)) + errs.Append(validateRangeInt("read qps", minQPS, maxQPS, cfg.ReadRateLimiter.QPS)) + errs.Append(validateRangeInt("write burst", minBurst, maxBurst, cfg.WriteRateLimiter.Burst)) + errs.Append(validateRangeInt("write qps", minQPS, maxQPS, cfg.WriteRateLimiter.QPS)) + + return errs.ErrorOrDefault() +} + +func createRemotePlugin(pluginEntry webapi.PluginEntry, c clock.Clock) core.PluginEntry { + return core.PluginEntry{ + ID: pluginEntry.ID, + RegisteredTaskTypes: pluginEntry.SupportedTaskTypes, + LoadPlugin: func(ctx context.Context, iCtx core.SetupContext) ( + core.Plugin, error) { + p, err := pluginEntry.PluginLoader(ctx, iCtx) + if err != nil { + return nil, err + } + + err = validateConfig(p.GetConfig()) + if err != nil { + return nil, fmt.Errorf("config validation failed. Error: %w", err) + } + + // If the plugin will use a custom state, register it to be able to + // serialize/deserialize interfaces later. 
+ if customState := p.GetConfig().ResourceMeta; customState != nil { + gob.Register(customState) + } + + if quotas := p.GetConfig().ResourceQuotas; len(quotas) > 0 { + for ns, quota := range quotas { + err := iCtx.ResourceRegistrar().RegisterResourceQuota(ctx, ns, quota) + if err != nil { + return nil, err + } + } + } + + resourceCache, err := NewResourceCache(ctx, pluginEntry.ID, p, p.GetConfig().Caching, + p.GetConfig().ReadRateLimiter, iCtx.MetricsScope().NewSubScope("cache")) + + if err != nil { + return nil, err + } + + err = resourceCache.Start(ctx) + if err != nil { + return nil, err + } + + return CorePlugin{ + id: pluginEntry.ID, + p: p, + cache: resourceCache, + metrics: newMetrics(iCtx.MetricsScope()), + tokenAllocator: newTokenAllocator(c), + }, nil + }, + } +} + +func CreateRemotePlugin(pluginEntry webapi.PluginEntry) core.PluginEntry { + return createRemotePlugin(pluginEntry, clock.RealClock{}) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/internal/webapi/core_test.go b/flyteplugins/go/tasks/pluginmachinery/internal/webapi/core_test.go new file mode 100644 index 0000000000..8a99a4419d --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/internal/webapi/core_test.go @@ -0,0 +1,95 @@ +package webapi + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/webapi" + "github.com/flyteorg/flyte/v2/flytestdlib/config" +) + +func Test_validateConfig(t *testing.T) { + t.Run("In range", func(t *testing.T) { + cfg := webapi.PluginConfig{ + ReadRateLimiter: webapi.RateLimiterConfig{ + QPS: 10, + Burst: 100, + }, + WriteRateLimiter: webapi.RateLimiterConfig{ + QPS: 10, + Burst: 100, + }, + Caching: webapi.CachingConfig{ + Size: 10, + ResyncInterval: config.Duration{Duration: 10 * time.Second}, + Workers: 10, + }, + } + + assert.NoError(t, validateConfig(cfg)) + }) + + 
t.Run("Below min", func(t *testing.T) { + cfg := webapi.PluginConfig{ + ReadRateLimiter: webapi.RateLimiterConfig{ + QPS: 0, + Burst: 0, + }, + WriteRateLimiter: webapi.RateLimiterConfig{ + QPS: 0, + Burst: 0, + }, + Caching: webapi.CachingConfig{ + Size: 0, + ResyncInterval: config.Duration{Duration: 0 * time.Second}, + Workers: 0, + }, + } + + err := validateConfig(cfg) + assert.Error(t, err) + assert.Equal(t, "\ncache size is expected to be between 10 and 500000. Provided value is 0\nworkers count is expected to be between 1 and 10000. Provided value is 0\nresync interval is expected to be between 5 and 3600. Provided value is 0\nread burst is expected to be between 5 and 10000. Provided value is 0\nread qps is expected to be between 1 and 100000. Provided value is 0\nwrite burst is expected to be between 5 and 10000. Provided value is 0\nwrite qps is expected to be between 1 and 100000. Provided value is 0", err.Error()) + }) + + t.Run("Above max", func(t *testing.T) { + cfg := webapi.PluginConfig{ + ReadRateLimiter: webapi.RateLimiterConfig{ + QPS: 1000, + Burst: 1000000, + }, + WriteRateLimiter: webapi.RateLimiterConfig{ + QPS: 1000, + Burst: 1000000, + }, + Caching: webapi.CachingConfig{ + Size: 1000000000, + ResyncInterval: config.Duration{Duration: 10000 * time.Hour}, + Workers: 1000000000, + }, + } + + err := validateConfig(cfg) + assert.Error(t, err) + assert.Equal(t, "\ncache size is expected to be between 10 and 500000. Provided value is 1000000000\nworkers count is expected to be between 1 and 10000. Provided value is 1000000000\nresync interval is expected to be between 5 and 3600. Provided value is 3.6e+07\nread burst is expected to be between 5 and 10000. Provided value is 1000000\nwrite burst is expected to be between 5 and 10000. 
Provided value is 1000000", err.Error()) + }) +} + +func TestCreateRemotePlugin(t *testing.T) { + CreateRemotePlugin(webapi.PluginEntry{ + ID: "MyTestPlugin", + SupportedTaskTypes: []core.TaskType{"test-task"}, + PluginLoader: func(ctx context.Context, iCtx webapi.PluginSetupContext) (webapi.AsyncPlugin, error) { + return newPluginWithProperties(webapi.PluginConfig{ + Caching: webapi.CachingConfig{ + Size: 10, + }, + }), nil + }, + IsDefault: false, + DefaultForTaskTypes: []core.TaskType{"test-task"}, + }) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/internal/webapi/launcher.go b/flyteplugins/go/tasks/pluginmachinery/internal/webapi/launcher.go new file mode 100644 index 0000000000..bca65583ec --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/internal/webapi/launcher.go @@ -0,0 +1,55 @@ +package webapi + +import ( + "context" + "time" + + pluginErrors "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/errors" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/webapi" + "github.com/flyteorg/flyte/v2/flytestdlib/cache" + "github.com/flyteorg/flyte/v2/flytestdlib/logger" +) + +func launch(ctx context.Context, p webapi.AsyncPlugin, tCtx core.TaskExecutionContext, cache cache.AutoRefresh, + state *State) (newState *State, phaseInfo core.PhaseInfo, err error) { + rMeta, r, err := p.Create(ctx, tCtx) + if err != nil { + logger.Errorf(ctx, "Failed to create resource. Error: %v", err) + return state, core.PhaseInfoRetryableFailure(pluginErrors.TaskFailedWithError, err.Error(), nil), nil + } + + // If the plugin also returned the created resource, check to see if it's already in a terminal state. 
+ logger.Infof(ctx, "Created Resource Name [%s] and Meta [%v]", tCtx.TaskExecutionMetadata().GetTaskExecutionID().GetGeneratedName(), rMeta) + if r != nil { + phase, err := p.Status(ctx, newPluginContext(rMeta, r, "", tCtx)) + if err != nil { + logger.Errorf(ctx, "Failed to check resource status. Error: %v", err) + return nil, core.PhaseInfo{}, err + } + + if phase.Phase().IsTerminal() { + logger.Infof(ctx, "Resource has already terminated ID:[%s], Phase:[%s]", + tCtx.TaskExecutionMetadata().GetTaskExecutionID().GetGeneratedName(), phase.Phase()) + return state, phase, nil + } + } + + // Store the created resource name, and update our state. + state.ResourceMeta = rMeta + state.Phase = PhaseResourcesCreated + state.PhaseVersion = 2 + + cacheItem := CacheItem{ + State: *state, + } + + // Also, add to the AutoRefreshCache so we start getting updates through background refresh. + _, err = cache.GetOrCreate(tCtx.TaskExecutionMetadata().GetTaskExecutionID().GetGeneratedName(), cacheItem) + if err != nil { + logger.Errorf(ctx, "Failed to add item to cache. 
Error: %v", err) + return nil, core.PhaseInfo{}, err + } + + return state, core.PhaseInfoQueued(time.Now(), state.PhaseVersion, "launched"), nil +} diff --git a/flyteplugins/go/tasks/pluginmachinery/internal/webapi/launcher_test.go b/flyteplugins/go/tasks/pluginmachinery/internal/webapi/launcher_test.go new file mode 100644 index 0000000000..f139b95d3b --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/internal/webapi/launcher_test.go @@ -0,0 +1,112 @@ +package webapi + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core/mocks" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/webapi" + mocks2 "github.com/flyteorg/flyte/v2/flytestdlib/cache/mocks" +) + +func Test_launch(t *testing.T) { + t.Run("Successful launch", func(t *testing.T) { + ctx := context.Background() + tCtx := &mocks.TaskExecutionContext{} + meta := &mocks.TaskExecutionMetadata{} + taskID := &mocks.TaskExecutionID{} + taskID.OnGetGeneratedName().Return("my-id") + meta.OnGetTaskExecutionID().Return(taskID) + tCtx.OnTaskExecutionMetadata().Return(meta) + + c := &mocks2.AutoRefresh{} + s := State{ + ResourceMeta: "abc", + Phase: PhaseResourcesCreated, + PhaseVersion: 2, + } + c.EXPECT().GetOrCreate("my-id", CacheItem{State: s}).Return(CacheItem{State: s}, nil) + + plgn := newPluginWithProperties(webapi.PluginConfig{}) + plgn.OnCreate(ctx, tCtx).Return("abc", nil, nil) + plgn.OnStatus(ctx, newPluginContext("abc", nil, "", tCtx)).Return(core.PhaseInfoSuccess(nil), nil) + newS, phaseInfo, err := launch(ctx, plgn, tCtx, c, &s) + assert.NoError(t, err) + assert.NotNil(t, newS) + assert.NotNil(t, phaseInfo) + }) + + t.Run("Already succeeded when launched", func(t *testing.T) { + ctx := context.Background() + tCtx := &mocks.TaskExecutionContext{} + meta := &mocks.TaskExecutionMetadata{} + taskID := 
&mocks.TaskExecutionID{} + taskID.OnGetGeneratedName().Return("my-id") + meta.OnGetTaskExecutionID().Return(taskID) + tCtx.OnTaskExecutionMetadata().Return(meta) + + c := &mocks2.AutoRefresh{} + s := State{ + Phase: PhaseResourcesCreated, + PhaseVersion: 2, + ResourceMeta: "abc", + } + + plgn := newPluginWithProperties(webapi.PluginConfig{}) + plgn.OnCreate(ctx, tCtx).Return("abc", "abc-r", nil) + plgn.OnStatus(ctx, newPluginContext("abc", "abc-r", "", tCtx)).Return(core.PhaseInfoSuccess(nil), nil) + newS, phaseInfo, err := launch(ctx, plgn, tCtx, c, &s) + assert.NoError(t, err) + assert.NotNil(t, newS) + assert.NotNil(t, phaseInfo) + assert.Equal(t, core.PhaseSuccess, phaseInfo.Phase()) + }) + + t.Run("Failed to create resource", func(t *testing.T) { + ctx := context.Background() + tCtx := &mocks.TaskExecutionContext{} + meta := &mocks.TaskExecutionMetadata{} + taskID := &mocks.TaskExecutionID{} + taskID.OnGetGeneratedName().Return("my-id") + meta.OnGetTaskExecutionID().Return(taskID) + tCtx.OnTaskExecutionMetadata().Return(meta) + + c := &mocks2.AutoRefresh{} + s := State{} + c.EXPECT().GetOrCreate("my-id", CacheItem{State: s}).Return(CacheItem{State: s}, nil) + + plgn := newPluginWithProperties(webapi.PluginConfig{}) + plgn.OnCreate(ctx, tCtx).Return("", nil, fmt.Errorf("error creating")) + _, phase, err := launch(ctx, plgn, tCtx, c, &s) + assert.Nil(t, err) + assert.Equal(t, core.PhaseRetryableFailure, phase.Phase()) + }) + + t.Run("Failed to cache", func(t *testing.T) { + ctx := context.Background() + tCtx := &mocks.TaskExecutionContext{} + meta := &mocks.TaskExecutionMetadata{} + taskID := &mocks.TaskExecutionID{} + taskID.OnGetGeneratedName().Return("my-id") + meta.OnGetTaskExecutionID().Return(taskID) + tCtx.OnTaskExecutionMetadata().Return(meta) + + c := &mocks2.AutoRefresh{} + s := State{ + Phase: PhaseResourcesCreated, + PhaseVersion: 2, + ResourceMeta: "my-id", + } + c.EXPECT().GetOrCreate("my-id", CacheItem{State: s}).Return(CacheItem{State: s}, 
fmt.Errorf("failed to cache")) + + plgn := newPluginWithProperties(webapi.PluginConfig{}) + plgn.OnCreate(ctx, tCtx).Return("my-id", nil, nil) + plgn.OnStatus(ctx, newPluginContext("my-id", nil, "", tCtx)).Return(core.PhaseInfoRunning(0, nil), nil) + _, _, err := launch(ctx, plgn, tCtx, c, &s) + assert.Error(t, err) + }) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/internal/webapi/metrics.go b/flyteplugins/go/tasks/pluginmachinery/internal/webapi/metrics.go new file mode 100644 index 0000000000..e59be1d4fd --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/internal/webapi/metrics.go @@ -0,0 +1,45 @@ +package webapi + +import ( + "time" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/flyteorg/flyte/v2/flytestdlib/promutils" + "github.com/flyteorg/flyte/v2/flytestdlib/promutils/labeled" +) + +type Metrics struct { + Scope promutils.Scope + ResourceReleased labeled.Counter + ResourceReleaseFailed labeled.Counter + AllocationGranted labeled.Counter + AllocationNotGranted labeled.Counter + ResourceWaitTime prometheus.Summary + SucceededUnmarshalState labeled.StopWatch + FailedUnmarshalState labeled.Counter +} + +var ( + tokenAgeObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001, 1.0: 0.0} +) + +func newMetrics(scope promutils.Scope) Metrics { + return Metrics{ + Scope: scope, + ResourceReleased: labeled.NewCounter("resource_release_success", + "Resource allocation token released", scope, labeled.EmitUnlabeledMetric), + ResourceReleaseFailed: labeled.NewCounter("resource_release_failed", + "Error releasing allocation token", scope, labeled.EmitUnlabeledMetric), + AllocationGranted: labeled.NewCounter("allocation_grant_success", + "Allocation request granted", scope, labeled.EmitUnlabeledMetric), + AllocationNotGranted: labeled.NewCounter("allocation_grant_failed", + "Allocation request did not fail but not granted", scope, labeled.EmitUnlabeledMetric), + ResourceWaitTime: 
scope.MustNewSummaryWithOptions("resource_wait_time", "Duration the execution has been waiting for a resource allocation token", + promutils.SummaryOptions{Objectives: tokenAgeObjectives}), + SucceededUnmarshalState: labeled.NewStopWatch("unmarshal_state_success", "Successfully unmarshaled state", + time.Millisecond, scope), + FailedUnmarshalState: labeled.NewCounter("unmarshal_state_failed", + "Failed to unmarshal state", scope, labeled.EmitUnlabeledMetric), + } +} diff --git a/flyteplugins/go/tasks/pluginmachinery/internal/webapi/mocks/client.go b/flyteplugins/go/tasks/pluginmachinery/internal/webapi/mocks/client.go new file mode 100644 index 0000000000..ea4cd09c7d --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/internal/webapi/mocks/client.go @@ -0,0 +1,98 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. + +package mocks + +import ( + context "context" + + core "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + + mock "github.com/stretchr/testify/mock" + + webapi "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/webapi" +) + +// Client is an autogenerated mock type for the Client type +type Client struct { + mock.Mock +} + +type Client_Get struct { + *mock.Call +} + +func (_m Client_Get) Return(latest interface{}, err error) *Client_Get { + return &Client_Get{Call: _m.Call.Return(latest, err)} +} + +func (_m *Client) OnGet(ctx context.Context, tCtx webapi.GetContext) *Client_Get { + c_call := _m.On("Get", ctx, tCtx) + return &Client_Get{Call: c_call} +} + +func (_m *Client) OnGetMatch(matchers ...interface{}) *Client_Get { + c_call := _m.On("Get", matchers...) 
+ return &Client_Get{Call: c_call} +} + +// Get provides a mock function with given fields: ctx, tCtx +func (_m *Client) Get(ctx context.Context, tCtx webapi.GetContext) (interface{}, error) { + ret := _m.Called(ctx, tCtx) + + var r0 interface{} + if rf, ok := ret.Get(0).(func(context.Context, webapi.GetContext) interface{}); ok { + r0 = rf(ctx, tCtx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(interface{}) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, webapi.GetContext) error); ok { + r1 = rf(ctx, tCtx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type Client_Status struct { + *mock.Call +} + +func (_m Client_Status) Return(phase core.PhaseInfo, err error) *Client_Status { + return &Client_Status{Call: _m.Call.Return(phase, err)} +} + +func (_m *Client) OnStatus(ctx context.Context, tCtx webapi.StatusContext) *Client_Status { + c_call := _m.On("Status", ctx, tCtx) + return &Client_Status{Call: c_call} +} + +func (_m *Client) OnStatusMatch(matchers ...interface{}) *Client_Status { + c_call := _m.On("Status", matchers...) 
+ return &Client_Status{Call: c_call} +} + +// Status provides a mock function with given fields: ctx, tCtx +func (_m *Client) Status(ctx context.Context, tCtx webapi.StatusContext) (core.PhaseInfo, error) { + ret := _m.Called(ctx, tCtx) + + var r0 core.PhaseInfo + if rf, ok := ret.Get(0).(func(context.Context, webapi.StatusContext) core.PhaseInfo); ok { + r0 = rf(ctx, tCtx) + } else { + r0 = ret.Get(0).(core.PhaseInfo) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, webapi.StatusContext) error); ok { + r1 = rf(ctx, tCtx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/flyteplugins/go/tasks/pluginmachinery/internal/webapi/monitor.go b/flyteplugins/go/tasks/pluginmachinery/internal/webapi/monitor.go new file mode 100644 index 0000000000..4910b1f373 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/internal/webapi/monitor.go @@ -0,0 +1,73 @@ +package webapi + +import ( + "context" + "time" + + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/errors" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + "github.com/flyteorg/flyte/v2/flytestdlib/cache" + "github.com/flyteorg/flyte/v2/flytestdlib/logger" +) + +func monitor(ctx context.Context, tCtx core.TaskExecutionContext, p Client, cache cache.AutoRefresh, state *State) ( + newState *State, phaseInfo core.PhaseInfo, err error) { + newCacheItem := CacheItem{ + State: *state, + } + + cacheItemID := tCtx.TaskExecutionMetadata().GetTaskExecutionID().GetGeneratedName() + item, err := cache.GetOrCreate(cacheItemID, newCacheItem) + if err != nil { + return nil, core.PhaseInfo{}, err + } + + cacheItem, ok := item.(CacheItem) + if !ok { + logger.Errorf(ctx, "Error casting cache object into ExecutionState") + return nil, core.PhaseInfo{}, errors.Errorf( + errors.CacheFailed, "Failed to cast [%v]", cacheItem) + } + + // If the cache has not synced yet, just return + if cacheItem.Resource == nil { + if cacheItem.Phase.IsTerminal() { + err = 
cache.DeleteDelayed(cacheItemID) + if err != nil { + logger.Errorf(ctx, "Failed to queue item for deletion in the cache with Item Id: [%v]. Error: %v", + cacheItemID, err) + } + return state, core.PhaseInfoFailure(errors.CacheFailed, cacheItem.ErrorMessage, nil), nil + } + return state, core.PhaseInfoQueued(time.Now(), cacheItem.PhaseVersion, "job submitted"), nil + } + + newPhase, err := p.Status(ctx, newPluginContext(cacheItem.ResourceMeta, cacheItem.Resource, "", tCtx)) + if err != nil { + return nil, core.PhaseInfoUndefined, err + } + + newPluginPhase, err := ToPluginPhase(newPhase.Phase()) + if err != nil { + return nil, core.PhaseInfoUndefined, err + } + + if cacheItem.Phase != newPluginPhase { + logger.Infof(ctx, "Moving Phase for from %s to %s", cacheItem.Phase, newPluginPhase) + } + + cacheItem.Phase = newPluginPhase + cacheItem.PhaseVersion = newPhase.Version() + + if newPluginPhase.IsTerminal() { + // Queue item for deletion in the cache. + err = cache.DeleteDelayed(cacheItemID) + if err != nil { + logger.Errorf(ctx, "Failed to queue item for deletion in the cache with Item Id: [%v]. Error: %v", + cacheItemID, err) + } + } + + // If there were updates made to the state, we'll have picked them up automatically. Nothing more to do. 
+ return &cacheItem.State, newPhase, nil +} diff --git a/flyteplugins/go/tasks/pluginmachinery/internal/webapi/monitor_test.go b/flyteplugins/go/tasks/pluginmachinery/internal/webapi/monitor_test.go new file mode 100644 index 0000000000..0557b8567b --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/internal/webapi/monitor_test.go @@ -0,0 +1,78 @@ +package webapi + +import ( + "context" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/client-go/util/workqueue" + + core2 "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core/mocks" + internalMocks "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/internal/webapi/mocks" + "github.com/flyteorg/flyte/v2/flytestdlib/cache" + "github.com/flyteorg/flyte/v2/flytestdlib/promutils" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +func Test_monitor(t *testing.T) { + ctx := context.Background() + ctx, cancel := context.WithCancel(ctx) + tCtx := &mocks.TaskExecutionContext{} + ctxMeta := &mocks.TaskExecutionMetadata{} + execID := &mocks.TaskExecutionID{} + execID.OnGetGeneratedName().Return("generated_name") + execID.OnGetID().Return(core.TaskExecutionIdentifier{}) + ctxMeta.OnGetTaskExecutionID().Return(execID) + tCtx.OnTaskExecutionMetadata().Return(ctxMeta) + + client := &internalMocks.Client{} + client.OnStatusMatch(ctx, mock.Anything).Return(core2.PhaseInfoSuccess(nil), nil) + + wg := sync.WaitGroup{} + wg.Add(8) + cacheObj, err := cache.NewAutoRefreshCache(rand.String(5), func(ctx context.Context, batch cache.Batch) (updatedBatch []cache.ItemSyncResponse, err error) { + wg.Done() + t.Logf("Syncing Item [%+v]", batch[0]) + return []cache.ItemSyncResponse{ + { + ID: batch[0].GetID(), + Item: batch[0].GetItem(), + Action: cache.Update, + }, + }, nil + }, 
workqueue.DefaultControllerRateLimiter(), time.Second, 1, 10, promutils.NewTestScope()) + assert.NoError(t, err) + + assert.NoError(t, cacheObj.Start(ctx)) + + // Insert a dummy item to make sure the sync loop keeps getting invoked + _, err = cacheObj.GetOrCreate("generated_name2", CacheItem{Resource: "fake_resource2"}) + assert.NoError(t, err) + + _, err = cacheObj.GetOrCreate("generated_name", CacheItem{Resource: "fake_resource"}) + assert.NoError(t, err) + + s := &State{} + newState, phaseInfo, err := monitor(ctx, tCtx, client, cacheObj, s) + assert.NoError(t, err) + assert.NotNil(t, newState) + assert.NotNil(t, phaseInfo) + assert.Equal(t, core2.PhaseSuccess.String(), phaseInfo.Phase().String()) + + // Make sure the item is still in the cache as is... + cachedItem, err := cacheObj.GetOrCreate("generated_name", CacheItem{Resource: "shouldnt_insert"}) + assert.NoError(t, err) + assert.Equal(t, "fake_resource", cachedItem.(CacheItem).Resource.(string)) + + // Wait for sync to run to actually delete the resource + wg.Wait() + cancel() + cachedItem, err = cacheObj.GetOrCreate("generated_name", CacheItem{Resource: "new_resource"}) + assert.NoError(t, err) + assert.Equal(t, "new_resource", cachedItem.(CacheItem).Resource.(string)) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/internal/webapi/phase_enumer.go b/flyteplugins/go/tasks/pluginmachinery/internal/webapi/phase_enumer.go new file mode 100644 index 0000000000..9eff931df6 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/internal/webapi/phase_enumer.go @@ -0,0 +1,53 @@ +// Code generated by "enumer -type=Phase -trimprefix=Phase"; DO NOT EDIT. 
+ +package webapi + +import ( + "fmt" +) + +const _PhaseName = "NotStartedAllocationTokenAcquiredResourcesCreatedSucceededUserFailureSystemFailure" + +var _PhaseIndex = [...]uint8{0, 10, 33, 49, 58, 69, 82} + +func (i Phase) String() string { + if i < 0 || i >= Phase(len(_PhaseIndex)-1) { + return fmt.Sprintf("Phase(%d)", i) + } + return _PhaseName[_PhaseIndex[i]:_PhaseIndex[i+1]] +} + +var _PhaseValues = []Phase{0, 1, 2, 3, 4, 5} + +var _PhaseNameToValueMap = map[string]Phase{ + _PhaseName[0:10]: 0, + _PhaseName[10:33]: 1, + _PhaseName[33:49]: 2, + _PhaseName[49:58]: 3, + _PhaseName[58:69]: 4, + _PhaseName[69:82]: 5, +} + +// PhaseString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func PhaseString(s string) (Phase, error) { + if val, ok := _PhaseNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to Phase values", s) +} + +// PhaseValues returns all values of the enum +func PhaseValues() []Phase { + return _PhaseValues +} + +// IsAPhase returns "true" if the value is listed in the enum definition. 
"false" otherwise +func (i Phase) IsAPhase() bool { + for _, v := range _PhaseValues { + if i == v { + return true + } + } + return false +} diff --git a/flyteplugins/go/tasks/pluginmachinery/internal/webapi/plugin_context.go b/flyteplugins/go/tasks/pluginmachinery/internal/webapi/plugin_context.go new file mode 100644 index 0000000000..75d20dfd27 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/internal/webapi/plugin_context.go @@ -0,0 +1,34 @@ +package webapi + +import ( + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/webapi" +) + +type pluginContext struct { + webapi.TaskExecutionContext + + resourceMeta webapi.ResourceMeta + resource webapi.Resource + reason string +} + +func (p pluginContext) Reason() string { + return p.reason +} + +func (p pluginContext) Resource() webapi.Resource { + return p.resource +} + +func (p pluginContext) ResourceMeta() webapi.ResourceMeta { + return p.resourceMeta +} + +func newPluginContext(resourceMeta webapi.ResourceMeta, resource webapi.Resource, reason string, tCtx webapi.TaskExecutionContext) pluginContext { + return pluginContext{ + TaskExecutionContext: tCtx, + resourceMeta: resourceMeta, + resource: resource, + reason: reason, + } +} diff --git a/flyteplugins/go/tasks/pluginmachinery/internal/webapi/state.go b/flyteplugins/go/tasks/pluginmachinery/internal/webapi/state.go new file mode 100644 index 0000000000..013b9c2103 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/internal/webapi/state.go @@ -0,0 +1,62 @@ +package webapi + +import ( + "time" + + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/webapi" +) + +//go:generate enumer -type=Phase -trimprefix=Phase + +// Phase represents current phase of the execution +type Phase int + +const ( + // PhaseNotStarted the default phase. + PhaseNotStarted Phase = iota + + // PhaseAllocationTokenAcquired once all required tokens have been acquired. The task is ready to be executed + // remotely. 
+ PhaseAllocationTokenAcquired + + // PhaseResourcesCreated indicates the task has been created remotely. + PhaseResourcesCreated + + // The resource has successfully been executed remotely. + PhaseSucceeded + + // The resource has failed to be executed. + PhaseUserFailure + + // The resource has failed to be executed due to a system error. + PhaseSystemFailure +) + +func (i Phase) IsTerminal() bool { + return i == PhaseSucceeded || i == PhaseUserFailure || i == PhaseSystemFailure +} + +// State is the persisted State of the resource. +type State struct { + // Phase current phase of the resource. + Phase Phase `json:"phase,omitempty"` + + // PhaseVersion is the version of the phase. This is used to detect if the phase has changed since the last time + PhaseVersion uint32 + + // ResourceMeta contain metadata about resource this task created. This can be a complex structure or a simple type + // (e.g. a string). It should contain enough information for the plugin to interact (retrieve, check status, delete) + // with the resource through the remote service. + ResourceMeta webapi.ResourceMeta `json:"resourceMeta,omitempty"` + + // This number keeps track of the number of failures within the sync function. Without this, what happens in + // the sync function is entirely opaque. Note that this field is completely orthogonal to Flyte system/node/task + // level retries, just errors from hitting API, inside the sync loop + SyncFailureCount int `json:"syncFailureCount,omitempty"` + + // The time the execution first requests for an allocation token + AllocationTokenRequestStartTime time.Time `json:"allocationTokenRequestStartTime,omitempty"` + + // ErrorMessage generated during cache synchronization. 
+ ErrorMessage string `json:"error_message,omitempty"` +} diff --git a/flyteplugins/go/tasks/pluginmachinery/internal/webapi/state_test.go b/flyteplugins/go/tasks/pluginmachinery/internal/webapi/state_test.go new file mode 100644 index 0000000000..06fcdd3cb2 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/internal/webapi/state_test.go @@ -0,0 +1,24 @@ +package webapi + +import "testing" + +func TestPhase_IsTerminal(t *testing.T) { + tests := []struct { + p Phase + want bool + }{ + {PhaseNotStarted, false}, + {PhaseAllocationTokenAcquired, false}, + {PhaseResourcesCreated, false}, + {PhaseSucceeded, true}, + {PhaseSystemFailure, true}, + {PhaseUserFailure, true}, + } + for _, tt := range tests { + t.Run(tt.p.String(), func(t *testing.T) { + if got := tt.p.IsTerminal(); got != tt.want { + t.Errorf("IsTerminal() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/flyteplugins/go/tasks/pluginmachinery/io/iface.go b/flyteplugins/go/tasks/pluginmachinery/io/iface.go new file mode 100644 index 0000000000..874b7f4cc0 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/io/iface.go @@ -0,0 +1,102 @@ +package io + +import ( + "context" + + "github.com/flyteorg/flyte/v2/flytestdlib/storage" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +//go:generate mockery -all -case=underscore + +// InputFilePaths contains the different ways available for downstream systems to retrieve inputs. +// If using Files for IO with tasks, then the input will be written to this path. All the files are always created in a +// sandbox per execution +type InputFilePaths interface { + // GetInputPrefixPath returns the inputs file path, minus the protobuf file name. + GetInputPrefixPath() storage.DataReference + // GetInputPath returns a path for where the protobuf encoded inputs of type `core.LiteralMap` can be found. 
The returned value is an URN in the configured storage backend + GetInputPath() storage.DataReference +} + +// InputReader provides a method to access the inputs for a task execution within the plugin's Task Context +type InputReader interface { + InputFilePaths + // Get the inputs for this task as a literal map, an error is returned only in case of systemic errors. + // No outputs or void is indicated using *core.LiteralMap -> nil + Get(ctx context.Context) (*core.LiteralMap, error) +} + +// OutputReader provides an abstracted OutputReader interface. The plugins are responsible to provide +// the implementations for the interface. Some helper implementations can be found in ioutils +type OutputReader interface { + // IsError returns true if an error was detected when reading the output and false if no error was detected + IsError(ctx context.Context) (bool, error) + // ReadError returns the error as type ExecutionError + ReadError(ctx context.Context) (ExecutionError, error) + // IsFile returns true if the outputs are using the OutputFilePaths specified files. If so it allows the system to + // optimize the reads of the files + IsFile(ctx context.Context) bool + // Exists returns true if the output exists false otherwise + Exists(ctx context.Context) (bool, error) + // Read returns the output -> *core.LiteralMap (nil if void), *ExecutionError if user error when reading the output and error to indicate system problems + Read(ctx context.Context) (*core.LiteralMap, *ExecutionError, error) + // DeckExists checks if the deck file has been generated. + DeckExists(ctx context.Context) (bool, error) +} + +// CheckpointPaths provides the paths / keys to input Checkpoints directory and an output checkpoints directory. +type CheckpointPaths interface { + // GetPreviousCheckpointsPrefix returns the storage prefix for checkpoints for the previous iteration / attempt. 
+ // It is optional and can be an empty string in some cases + GetPreviousCheckpointsPrefix() storage.DataReference + // GetCheckpointPrefix returns the storage prefix that should be used to store checkpoints for the current attempt + // The path is not accessible to Flyte backend and are stored in the users raw path + GetCheckpointPrefix() storage.DataReference +} + +// RawOutputPaths is the actual path where the data produced by a task can be placed. It is completely optional. The advantage +// of using this path is to provide exactly once semantics. It is guaranteed that this path is unique for every new execution +// of a task (across retries etc) and is constant for a specific execution. +// As of 02/20/2020 Flytekit generates this path randomly for S3. This structure proposes migration of this logic to +// FlytePluginMachinery so that it can be used more universally outside of Flytekit. +type RawOutputPaths interface { + // GetRawOutputPrefix is the prefix (blob store prefix or directory) where all data produced can be stored. + GetRawOutputPrefix() storage.DataReference +} + +// OutputFilePaths contains and provides all paths where various meta outputs produced by the task can be placed, +// such that the framework can directly access them. 
Every path is represented using storage.DataReference -> +// an URN for the configured storage backend +type OutputFilePaths interface { + // RawOutputPaths are available with OutputFilePaths + RawOutputPaths + + // CheckpointPaths that can be optionally used to checkpoint + CheckpointPaths + + // GetOutputPrefixPath returns a path to a directory or prefix that contains all execution metadata for this execution + GetOutputPrefixPath() storage.DataReference + // GetOutputPath returns a fully qualified path (URN) to where the framework expects the output to exist in the configured storage backend + GetOutputPath() storage.DataReference + // GetDeckPath returns a fully qualified path (URN) to where the framework expects the deck.html to exist in the configured storage backend + GetDeckPath() storage.DataReference + // GetErrorPath returns a fully qualified path (URN) where the error information should be placed as a protobuf core.ErrorDocument. It is not directly + // used by the framework, but could be used in the future + GetErrorPath() storage.DataReference +} + +// OutputWriter provides an interface to write back the outputs to the engine. +type OutputWriter interface { + OutputFilePaths + // Put Once the task completes, use this method to indicate the output accessor to the framework + Put(ctx context.Context, reader OutputReader) error +} + +// ExecutionError Indicates any error in executing the task +type ExecutionError struct { + // Core error structure + *core.ExecutionError + // Indicates if this error is recoverable + IsRecoverable bool +} diff --git a/flyteplugins/go/tasks/pluginmachinery/io/mocks/checkpoint_paths.go b/flyteplugins/go/tasks/pluginmachinery/io/mocks/checkpoint_paths.go new file mode 100644 index 0000000000..66d27cbf4c --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/io/mocks/checkpoint_paths.go @@ -0,0 +1,77 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. 
+ +package mocks + +import ( + storage "github.com/flyteorg/flyte/v2/flytestdlib/storage" + mock "github.com/stretchr/testify/mock" +) + +// CheckpointPaths is an autogenerated mock type for the CheckpointPaths type +type CheckpointPaths struct { + mock.Mock +} + +type CheckpointPaths_GetCheckpointPrefix struct { + *mock.Call +} + +func (_m CheckpointPaths_GetCheckpointPrefix) Return(_a0 storage.DataReference) *CheckpointPaths_GetCheckpointPrefix { + return &CheckpointPaths_GetCheckpointPrefix{Call: _m.Call.Return(_a0)} +} + +func (_m *CheckpointPaths) OnGetCheckpointPrefix() *CheckpointPaths_GetCheckpointPrefix { + c_call := _m.On("GetCheckpointPrefix") + return &CheckpointPaths_GetCheckpointPrefix{Call: c_call} +} + +func (_m *CheckpointPaths) OnGetCheckpointPrefixMatch(matchers ...interface{}) *CheckpointPaths_GetCheckpointPrefix { + c_call := _m.On("GetCheckpointPrefix", matchers...) + return &CheckpointPaths_GetCheckpointPrefix{Call: c_call} +} + +// GetCheckpointPrefix provides a mock function with given fields: +func (_m *CheckpointPaths) GetCheckpointPrefix() storage.DataReference { + ret := _m.Called() + + var r0 storage.DataReference + if rf, ok := ret.Get(0).(func() storage.DataReference); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(storage.DataReference) + } + + return r0 +} + +type CheckpointPaths_GetPreviousCheckpointsPrefix struct { + *mock.Call +} + +func (_m CheckpointPaths_GetPreviousCheckpointsPrefix) Return(_a0 storage.DataReference) *CheckpointPaths_GetPreviousCheckpointsPrefix { + return &CheckpointPaths_GetPreviousCheckpointsPrefix{Call: _m.Call.Return(_a0)} +} + +func (_m *CheckpointPaths) OnGetPreviousCheckpointsPrefix() *CheckpointPaths_GetPreviousCheckpointsPrefix { + c_call := _m.On("GetPreviousCheckpointsPrefix") + return &CheckpointPaths_GetPreviousCheckpointsPrefix{Call: c_call} +} + +func (_m *CheckpointPaths) OnGetPreviousCheckpointsPrefixMatch(matchers ...interface{}) *CheckpointPaths_GetPreviousCheckpointsPrefix { + c_call := 
_m.On("GetPreviousCheckpointsPrefix", matchers...) + return &CheckpointPaths_GetPreviousCheckpointsPrefix{Call: c_call} +} + +// GetPreviousCheckpointsPrefix provides a mock function with given fields: +func (_m *CheckpointPaths) GetPreviousCheckpointsPrefix() storage.DataReference { + ret := _m.Called() + + var r0 storage.DataReference + if rf, ok := ret.Get(0).(func() storage.DataReference); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(storage.DataReference) + } + + return r0 +} diff --git a/flyteplugins/go/tasks/pluginmachinery/io/mocks/input_file_paths.go b/flyteplugins/go/tasks/pluginmachinery/io/mocks/input_file_paths.go new file mode 100644 index 0000000000..773590e4d0 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/io/mocks/input_file_paths.go @@ -0,0 +1,77 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. + +package mocks + +import ( + storage "github.com/flyteorg/flyte/v2/flytestdlib/storage" + mock "github.com/stretchr/testify/mock" +) + +// InputFilePaths is an autogenerated mock type for the InputFilePaths type +type InputFilePaths struct { + mock.Mock +} + +type InputFilePaths_GetInputPath struct { + *mock.Call +} + +func (_m InputFilePaths_GetInputPath) Return(_a0 storage.DataReference) *InputFilePaths_GetInputPath { + return &InputFilePaths_GetInputPath{Call: _m.Call.Return(_a0)} +} + +func (_m *InputFilePaths) OnGetInputPath() *InputFilePaths_GetInputPath { + c_call := _m.On("GetInputPath") + return &InputFilePaths_GetInputPath{Call: c_call} +} + +func (_m *InputFilePaths) OnGetInputPathMatch(matchers ...interface{}) *InputFilePaths_GetInputPath { + c_call := _m.On("GetInputPath", matchers...) 
+ return &InputFilePaths_GetInputPath{Call: c_call} +} + +// GetInputPath provides a mock function with given fields: +func (_m *InputFilePaths) GetInputPath() storage.DataReference { + ret := _m.Called() + + var r0 storage.DataReference + if rf, ok := ret.Get(0).(func() storage.DataReference); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(storage.DataReference) + } + + return r0 +} + +type InputFilePaths_GetInputPrefixPath struct { + *mock.Call +} + +func (_m InputFilePaths_GetInputPrefixPath) Return(_a0 storage.DataReference) *InputFilePaths_GetInputPrefixPath { + return &InputFilePaths_GetInputPrefixPath{Call: _m.Call.Return(_a0)} +} + +func (_m *InputFilePaths) OnGetInputPrefixPath() *InputFilePaths_GetInputPrefixPath { + c_call := _m.On("GetInputPrefixPath") + return &InputFilePaths_GetInputPrefixPath{Call: c_call} +} + +func (_m *InputFilePaths) OnGetInputPrefixPathMatch(matchers ...interface{}) *InputFilePaths_GetInputPrefixPath { + c_call := _m.On("GetInputPrefixPath", matchers...) + return &InputFilePaths_GetInputPrefixPath{Call: c_call} +} + +// GetInputPrefixPath provides a mock function with given fields: +func (_m *InputFilePaths) GetInputPrefixPath() storage.DataReference { + ret := _m.Called() + + var r0 storage.DataReference + if rf, ok := ret.Get(0).(func() storage.DataReference); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(storage.DataReference) + } + + return r0 +} diff --git a/flyteplugins/go/tasks/pluginmachinery/io/mocks/input_reader.go b/flyteplugins/go/tasks/pluginmachinery/io/mocks/input_reader.go new file mode 100644 index 0000000000..10f713d4f3 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/io/mocks/input_reader.go @@ -0,0 +1,123 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + core "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" + + mock "github.com/stretchr/testify/mock" + + storage "github.com/flyteorg/flyte/v2/flytestdlib/storage" +) + +// InputReader is an autogenerated mock type for the InputReader type +type InputReader struct { + mock.Mock +} + +type InputReader_Get struct { + *mock.Call +} + +func (_m InputReader_Get) Return(_a0 *core.LiteralMap, _a1 error) *InputReader_Get { + return &InputReader_Get{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *InputReader) OnGet(ctx context.Context) *InputReader_Get { + c_call := _m.On("Get", ctx) + return &InputReader_Get{Call: c_call} +} + +func (_m *InputReader) OnGetMatch(matchers ...interface{}) *InputReader_Get { + c_call := _m.On("Get", matchers...) + return &InputReader_Get{Call: c_call} +} + +// Get provides a mock function with given fields: ctx +func (_m *InputReader) Get(ctx context.Context) (*core.LiteralMap, error) { + ret := _m.Called(ctx) + + var r0 *core.LiteralMap + if rf, ok := ret.Get(0).(func(context.Context) *core.LiteralMap); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*core.LiteralMap) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type InputReader_GetInputPath struct { + *mock.Call +} + +func (_m InputReader_GetInputPath) Return(_a0 storage.DataReference) *InputReader_GetInputPath { + return &InputReader_GetInputPath{Call: _m.Call.Return(_a0)} +} + +func (_m *InputReader) OnGetInputPath() *InputReader_GetInputPath { + c_call := _m.On("GetInputPath") + return &InputReader_GetInputPath{Call: c_call} +} + +func (_m *InputReader) OnGetInputPathMatch(matchers ...interface{}) *InputReader_GetInputPath { + c_call := _m.On("GetInputPath", matchers...) 
+ return &InputReader_GetInputPath{Call: c_call} +} + +// GetInputPath provides a mock function with given fields: +func (_m *InputReader) GetInputPath() storage.DataReference { + ret := _m.Called() + + var r0 storage.DataReference + if rf, ok := ret.Get(0).(func() storage.DataReference); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(storage.DataReference) + } + + return r0 +} + +type InputReader_GetInputPrefixPath struct { + *mock.Call +} + +func (_m InputReader_GetInputPrefixPath) Return(_a0 storage.DataReference) *InputReader_GetInputPrefixPath { + return &InputReader_GetInputPrefixPath{Call: _m.Call.Return(_a0)} +} + +func (_m *InputReader) OnGetInputPrefixPath() *InputReader_GetInputPrefixPath { + c_call := _m.On("GetInputPrefixPath") + return &InputReader_GetInputPrefixPath{Call: c_call} +} + +func (_m *InputReader) OnGetInputPrefixPathMatch(matchers ...interface{}) *InputReader_GetInputPrefixPath { + c_call := _m.On("GetInputPrefixPath", matchers...) + return &InputReader_GetInputPrefixPath{Call: c_call} +} + +// GetInputPrefixPath provides a mock function with given fields: +func (_m *InputReader) GetInputPrefixPath() storage.DataReference { + ret := _m.Called() + + var r0 storage.DataReference + if rf, ok := ret.Get(0).(func() storage.DataReference); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(storage.DataReference) + } + + return r0 +} diff --git a/flyteplugins/go/tasks/pluginmachinery/io/mocks/output_data_sandbox.go b/flyteplugins/go/tasks/pluginmachinery/io/mocks/output_data_sandbox.go new file mode 100644 index 0000000000..29034d0184 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/io/mocks/output_data_sandbox.go @@ -0,0 +1,45 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. 
+ +package mocks + +import ( + storage "github.com/flyteorg/flyte/v2/flytestdlib/storage" + mock "github.com/stretchr/testify/mock" +) + +// OutputDataSandbox is an autogenerated mock type for the RawOutputPaths type +type OutputDataSandbox struct { + mock.Mock +} + +type OutputDataSandbox_GetOutputDataSandboxPath struct { + *mock.Call +} + +func (_m OutputDataSandbox_GetOutputDataSandboxPath) Return(_a0 storage.DataReference) *OutputDataSandbox_GetOutputDataSandboxPath { + return &OutputDataSandbox_GetOutputDataSandboxPath{Call: _m.Call.Return(_a0)} +} + +func (_m *OutputDataSandbox) OnGetOutputDataSandboxPath() *OutputDataSandbox_GetOutputDataSandboxPath { + c := _m.On("GetRawOutputPrefix") + return &OutputDataSandbox_GetOutputDataSandboxPath{Call: c} +} + +func (_m *OutputDataSandbox) OnGetOutputDataSandboxPathMatch(matchers ...interface{}) *OutputDataSandbox_GetOutputDataSandboxPath { + c := _m.On("GetRawOutputPrefix", matchers...) + return &OutputDataSandbox_GetOutputDataSandboxPath{Call: c} +} + +// GetRawOutputPrefix provides a mock function with given fields: +func (_m *OutputDataSandbox) GetRawOutputPrefix() storage.DataReference { + ret := _m.Called() + + var r0 storage.DataReference + if rf, ok := ret.Get(0).(func() storage.DataReference); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(storage.DataReference) + } + + return r0 +} diff --git a/flyteplugins/go/tasks/pluginmachinery/io/mocks/output_file_paths.go b/flyteplugins/go/tasks/pluginmachinery/io/mocks/output_file_paths.go new file mode 100644 index 0000000000..0ec0906673 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/io/mocks/output_file_paths.go @@ -0,0 +1,237 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. 
+ +package mocks + +import ( + storage "github.com/flyteorg/flyte/v2/flytestdlib/storage" + mock "github.com/stretchr/testify/mock" +) + +// OutputFilePaths is an autogenerated mock type for the OutputFilePaths type +type OutputFilePaths struct { + mock.Mock +} + +type OutputFilePaths_GetCheckpointPrefix struct { + *mock.Call +} + +func (_m OutputFilePaths_GetCheckpointPrefix) Return(_a0 storage.DataReference) *OutputFilePaths_GetCheckpointPrefix { + return &OutputFilePaths_GetCheckpointPrefix{Call: _m.Call.Return(_a0)} +} + +func (_m *OutputFilePaths) OnGetCheckpointPrefix() *OutputFilePaths_GetCheckpointPrefix { + c_call := _m.On("GetCheckpointPrefix") + return &OutputFilePaths_GetCheckpointPrefix{Call: c_call} +} + +func (_m *OutputFilePaths) OnGetCheckpointPrefixMatch(matchers ...interface{}) *OutputFilePaths_GetCheckpointPrefix { + c_call := _m.On("GetCheckpointPrefix", matchers...) + return &OutputFilePaths_GetCheckpointPrefix{Call: c_call} +} + +// GetCheckpointPrefix provides a mock function with given fields: +func (_m *OutputFilePaths) GetCheckpointPrefix() storage.DataReference { + ret := _m.Called() + + var r0 storage.DataReference + if rf, ok := ret.Get(0).(func() storage.DataReference); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(storage.DataReference) + } + + return r0 +} + +type OutputFilePaths_GetDeckPath struct { + *mock.Call +} + +func (_m OutputFilePaths_GetDeckPath) Return(_a0 storage.DataReference) *OutputFilePaths_GetDeckPath { + return &OutputFilePaths_GetDeckPath{Call: _m.Call.Return(_a0)} +} + +func (_m *OutputFilePaths) OnGetDeckPath() *OutputFilePaths_GetDeckPath { + c_call := _m.On("GetDeckPath") + return &OutputFilePaths_GetDeckPath{Call: c_call} +} + +func (_m *OutputFilePaths) OnGetDeckPathMatch(matchers ...interface{}) *OutputFilePaths_GetDeckPath { + c_call := _m.On("GetDeckPath", matchers...) 
+ return &OutputFilePaths_GetDeckPath{Call: c_call} +} + +// GetDeckPath provides a mock function with given fields: +func (_m *OutputFilePaths) GetDeckPath() storage.DataReference { + ret := _m.Called() + + var r0 storage.DataReference + if rf, ok := ret.Get(0).(func() storage.DataReference); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(storage.DataReference) + } + + return r0 +} + +type OutputFilePaths_GetErrorPath struct { + *mock.Call +} + +func (_m OutputFilePaths_GetErrorPath) Return(_a0 storage.DataReference) *OutputFilePaths_GetErrorPath { + return &OutputFilePaths_GetErrorPath{Call: _m.Call.Return(_a0)} +} + +func (_m *OutputFilePaths) OnGetErrorPath() *OutputFilePaths_GetErrorPath { + c_call := _m.On("GetErrorPath") + return &OutputFilePaths_GetErrorPath{Call: c_call} +} + +func (_m *OutputFilePaths) OnGetErrorPathMatch(matchers ...interface{}) *OutputFilePaths_GetErrorPath { + c_call := _m.On("GetErrorPath", matchers...) + return &OutputFilePaths_GetErrorPath{Call: c_call} +} + +// GetErrorPath provides a mock function with given fields: +func (_m *OutputFilePaths) GetErrorPath() storage.DataReference { + ret := _m.Called() + + var r0 storage.DataReference + if rf, ok := ret.Get(0).(func() storage.DataReference); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(storage.DataReference) + } + + return r0 +} + +type OutputFilePaths_GetOutputPath struct { + *mock.Call +} + +func (_m OutputFilePaths_GetOutputPath) Return(_a0 storage.DataReference) *OutputFilePaths_GetOutputPath { + return &OutputFilePaths_GetOutputPath{Call: _m.Call.Return(_a0)} +} + +func (_m *OutputFilePaths) OnGetOutputPath() *OutputFilePaths_GetOutputPath { + c_call := _m.On("GetOutputPath") + return &OutputFilePaths_GetOutputPath{Call: c_call} +} + +func (_m *OutputFilePaths) OnGetOutputPathMatch(matchers ...interface{}) *OutputFilePaths_GetOutputPath { + c_call := _m.On("GetOutputPath", matchers...) 
+ return &OutputFilePaths_GetOutputPath{Call: c_call} +} + +// GetOutputPath provides a mock function with given fields: +func (_m *OutputFilePaths) GetOutputPath() storage.DataReference { + ret := _m.Called() + + var r0 storage.DataReference + if rf, ok := ret.Get(0).(func() storage.DataReference); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(storage.DataReference) + } + + return r0 +} + +type OutputFilePaths_GetOutputPrefixPath struct { + *mock.Call +} + +func (_m OutputFilePaths_GetOutputPrefixPath) Return(_a0 storage.DataReference) *OutputFilePaths_GetOutputPrefixPath { + return &OutputFilePaths_GetOutputPrefixPath{Call: _m.Call.Return(_a0)} +} + +func (_m *OutputFilePaths) OnGetOutputPrefixPath() *OutputFilePaths_GetOutputPrefixPath { + c_call := _m.On("GetOutputPrefixPath") + return &OutputFilePaths_GetOutputPrefixPath{Call: c_call} +} + +func (_m *OutputFilePaths) OnGetOutputPrefixPathMatch(matchers ...interface{}) *OutputFilePaths_GetOutputPrefixPath { + c_call := _m.On("GetOutputPrefixPath", matchers...) 
+ return &OutputFilePaths_GetOutputPrefixPath{Call: c_call} +} + +// GetOutputPrefixPath provides a mock function with given fields: +func (_m *OutputFilePaths) GetOutputPrefixPath() storage.DataReference { + ret := _m.Called() + + var r0 storage.DataReference + if rf, ok := ret.Get(0).(func() storage.DataReference); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(storage.DataReference) + } + + return r0 +} + +type OutputFilePaths_GetPreviousCheckpointsPrefix struct { + *mock.Call +} + +func (_m OutputFilePaths_GetPreviousCheckpointsPrefix) Return(_a0 storage.DataReference) *OutputFilePaths_GetPreviousCheckpointsPrefix { + return &OutputFilePaths_GetPreviousCheckpointsPrefix{Call: _m.Call.Return(_a0)} +} + +func (_m *OutputFilePaths) OnGetPreviousCheckpointsPrefix() *OutputFilePaths_GetPreviousCheckpointsPrefix { + c_call := _m.On("GetPreviousCheckpointsPrefix") + return &OutputFilePaths_GetPreviousCheckpointsPrefix{Call: c_call} +} + +func (_m *OutputFilePaths) OnGetPreviousCheckpointsPrefixMatch(matchers ...interface{}) *OutputFilePaths_GetPreviousCheckpointsPrefix { + c_call := _m.On("GetPreviousCheckpointsPrefix", matchers...) 
+ return &OutputFilePaths_GetPreviousCheckpointsPrefix{Call: c_call} +} + +// GetPreviousCheckpointsPrefix provides a mock function with given fields: +func (_m *OutputFilePaths) GetPreviousCheckpointsPrefix() storage.DataReference { + ret := _m.Called() + + var r0 storage.DataReference + if rf, ok := ret.Get(0).(func() storage.DataReference); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(storage.DataReference) + } + + return r0 +} + +type OutputFilePaths_GetRawOutputPrefix struct { + *mock.Call +} + +func (_m OutputFilePaths_GetRawOutputPrefix) Return(_a0 storage.DataReference) *OutputFilePaths_GetRawOutputPrefix { + return &OutputFilePaths_GetRawOutputPrefix{Call: _m.Call.Return(_a0)} +} + +func (_m *OutputFilePaths) OnGetRawOutputPrefix() *OutputFilePaths_GetRawOutputPrefix { + c_call := _m.On("GetRawOutputPrefix") + return &OutputFilePaths_GetRawOutputPrefix{Call: c_call} +} + +func (_m *OutputFilePaths) OnGetRawOutputPrefixMatch(matchers ...interface{}) *OutputFilePaths_GetRawOutputPrefix { + c_call := _m.On("GetRawOutputPrefix", matchers...) + return &OutputFilePaths_GetRawOutputPrefix{Call: c_call} +} + +// GetRawOutputPrefix provides a mock function with given fields: +func (_m *OutputFilePaths) GetRawOutputPrefix() storage.DataReference { + ret := _m.Called() + + var r0 storage.DataReference + if rf, ok := ret.Get(0).(func() storage.DataReference); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(storage.DataReference) + } + + return r0 +} diff --git a/flyteplugins/go/tasks/pluginmachinery/io/mocks/output_reader.go b/flyteplugins/go/tasks/pluginmachinery/io/mocks/output_reader.go new file mode 100644 index 0000000000..c4ad4b7838 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/io/mocks/output_reader.go @@ -0,0 +1,255 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + core "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" + io "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/io" + + mock "github.com/stretchr/testify/mock" +) + +// OutputReader is an autogenerated mock type for the OutputReader type +type OutputReader struct { + mock.Mock +} + +type OutputReader_DeckExists struct { + *mock.Call +} + +func (_m OutputReader_DeckExists) Return(_a0 bool, _a1 error) *OutputReader_DeckExists { + return &OutputReader_DeckExists{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *OutputReader) OnDeckExists(ctx context.Context) *OutputReader_DeckExists { + c_call := _m.On("DeckExists", ctx) + return &OutputReader_DeckExists{Call: c_call} +} + +func (_m *OutputReader) OnDeckExistsMatch(matchers ...interface{}) *OutputReader_DeckExists { + c_call := _m.On("DeckExists", matchers...) + return &OutputReader_DeckExists{Call: c_call} +} + +// DeckExists provides a mock function with given fields: ctx +func (_m *OutputReader) DeckExists(ctx context.Context) (bool, error) { + ret := _m.Called(ctx) + + var r0 bool + if rf, ok := ret.Get(0).(func(context.Context) bool); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(bool) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type OutputReader_Exists struct { + *mock.Call +} + +func (_m OutputReader_Exists) Return(_a0 bool, _a1 error) *OutputReader_Exists { + return &OutputReader_Exists{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *OutputReader) OnExists(ctx context.Context) *OutputReader_Exists { + c_call := _m.On("Exists", ctx) + return &OutputReader_Exists{Call: c_call} +} + +func (_m *OutputReader) OnExistsMatch(matchers ...interface{}) *OutputReader_Exists { + c_call := _m.On("Exists", matchers...) 
+ return &OutputReader_Exists{Call: c_call} +} + +// Exists provides a mock function with given fields: ctx +func (_m *OutputReader) Exists(ctx context.Context) (bool, error) { + ret := _m.Called(ctx) + + var r0 bool + if rf, ok := ret.Get(0).(func(context.Context) bool); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(bool) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type OutputReader_IsError struct { + *mock.Call +} + +func (_m OutputReader_IsError) Return(_a0 bool, _a1 error) *OutputReader_IsError { + return &OutputReader_IsError{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *OutputReader) OnIsError(ctx context.Context) *OutputReader_IsError { + c_call := _m.On("IsError", ctx) + return &OutputReader_IsError{Call: c_call} +} + +func (_m *OutputReader) OnIsErrorMatch(matchers ...interface{}) *OutputReader_IsError { + c_call := _m.On("IsError", matchers...) + return &OutputReader_IsError{Call: c_call} +} + +// IsError provides a mock function with given fields: ctx +func (_m *OutputReader) IsError(ctx context.Context) (bool, error) { + ret := _m.Called(ctx) + + var r0 bool + if rf, ok := ret.Get(0).(func(context.Context) bool); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(bool) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type OutputReader_IsFile struct { + *mock.Call +} + +func (_m OutputReader_IsFile) Return(_a0 bool) *OutputReader_IsFile { + return &OutputReader_IsFile{Call: _m.Call.Return(_a0)} +} + +func (_m *OutputReader) OnIsFile(ctx context.Context) *OutputReader_IsFile { + c_call := _m.On("IsFile", ctx) + return &OutputReader_IsFile{Call: c_call} +} + +func (_m *OutputReader) OnIsFileMatch(matchers ...interface{}) *OutputReader_IsFile { + c_call := _m.On("IsFile", matchers...) 
+ return &OutputReader_IsFile{Call: c_call} +} + +// IsFile provides a mock function with given fields: ctx +func (_m *OutputReader) IsFile(ctx context.Context) bool { + ret := _m.Called(ctx) + + var r0 bool + if rf, ok := ret.Get(0).(func(context.Context) bool); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +type OutputReader_Read struct { + *mock.Call +} + +func (_m OutputReader_Read) Return(_a0 *core.LiteralMap, _a1 *io.ExecutionError, _a2 error) *OutputReader_Read { + return &OutputReader_Read{Call: _m.Call.Return(_a0, _a1, _a2)} +} + +func (_m *OutputReader) OnRead(ctx context.Context) *OutputReader_Read { + c_call := _m.On("Read", ctx) + return &OutputReader_Read{Call: c_call} +} + +func (_m *OutputReader) OnReadMatch(matchers ...interface{}) *OutputReader_Read { + c_call := _m.On("Read", matchers...) + return &OutputReader_Read{Call: c_call} +} + +// Read provides a mock function with given fields: ctx +func (_m *OutputReader) Read(ctx context.Context) (*core.LiteralMap, *io.ExecutionError, error) { + ret := _m.Called(ctx) + + var r0 *core.LiteralMap + if rf, ok := ret.Get(0).(func(context.Context) *core.LiteralMap); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*core.LiteralMap) + } + } + + var r1 *io.ExecutionError + if rf, ok := ret.Get(1).(func(context.Context) *io.ExecutionError); ok { + r1 = rf(ctx) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(*io.ExecutionError) + } + } + + var r2 error + if rf, ok := ret.Get(2).(func(context.Context) error); ok { + r2 = rf(ctx) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +type OutputReader_ReadError struct { + *mock.Call +} + +func (_m OutputReader_ReadError) Return(_a0 io.ExecutionError, _a1 error) *OutputReader_ReadError { + return &OutputReader_ReadError{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *OutputReader) OnReadError(ctx context.Context) *OutputReader_ReadError { + c_call := _m.On("ReadError", ctx) + return 
&OutputReader_ReadError{Call: c_call} +} + +func (_m *OutputReader) OnReadErrorMatch(matchers ...interface{}) *OutputReader_ReadError { + c_call := _m.On("ReadError", matchers...) + return &OutputReader_ReadError{Call: c_call} +} + +// ReadError provides a mock function with given fields: ctx +func (_m *OutputReader) ReadError(ctx context.Context) (io.ExecutionError, error) { + ret := _m.Called(ctx) + + var r0 io.ExecutionError + if rf, ok := ret.Get(0).(func(context.Context) io.ExecutionError); ok { + r0 = rf(ctx) + } else { + r0 = ret.Get(0).(io.ExecutionError) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/flyteplugins/go/tasks/pluginmachinery/io/mocks/output_writer.go b/flyteplugins/go/tasks/pluginmachinery/io/mocks/output_writer.go new file mode 100644 index 0000000000..b3cce7012d --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/io/mocks/output_writer.go @@ -0,0 +1,273 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + io "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/io" + mock "github.com/stretchr/testify/mock" + + storage "github.com/flyteorg/flyte/v2/flytestdlib/storage" +) + +// OutputWriter is an autogenerated mock type for the OutputWriter type +type OutputWriter struct { + mock.Mock +} + +type OutputWriter_GetCheckpointPrefix struct { + *mock.Call +} + +func (_m OutputWriter_GetCheckpointPrefix) Return(_a0 storage.DataReference) *OutputWriter_GetCheckpointPrefix { + return &OutputWriter_GetCheckpointPrefix{Call: _m.Call.Return(_a0)} +} + +func (_m *OutputWriter) OnGetCheckpointPrefix() *OutputWriter_GetCheckpointPrefix { + c_call := _m.On("GetCheckpointPrefix") + return &OutputWriter_GetCheckpointPrefix{Call: c_call} +} + +func (_m *OutputWriter) OnGetCheckpointPrefixMatch(matchers ...interface{}) *OutputWriter_GetCheckpointPrefix { + c_call := _m.On("GetCheckpointPrefix", matchers...) + return &OutputWriter_GetCheckpointPrefix{Call: c_call} +} + +// GetCheckpointPrefix provides a mock function with given fields: +func (_m *OutputWriter) GetCheckpointPrefix() storage.DataReference { + ret := _m.Called() + + var r0 storage.DataReference + if rf, ok := ret.Get(0).(func() storage.DataReference); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(storage.DataReference) + } + + return r0 +} + +type OutputWriter_GetDeckPath struct { + *mock.Call +} + +func (_m OutputWriter_GetDeckPath) Return(_a0 storage.DataReference) *OutputWriter_GetDeckPath { + return &OutputWriter_GetDeckPath{Call: _m.Call.Return(_a0)} +} + +func (_m *OutputWriter) OnGetDeckPath() *OutputWriter_GetDeckPath { + c_call := _m.On("GetDeckPath") + return &OutputWriter_GetDeckPath{Call: c_call} +} + +func (_m *OutputWriter) OnGetDeckPathMatch(matchers ...interface{}) *OutputWriter_GetDeckPath { + c_call := _m.On("GetDeckPath", matchers...) 
+ return &OutputWriter_GetDeckPath{Call: c_call} +} + +// GetDeckPath provides a mock function with given fields: +func (_m *OutputWriter) GetDeckPath() storage.DataReference { + ret := _m.Called() + + var r0 storage.DataReference + if rf, ok := ret.Get(0).(func() storage.DataReference); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(storage.DataReference) + } + + return r0 +} + +type OutputWriter_GetErrorPath struct { + *mock.Call +} + +func (_m OutputWriter_GetErrorPath) Return(_a0 storage.DataReference) *OutputWriter_GetErrorPath { + return &OutputWriter_GetErrorPath{Call: _m.Call.Return(_a0)} +} + +func (_m *OutputWriter) OnGetErrorPath() *OutputWriter_GetErrorPath { + c_call := _m.On("GetErrorPath") + return &OutputWriter_GetErrorPath{Call: c_call} +} + +func (_m *OutputWriter) OnGetErrorPathMatch(matchers ...interface{}) *OutputWriter_GetErrorPath { + c_call := _m.On("GetErrorPath", matchers...) + return &OutputWriter_GetErrorPath{Call: c_call} +} + +// GetErrorPath provides a mock function with given fields: +func (_m *OutputWriter) GetErrorPath() storage.DataReference { + ret := _m.Called() + + var r0 storage.DataReference + if rf, ok := ret.Get(0).(func() storage.DataReference); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(storage.DataReference) + } + + return r0 +} + +type OutputWriter_GetOutputPath struct { + *mock.Call +} + +func (_m OutputWriter_GetOutputPath) Return(_a0 storage.DataReference) *OutputWriter_GetOutputPath { + return &OutputWriter_GetOutputPath{Call: _m.Call.Return(_a0)} +} + +func (_m *OutputWriter) OnGetOutputPath() *OutputWriter_GetOutputPath { + c_call := _m.On("GetOutputPath") + return &OutputWriter_GetOutputPath{Call: c_call} +} + +func (_m *OutputWriter) OnGetOutputPathMatch(matchers ...interface{}) *OutputWriter_GetOutputPath { + c_call := _m.On("GetOutputPath", matchers...) 
+ return &OutputWriter_GetOutputPath{Call: c_call} +} + +// GetOutputPath provides a mock function with given fields: +func (_m *OutputWriter) GetOutputPath() storage.DataReference { + ret := _m.Called() + + var r0 storage.DataReference + if rf, ok := ret.Get(0).(func() storage.DataReference); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(storage.DataReference) + } + + return r0 +} + +type OutputWriter_GetOutputPrefixPath struct { + *mock.Call +} + +func (_m OutputWriter_GetOutputPrefixPath) Return(_a0 storage.DataReference) *OutputWriter_GetOutputPrefixPath { + return &OutputWriter_GetOutputPrefixPath{Call: _m.Call.Return(_a0)} +} + +func (_m *OutputWriter) OnGetOutputPrefixPath() *OutputWriter_GetOutputPrefixPath { + c_call := _m.On("GetOutputPrefixPath") + return &OutputWriter_GetOutputPrefixPath{Call: c_call} +} + +func (_m *OutputWriter) OnGetOutputPrefixPathMatch(matchers ...interface{}) *OutputWriter_GetOutputPrefixPath { + c_call := _m.On("GetOutputPrefixPath", matchers...) + return &OutputWriter_GetOutputPrefixPath{Call: c_call} +} + +// GetOutputPrefixPath provides a mock function with given fields: +func (_m *OutputWriter) GetOutputPrefixPath() storage.DataReference { + ret := _m.Called() + + var r0 storage.DataReference + if rf, ok := ret.Get(0).(func() storage.DataReference); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(storage.DataReference) + } + + return r0 +} + +type OutputWriter_GetPreviousCheckpointsPrefix struct { + *mock.Call +} + +func (_m OutputWriter_GetPreviousCheckpointsPrefix) Return(_a0 storage.DataReference) *OutputWriter_GetPreviousCheckpointsPrefix { + return &OutputWriter_GetPreviousCheckpointsPrefix{Call: _m.Call.Return(_a0)} +} + +func (_m *OutputWriter) OnGetPreviousCheckpointsPrefix() *OutputWriter_GetPreviousCheckpointsPrefix { + c_call := _m.On("GetPreviousCheckpointsPrefix") + return &OutputWriter_GetPreviousCheckpointsPrefix{Call: c_call} +} + +func (_m *OutputWriter) OnGetPreviousCheckpointsPrefixMatch(matchers 
...interface{}) *OutputWriter_GetPreviousCheckpointsPrefix { + c_call := _m.On("GetPreviousCheckpointsPrefix", matchers...) + return &OutputWriter_GetPreviousCheckpointsPrefix{Call: c_call} +} + +// GetPreviousCheckpointsPrefix provides a mock function with given fields: +func (_m *OutputWriter) GetPreviousCheckpointsPrefix() storage.DataReference { + ret := _m.Called() + + var r0 storage.DataReference + if rf, ok := ret.Get(0).(func() storage.DataReference); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(storage.DataReference) + } + + return r0 +} + +type OutputWriter_GetRawOutputPrefix struct { + *mock.Call +} + +func (_m OutputWriter_GetRawOutputPrefix) Return(_a0 storage.DataReference) *OutputWriter_GetRawOutputPrefix { + return &OutputWriter_GetRawOutputPrefix{Call: _m.Call.Return(_a0)} +} + +func (_m *OutputWriter) OnGetRawOutputPrefix() *OutputWriter_GetRawOutputPrefix { + c_call := _m.On("GetRawOutputPrefix") + return &OutputWriter_GetRawOutputPrefix{Call: c_call} +} + +func (_m *OutputWriter) OnGetRawOutputPrefixMatch(matchers ...interface{}) *OutputWriter_GetRawOutputPrefix { + c_call := _m.On("GetRawOutputPrefix", matchers...) 
+ return &OutputWriter_GetRawOutputPrefix{Call: c_call} +} + +// GetRawOutputPrefix provides a mock function with given fields: +func (_m *OutputWriter) GetRawOutputPrefix() storage.DataReference { + ret := _m.Called() + + var r0 storage.DataReference + if rf, ok := ret.Get(0).(func() storage.DataReference); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(storage.DataReference) + } + + return r0 +} + +type OutputWriter_Put struct { + *mock.Call +} + +func (_m OutputWriter_Put) Return(_a0 error) *OutputWriter_Put { + return &OutputWriter_Put{Call: _m.Call.Return(_a0)} +} + +func (_m *OutputWriter) OnPut(ctx context.Context, reader io.OutputReader) *OutputWriter_Put { + c_call := _m.On("Put", ctx, reader) + return &OutputWriter_Put{Call: c_call} +} + +func (_m *OutputWriter) OnPutMatch(matchers ...interface{}) *OutputWriter_Put { + c_call := _m.On("Put", matchers...) + return &OutputWriter_Put{Call: c_call} +} + +// Put provides a mock function with given fields: ctx, reader +func (_m *OutputWriter) Put(ctx context.Context, reader io.OutputReader) error { + ret := _m.Called(ctx, reader) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, io.OutputReader) error); ok { + r0 = rf(ctx, reader) + } else { + r0 = ret.Error(0) + } + + return r0 +} diff --git a/flyteplugins/go/tasks/pluginmachinery/io/mocks/raw_output_paths.go b/flyteplugins/go/tasks/pluginmachinery/io/mocks/raw_output_paths.go new file mode 100644 index 0000000000..c21dde1e26 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/io/mocks/raw_output_paths.go @@ -0,0 +1,45 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. 
+ +package mocks + +import ( + storage "github.com/flyteorg/flyte/v2/flytestdlib/storage" + mock "github.com/stretchr/testify/mock" +) + +// RawOutputPaths is an autogenerated mock type for the RawOutputPaths type +type RawOutputPaths struct { + mock.Mock +} + +type RawOutputPaths_GetRawOutputPrefix struct { + *mock.Call +} + +func (_m RawOutputPaths_GetRawOutputPrefix) Return(_a0 storage.DataReference) *RawOutputPaths_GetRawOutputPrefix { + return &RawOutputPaths_GetRawOutputPrefix{Call: _m.Call.Return(_a0)} +} + +func (_m *RawOutputPaths) OnGetRawOutputPrefix() *RawOutputPaths_GetRawOutputPrefix { + c_call := _m.On("GetRawOutputPrefix") + return &RawOutputPaths_GetRawOutputPrefix{Call: c_call} +} + +func (_m *RawOutputPaths) OnGetRawOutputPrefixMatch(matchers ...interface{}) *RawOutputPaths_GetRawOutputPrefix { + c_call := _m.On("GetRawOutputPrefix", matchers...) + return &RawOutputPaths_GetRawOutputPrefix{Call: c_call} +} + +// GetRawOutputPrefix provides a mock function with given fields: +func (_m *RawOutputPaths) GetRawOutputPrefix() storage.DataReference { + ret := _m.Called() + + var r0 storage.DataReference + if rf, ok := ret.Get(0).(func() storage.DataReference); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(storage.DataReference) + } + + return r0 +} diff --git a/flyteplugins/go/tasks/pluginmachinery/ioutils/buffered_output_writer.go b/flyteplugins/go/tasks/pluginmachinery/ioutils/buffered_output_writer.go new file mode 100644 index 0000000000..d6da8c7fe3 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/ioutils/buffered_output_writer.go @@ -0,0 +1,29 @@ +package ioutils + +import ( + "context" + + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/io" +) + +// A Buffered outputWriter just records the io.OutputReader and can be accessed using special methods. 
+type BufferedOutputWriter struct { + io.OutputFilePaths + outReader io.OutputReader +} + +func (o *BufferedOutputWriter) Put(ctx context.Context, reader io.OutputReader) error { + o.outReader = reader + return nil +} + +func (o *BufferedOutputWriter) GetReader() io.OutputReader { + return o.outReader +} + +// Returns a new object of type BufferedOutputWriter +func NewBufferedOutputWriter(ctx context.Context, paths io.OutputFilePaths) *BufferedOutputWriter { + return &BufferedOutputWriter{ + OutputFilePaths: paths, + } +} diff --git a/flyteplugins/go/tasks/pluginmachinery/ioutils/cached_input_reader.go b/flyteplugins/go/tasks/pluginmachinery/ioutils/cached_input_reader.go new file mode 100644 index 0000000000..ee12a31d7b --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/ioutils/cached_input_reader.go @@ -0,0 +1,32 @@ +package ioutils + +import ( + "context" + + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/io" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +type cachedInputReader struct { + io.InputReader + cachedInputs *core.LiteralMap +} + +func (c *cachedInputReader) Get(ctx context.Context) (*core.LiteralMap, error) { + if c.cachedInputs == nil { + in, err := c.InputReader.Get(ctx) + if err != nil { + return nil, err + } + c.cachedInputs = in + } + return c.cachedInputs, nil +} + +// Creates a new Read-through cached Input Reader. 
the returned reader is not thread-safe +// It caches the inputs on a successful read from the underlying input reader +func NewCachedInputReader(ctx context.Context, in io.InputReader) io.InputReader { + return &cachedInputReader{ + InputReader: in, + } +} diff --git a/flyteplugins/go/tasks/pluginmachinery/ioutils/config.go b/flyteplugins/go/tasks/pluginmachinery/ioutils/config.go new file mode 100644 index 0000000000..ad50995a9c --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/ioutils/config.go @@ -0,0 +1,27 @@ +package ioutils + +import ( + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/config" +) + +//go:generate pflags Config --default-var=defaultConfig + +var cfgSection = config.MustRegisterSubSection("ioutils", defaultConfig) + +type Config struct { + RemoteFileOutputPathsConfig RemoteFileOutputPathsConfig `json:"remoteFileOutputPaths" pflag:",Config for remote file output paths."` +} + +type RemoteFileOutputPathsConfig struct { + DeckFilename string `json:"deckFilename" pflag:",Filename to use for the deck file."` +} + +var defaultConfig = &Config{ + RemoteFileOutputPathsConfig: RemoteFileOutputPathsConfig{ + DeckFilename: "deck.html", + }, +} + +func GetConfig() *Config { + return cfgSection.GetConfig().(*Config) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/ioutils/config_flags.go b/flyteplugins/go/tasks/pluginmachinery/ioutils/config_flags.go new file mode 100755 index 0000000000..2658e74658 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/ioutils/config_flags.go @@ -0,0 +1,55 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package ioutils + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. 
+func (Config) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (Config) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (Config) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in Config and its nested types. The format of the +// flags is json-name.json-sub-name... etc. +func (cfg Config) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("Config", pflag.ExitOnError) + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "remoteFileOutputPaths.deckFilename"), defaultConfig.RemoteFileOutputPathsConfig.DeckFilename, "Filename to use for the deck file.") + return cmdFlags +} diff --git a/flyteplugins/go/tasks/pluginmachinery/ioutils/config_flags_test.go b/flyteplugins/go/tasks/pluginmachinery/ioutils/config_flags_test.go new file mode 100755 index 0000000000..4db1b9fd1b --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/ioutils/config_flags_test.go @@ -0,0 +1,116 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package ioutils + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. 
+func canGetElementConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. +func jsonUnmarshalerHookConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_Config(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_Config(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_Config(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_Config(val, result)) +} + +func testDecodeRaw_Config(t *testing.T, vStringSlice, result interface{}) { + 
assert.NoError(t, decode_Config(vStringSlice, result)) +} + +func TestConfig_GetPFlagSet(t *testing.T) { + val := Config{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) +} + +func TestConfig_SetFlags(t *testing.T) { + actual := Config{} + cmdFlags := actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_remoteFileOutputPaths.deckFilename", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("remoteFileOutputPaths.deckFilename", testValue) + if vString, err := cmdFlags.GetString("remoteFileOutputPaths.deckFilename"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.RemoteFileOutputPathsConfig.DeckFilename) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/ioutils/data_sharder.go b/flyteplugins/go/tasks/pluginmachinery/ioutils/data_sharder.go new file mode 100644 index 0000000000..ae467afb0d --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/ioutils/data_sharder.go @@ -0,0 +1,8 @@ +package ioutils + +import "context" + +// This interface allows shard selection for OutputSandbox. +type ShardSelector interface { + GetShardPrefix(ctx context.Context, s []byte) (string, error) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/ioutils/doc.go b/flyteplugins/go/tasks/pluginmachinery/ioutils/doc.go new file mode 100644 index 0000000000..bc0381a283 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/ioutils/doc.go @@ -0,0 +1,5 @@ +// Package ioutils contains utilities for interacting with the IO Layer of FlytePropeller Metastore +// For example, utilities like reading inputs, writing outputs, computing output paths, prefixes. 
+// These helpers are intended to be used by FlytePropeller and aim to reduce the burden of implementing simple +// io functions +package ioutils diff --git a/flyteplugins/go/tasks/pluginmachinery/ioutils/in_memory_output_reader.go b/flyteplugins/go/tasks/pluginmachinery/ioutils/in_memory_output_reader.go new file mode 100644 index 0000000000..55da5b16d6 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/ioutils/in_memory_output_reader.go @@ -0,0 +1,55 @@ +package ioutils + +import ( + "context" + "fmt" + + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/io" + "github.com/flyteorg/flyte/v2/flytestdlib/storage" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +type InMemoryOutputReader struct { + literals *core.LiteralMap + DeckPath *storage.DataReference + err *io.ExecutionError +} + +var _ io.OutputReader = InMemoryOutputReader{} + +func (r InMemoryOutputReader) IsError(ctx context.Context) (bool, error) { + return r.err != nil, nil +} + +func (r InMemoryOutputReader) ReadError(ctx context.Context) (io.ExecutionError, error) { + if r.err != nil { + return *r.err, nil + } + + return io.ExecutionError{}, fmt.Errorf("no execution error specified") +} + +func (r InMemoryOutputReader) IsFile(_ context.Context) bool { + return false +} + +func (r InMemoryOutputReader) Exists(_ context.Context) (bool, error) { + // TODO: should this return true if there is an error? 
+ return r.literals != nil, nil +} + +func (r InMemoryOutputReader) Read(_ context.Context) (*core.LiteralMap, *io.ExecutionError, error) { + return r.literals, r.err, nil +} + +func (r InMemoryOutputReader) DeckExists(_ context.Context) (bool, error) { + return r.DeckPath != nil, nil +} + +func NewInMemoryOutputReader(literals *core.LiteralMap, DeckPath *storage.DataReference, err *io.ExecutionError) InMemoryOutputReader { + return InMemoryOutputReader{ + literals: literals, + DeckPath: DeckPath, + err: err, + } +} diff --git a/flyteplugins/go/tasks/pluginmachinery/ioutils/in_memory_output_reader_test.go b/flyteplugins/go/tasks/pluginmachinery/ioutils/in_memory_output_reader_test.go new file mode 100644 index 0000000000..8801c5819c --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/ioutils/in_memory_output_reader_test.go @@ -0,0 +1,45 @@ +package ioutils + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/flyteorg/flyte/v2/flytestdlib/storage" + flyteIdlCore "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +func TestInMemoryOutputReader(t *testing.T) { + deckPath := storage.DataReference("s3://bucket/key") + lt := map[string]*flyteIdlCore.Literal{ + "results": { + Value: &flyteIdlCore.Literal_Scalar{ + Scalar: &flyteIdlCore.Scalar{ + Value: &flyteIdlCore.Scalar_Primitive{ + Primitive: &flyteIdlCore.Primitive{Value: &flyteIdlCore.Primitive_Integer{Integer: 3}}, + }, + }, + }, + }, + } + or := NewInMemoryOutputReader(&flyteIdlCore.LiteralMap{Literals: lt}, &deckPath, nil) + + assert.Equal(t, &deckPath, or.DeckPath) + ctx := context.TODO() + + ok, err := or.IsError(ctx) + assert.False(t, ok) + assert.NoError(t, err) + + assert.False(t, or.IsFile(ctx)) + + ok, err = or.Exists(ctx) + assert.True(t, ok) + assert.NoError(t, err) + + literalMap, executionErr, err := or.Read(ctx) + assert.Equal(t, lt, literalMap.Literals) + assert.Nil(t, executionErr) + assert.NoError(t, err) +} diff --git 
a/flyteplugins/go/tasks/pluginmachinery/ioutils/paths.go b/flyteplugins/go/tasks/pluginmachinery/ioutils/paths.go new file mode 100644 index 0000000000..af68f0eade --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/ioutils/paths.go @@ -0,0 +1,57 @@ +package ioutils + +import ( + "context" + + "github.com/flyteorg/flyte/v2/flytestdlib/logger" + "github.com/flyteorg/flyte/v2/flytestdlib/storage" +) + +const ( + // InputsSuffix specifies the name of the file that contains the task inputs in the form core.LiteralMap + InputsSuffix = "inputs.pb" + // TaskTemplateSuffix In case a task requests for a task template, it is passed into the task using this filename. + // The format is of type core.TaskTemplate + TaskTemplateSuffix = "task.pb" + // FuturesSuffix specifies that for dynamic workflows, the futures files is written with this filename/suffix. + // The format is core.DynamicJobSpec + FuturesSuffix = "futures.pb" + // OutputsSuffix specifies that outputs are assumed to be written to this "file"/"suffix" under the given prefix + // The outputs file has a format of core.LiteralMap + OutputsSuffix = "outputs.pb" + // ErrorsSuffix specifies that the errors are written to this prefix/file under the given prefix. The Error File + // has a format of core.ErrorDocument + ErrorsSuffix = "error.pb" + IndexLookupSuffix = "indexlookup.pb" + // CheckpointPrefix specifies the common prefix that can be used as a directory where all the checkpoint information + // will be stored. 
This directory is under the raw output-prefix path + CheckpointPrefix = "_flytecheckpoints" +) + +// ConstructCheckpointPath returns a checkpoint path under the given `base` / rawOutputPrefix, following the conventions of +// the store +func ConstructCheckpointPath(store storage.ReferenceConstructor, rawOutputPrefix storage.DataReference) storage.DataReference { + if len(rawOutputPrefix) == 0 { + return "" + } + return constructPath(store, rawOutputPrefix, CheckpointPrefix) +} + +func constructPath(store storage.ReferenceConstructor, base storage.DataReference, suffix string) storage.DataReference { + res, err := store.ConstructReference(context.Background(), base, suffix) + if err != nil { + logger.Errorf(context.Background(), "Failed to construct path. Base[%v] Error: %v", base, err) + } + + return res +} + +// GetTaskTemplatePath returns a protobuf file path where TaskTemplate is stored +func GetTaskTemplatePath(ctx context.Context, store storage.ReferenceConstructor, base storage.DataReference) (storage.DataReference, error) { + return store.ConstructReference(ctx, base, TaskTemplateSuffix) +} + +// GetIndexLookupPath returns the indexpath suffixed to IndexLookupSuffix +func GetIndexLookupPath(ctx context.Context, store storage.ReferenceConstructor, prefix storage.DataReference) (res storage.DataReference, err error) { + return store.ConstructReference(ctx, prefix, IndexLookupSuffix) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/ioutils/paths_test.go b/flyteplugins/go/tasks/pluginmachinery/ioutils/paths_test.go new file mode 100644 index 0000000000..8f61c0810d --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/ioutils/paths_test.go @@ -0,0 +1,34 @@ +package ioutils + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/flyteorg/flyte/v2/flytestdlib/storage" +) + +func TestConstructCheckpointPath(t *testing.T) { + store := storage.URLPathConstructor{} + assert.Equal(t, ConstructCheckpointPath(store, 
"s3://my-bucket/base"), + storage.DataReference("s3://my-bucket/base/_flytecheckpoints")) + assert.Equal(t, ConstructCheckpointPath(store, "s3://my-bucket/base2/"), + storage.DataReference("s3://my-bucket/base2/_flytecheckpoints")) + assert.Equal(t, ConstructCheckpointPath(store, ""), + storage.DataReference("")) +} + +func TestGetTaskTemplatePath(t *testing.T) { + store := storage.URLPathConstructor{} + tmpl, err := GetTaskTemplatePath(context.TODO(), store, "s3://abc") + assert.NoError(t, err) + assert.Equal(t, tmpl, storage.DataReference("s3://abc/task.pb")) +} + +func TestGetIndexLookupPath(t *testing.T) { + store := storage.URLPathConstructor{} + tmpl, err := GetIndexLookupPath(context.TODO(), store, "s3://abc") + assert.NoError(t, err) + assert.Equal(t, tmpl, storage.DataReference("s3://abc/indexlookup.pb")) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/ioutils/precomputed_shardselector.go b/flyteplugins/go/tasks/pluginmachinery/ioutils/precomputed_shardselector.go new file mode 100644 index 0000000000..2a20272f6e --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/ioutils/precomputed_shardselector.go @@ -0,0 +1,73 @@ +package ioutils + +import ( + "context" + "hash/fnv" + "strings" + + "github.com/pkg/errors" +) + +// Generates the entire latin alphabet and appends it to the passed in array and returns the new array +func GenerateAlphabet(b []rune) []rune { + for i := 'a'; i <= 'z'; i++ { + b = append(b, i) + } + return b +} + +// Generates all arabic numerals and appends to the passed in array and returns the new array/slice +func GenerateArabicNumerals(b []rune) []rune { + for i := '0'; i <= '9'; i++ { + b = append(b, i) + } + return b +} + +func createAlphabetAndNumerals() []rune { + b := make([]rune, 0, 36) + b = GenerateAlphabet(b) + return GenerateArabicNumerals(b) +} + +// this sharder distributes data into one of the precomputed buckets. 
The bucket is deterministically determined given the input s +type PrecomputedShardSelector struct { + precomputedPrefixes []string + buckets uint32 +} + +// Generates deterministic shard id for the given string s +func (d *PrecomputedShardSelector) GetShardPrefix(_ context.Context, s []byte) (string, error) { + h := fnv.New32a() + _, err := h.Write(s) + if err != nil { + return "", errors.Wrap(err, "failed to create shard prefix, reason hash failure.") + } + idx := h.Sum32() % d.buckets + return d.precomputedPrefixes[idx], nil +} + +// Creates a PrecomputedShardSelector with 36*36 unique shards. Each shard is of the format {[0-9a-z][0-9a-z]}, i.e. 2 character long. +func NewBase36PrefixShardSelector(ctx context.Context) (ShardSelector, error) { + permittedChars := createAlphabetAndNumerals() + n := len(permittedChars) + precomputedPrefixes := make([]string, 0, n*n) + for _, c1 := range permittedChars { + for _, c2 := range permittedChars { + sb := strings.Builder{} + sb.WriteRune(c1) + sb.WriteRune(c2) + precomputedPrefixes = append(precomputedPrefixes, sb.String()) + } + } + + return NewConstantShardSelector(precomputedPrefixes), nil +} + +// uses the given shards to select a shard +func NewConstantShardSelector(shards []string) ShardSelector { + return &PrecomputedShardSelector{ + precomputedPrefixes: shards, + buckets: uint32(len(shards)), + } +} diff --git a/flyteplugins/go/tasks/pluginmachinery/ioutils/precomputed_shardselector_test.go b/flyteplugins/go/tasks/pluginmachinery/ioutils/precomputed_shardselector_test.go new file mode 100644 index 0000000000..d988be1309 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/ioutils/precomputed_shardselector_test.go @@ -0,0 +1,61 @@ +package ioutils + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestPrecomputedShardSelector_GetShardPrefix(t *testing.T) { + ctx := context.TODO() + t.Run("single-shard", func(t *testing.T) { + ss := 
PrecomputedShardSelector{precomputedPrefixes: []string{"x"}, buckets: 1} + p, err := ss.GetShardPrefix(ctx, []byte("abc")) + assert.NoError(t, err) + assert.Equal(t, "x", p) + }) + + t.Run("two-shards", func(t *testing.T) { + ss := PrecomputedShardSelector{precomputedPrefixes: []string{"x", "y"}, buckets: 2} + p, err := ss.GetShardPrefix(ctx, []byte("abc")) + assert.NoError(t, err) + assert.Equal(t, "y", p) + p, err = ss.GetShardPrefix(ctx, []byte("xyz")) + assert.NoError(t, err) + assert.Equal(t, "x", p) + }) +} + +func TestGenerateAlphabet(t *testing.T) { + var b []rune + b = GenerateAlphabet(b) + + assert.Equal(t, 26, len(b)) + assert.Equal(t, 'a', b[0]) + assert.Equal(t, 'z', b[25]) + + // Additive + b = GenerateAlphabet(b) + + assert.Equal(t, 52, len(b)) + assert.Equal(t, 'a', b[26]) + assert.Equal(t, 'z', b[51]) +} + +func TestGenerateArabicNumerals(t *testing.T) { + var b []rune + b = GenerateArabicNumerals(b) + + assert.Equal(t, 10, len(b)) + assert.Equal(t, '0', b[0]) + assert.Equal(t, '9', b[9]) + + // Additive + b = GenerateArabicNumerals(b) + assert.Equal(t, 20, len(b)) + assert.Equal(t, '0', b[0]) + assert.Equal(t, '9', b[9]) + assert.Equal(t, '0', b[10]) + assert.Equal(t, '9', b[19]) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/ioutils/raw_output_path.go b/flyteplugins/go/tasks/pluginmachinery/ioutils/raw_output_path.go new file mode 100644 index 0000000000..07199496ff --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/ioutils/raw_output_path.go @@ -0,0 +1,100 @@ +package ioutils + +import ( + "context" + "crypto/sha1" // #nosec + "encoding/hex" + "strconv" + + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/io" + "github.com/flyteorg/flyte/v2/flytestdlib/storage" + core2 "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +type precomputedRawOutputPaths struct { + path storage.DataReference +} + +func (r precomputedRawOutputPaths) GetRawOutputPrefix() storage.DataReference { + return r.path +} + +// Creates a 
deterministic RawOutputPath whose path is distributed based on the ShardSelector passed in. +// Determinism depends on the outputMetadataPath +// Potential performance problem, as creating a new RawPath creation may be expensive as it hashes the outputMetadataPath +// the final RawOutputPath is created in the shard selected by the sharder at the basePath and then appended by a hashed value of the outputMetadata +func NewShardedDeterministicRawOutputPath(ctx context.Context, sharder ShardSelector, basePrefix, outputMetadataPrefix storage.DataReference, store storage.ReferenceConstructor) (io.RawOutputPaths, error) { + o := []byte(outputMetadataPrefix) + prefix, err := sharder.GetShardPrefix(ctx, o) + if err != nil { + return nil, err + } + /* #nosec */ + // We use SHA1 for sheer speed instead of no collisions. As because of the shard Prefix + hash is pretty unique :) + m := sha1.New() + if _, err := m.Write(o); err != nil { + return nil, err + } + path, err := store.ConstructReference(ctx, basePrefix, prefix, hex.EncodeToString(m.Sum(nil))) + if err != nil { + return nil, err + } + return precomputedRawOutputPaths{ + path: path, + }, nil +} + +// A simple Output sandbox at a given path +func NewRawOutputPaths(_ context.Context, rawOutputPrefix storage.DataReference) io.RawOutputPaths { + return precomputedRawOutputPaths{path: rawOutputPrefix} +} + +// Creates an OutputSandbox in the basePath using the uniqueID and a sharder +// This implementation is faster than the Randomized strategy +// This returns a path in the format of protocol:///{bucket}/{shard}/{optional_suffix_path_parts}/{exec-id}-n0-0/ +func NewShardedRawOutputPath(ctx context.Context, sharder ShardSelector, basePath storage.DataReference, suffixPathParts []string, uniqueID string, store storage.ReferenceConstructor) (io.RawOutputPaths, error) { + o := []byte(uniqueID) + prefix, err := sharder.GetShardPrefix(ctx, o) + if err != nil { + return nil, err + } + suffix := []string{prefix} + suffix = 
append(suffix, suffixPathParts...) + suffix = append(suffix, uniqueID) + path, err := store.ConstructReference(ctx, basePath, suffix...) + if err != nil { + return nil, err + } + return precomputedRawOutputPaths{ + path: path, + }, nil +} + +// Constructs an output path that is deterministic and unique within the given outputPrefix. No sharding is performed +func NewDeterministicUniqueRawOutputPath(ctx context.Context, rawOutputPrefix, outputMetadataPrefix storage.DataReference, store storage.ReferenceConstructor) (io.RawOutputPaths, error) { + o := []byte(outputMetadataPrefix) + /* #nosec */ + // We use SHA1 for sheer speed instead of no collisions. As because of the shard Prefix + hash is pretty unique :) + m := sha1.New() + if _, err := m.Write(o); err != nil { + return nil, err + } + path, err := store.ConstructReference(ctx, rawOutputPrefix, hex.EncodeToString(m.Sum(nil))) + if err != nil { + return nil, err + } + return precomputedRawOutputPaths{ + path: path, + }, nil +} + +// Generates a RawOutput Path that looks like the TaskExecutionID and can be easily cross referenced with Flyte generated TaskExecution ID +func NewTaskIDRawOutputPath(ctx context.Context, rawOutputPrefix storage.DataReference, taskID *core2.TaskExecutionIdentifier, store storage.ReferenceConstructor) (io.RawOutputPaths, error) { + path, err := store.ConstructReference(ctx, rawOutputPrefix, taskID.GetNodeExecutionId().GetExecutionId().GetProject(), taskID.GetNodeExecutionId().GetExecutionId().GetDomain(), taskID.GetNodeExecutionId().GetExecutionId().GetName(), taskID.GetNodeExecutionId().GetNodeId(), strconv.Itoa(int(taskID.GetRetryAttempt())), taskID.GetTaskId().GetName()) + if err != nil { + return nil, err + } + return precomputedRawOutputPaths{ + path: path, + }, nil +} diff --git a/flyteplugins/go/tasks/pluginmachinery/ioutils/raw_output_path_test.go b/flyteplugins/go/tasks/pluginmachinery/ioutils/raw_output_path_test.go new file mode 100644 index 0000000000..bb8caa7ff9 --- /dev/null 
+++ b/flyteplugins/go/tasks/pluginmachinery/ioutils/raw_output_path_test.go @@ -0,0 +1,89 @@ +package ioutils + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/flyteorg/flyte/v2/flytestdlib/storage" + core2 "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +func TestNewOutputSandbox(t *testing.T) { + assert.Equal(t, NewRawOutputPaths(context.TODO(), "x").GetRawOutputPrefix(), storage.DataReference("x")) +} + +func TestNewShardedDeterministicRawOutputPath(t *testing.T) { + ctx := context.TODO() + + t.Run("success-path", func(t *testing.T) { + ss := NewConstantShardSelector([]string{"x"}) + sd, err := NewShardedDeterministicRawOutputPath(ctx, ss, "s3://bucket", "m", storage.URLPathConstructor{}) + assert.NoError(t, err) + assert.Equal(t, storage.DataReference("s3://bucket/x/6b0d31c0d563223024da45691584643ac78c96e8"), sd.GetRawOutputPrefix()) + }) + + t.Run("error", func(t *testing.T) { + ss := NewConstantShardSelector([]string{"s3:// abc"}) + sd, err := NewShardedDeterministicRawOutputPath(ctx, ss, "s3://bucket", "m", storage.URLPathConstructor{}) + assert.Error(t, err, "%s", sd) + }) +} + +func TestNewShardedRawOutputPath(t *testing.T) { + ctx := context.TODO() + t.Run("", func(t *testing.T) { + ss := NewConstantShardSelector([]string{"x"}) + sd, err := NewShardedRawOutputPath(ctx, ss, "s3://flyte", make([]string, 0), "unique", storage.URLPathConstructor{}) + assert.NoError(t, err) + assert.Equal(t, storage.DataReference("s3://flyte/x/unique"), sd.GetRawOutputPrefix()) + }) + t.Run("with suffix", func(t *testing.T) { + ss := NewConstantShardSelector([]string{"x"}) + sd, err := NewShardedRawOutputPath(ctx, ss, "s3://flyte", []string{"suffix"}, "unique", storage.URLPathConstructor{}) + assert.NoError(t, err) + assert.Equal(t, storage.DataReference("s3://flyte/x/suffix/unique"), sd.GetRawOutputPrefix()) + }) + + t.Run("error", func(t *testing.T) { + ss := NewConstantShardSelector([]string{"s3:// abc"}) + sd, err := 
NewShardedRawOutputPath(ctx, ss, "s3://bucket", make([]string, 0), "m", storage.URLPathConstructor{}) + assert.Error(t, err, "%s", sd) + }) +} + +func TestNewDeterministicUniqueRawOutputPath(t *testing.T) { + ctx := context.TODO() + + t.Run("success-path", func(t *testing.T) { + sd, err := NewDeterministicUniqueRawOutputPath(ctx, "s3://bucket", "m", storage.URLPathConstructor{}) + assert.NoError(t, err) + assert.Equal(t, storage.DataReference("s3://bucket/6b0d31c0d563223024da45691584643ac78c96e8"), sd.GetRawOutputPrefix()) + }) + + t.Run("error-not-possible", func(t *testing.T) { + sd, err := NewDeterministicUniqueRawOutputPath(ctx, "bucket", "m", storage.URLPathConstructor{}) + assert.NoError(t, err) + assert.Equal(t, "/bucket/6b0d31c0d563223024da45691584643ac78c96e8", sd.GetRawOutputPrefix().String()) + }) +} + +func TestNewTaskIDRawOutputPath(t *testing.T) { + p, err := NewTaskIDRawOutputPath(context.TODO(), "s3://bucket", &core2.TaskExecutionIdentifier{ + NodeExecutionId: &core2.NodeExecutionIdentifier{ + NodeId: "n1", + ExecutionId: &core2.WorkflowExecutionIdentifier{ + Project: "project", + Domain: "domain", + Name: "exec", + }, + }, + RetryAttempt: 0, + TaskId: &core2.Identifier{ + Name: "task1", + }, + }, storage.URLPathConstructor{}) + assert.NoError(t, err) + assert.Equal(t, "s3://bucket/project/domain/exec/n1/0/task1", p.GetRawOutputPrefix().String()) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/ioutils/remote_file_input_reader.go b/flyteplugins/go/tasks/pluginmachinery/ioutils/remote_file_input_reader.go new file mode 100644 index 0000000000..04608d8cc2 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/ioutils/remote_file_input_reader.go @@ -0,0 +1,63 @@ +package ioutils + +import ( + "context" + + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/io" + "github.com/flyteorg/flyte/v2/flytestdlib/errors" + "github.com/flyteorg/flyte/v2/flytestdlib/storage" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + 
+const ( + ErrFailedRead string = "READ_FAILED" +) + +var ( + // Ensure we get an early build break if interface changes and these classes do not conform. + _ io.InputFilePaths = SimpleInputFilePath{} + _ io.InputReader = RemoteFileInputReader{} +) + +type RemoteFileInputReader struct { + io.InputFilePaths + store storage.ProtobufStore +} + +func (r RemoteFileInputReader) Get(ctx context.Context) (*core.LiteralMap, error) { + d := &core.LiteralMap{} + if err := r.store.ReadProtobuf(ctx, r.InputFilePaths.GetInputPath(), d); err != nil { + // TODO change flytestdlib to return protobuf unmarshal errors separately. As this can indicate malformed output and we should catch that + return nil, errors.Wrapf(ErrFailedRead, err, "failed to read data from dataDir [%v].", r.InputFilePaths.GetInputPath()) + } + + return d, nil + +} + +func NewRemoteFileInputReader(_ context.Context, store storage.ProtobufStore, inputPaths io.InputFilePaths) RemoteFileInputReader { + return RemoteFileInputReader{ + InputFilePaths: inputPaths, + store: store, + } +} + +type SimpleInputFilePath struct { + pathPrefix storage.DataReference + store storage.ReferenceConstructor +} + +func (s SimpleInputFilePath) GetInputPrefixPath() storage.DataReference { + return s.pathPrefix +} + +func (s SimpleInputFilePath) GetInputPath() storage.DataReference { + return constructPath(s.store, s.pathPrefix, InputsSuffix) +} + +func NewInputFilePaths(_ context.Context, store storage.ReferenceConstructor, inputPathPrefix storage.DataReference) SimpleInputFilePath { + return SimpleInputFilePath{ + store: store, + pathPrefix: inputPathPrefix, + } +} diff --git a/flyteplugins/go/tasks/pluginmachinery/ioutils/remote_file_input_reader_test.go b/flyteplugins/go/tasks/pluginmachinery/ioutils/remote_file_input_reader_test.go new file mode 100644 index 0000000000..3e9476b2f1 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/ioutils/remote_file_input_reader_test.go @@ -0,0 +1,18 @@ +package ioutils + +import ( + 
"testing" + + "github.com/stretchr/testify/assert" + + "github.com/flyteorg/flyte/v2/flytestdlib/storage" +) + +func TestSimpleInputFilePath_GetInputPath(t *testing.T) { + s := SimpleInputFilePath{ + pathPrefix: "s3://flyteorg-modelbuilder/metadata/propeller/staging/flyteexamples-development-jf193q0cqo/odd-nums-task/data", + store: storage.URLPathConstructor{}, + } + + assert.Equal(t, "s3://flyteorg-modelbuilder/metadata/propeller/staging/flyteexamples-development-jf193q0cqo/odd-nums-task/data/inputs.pb", s.GetInputPath().String()) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/ioutils/remote_file_output_reader.go b/flyteplugins/go/tasks/pluginmachinery/ioutils/remote_file_output_reader.go new file mode 100644 index 0000000000..b1f9a61ade --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/ioutils/remote_file_output_reader.go @@ -0,0 +1,139 @@ +package ioutils + +import ( + "context" + "fmt" + + "github.com/pkg/errors" + + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/io" + "github.com/flyteorg/flyte/v2/flytestdlib/storage" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +type RemoteFileOutputReader struct { + OutPath io.OutputFilePaths + store storage.ComposedProtobufStore + maxPayloadSize int64 +} + +var _ io.OutputReader = RemoteFileOutputReader{} + +func (r RemoteFileOutputReader) IsError(ctx context.Context) (bool, error) { + metadata, err := r.store.Head(ctx, r.OutPath.GetErrorPath()) + if err != nil { + return false, errors.Wrapf(err, "failed to read error file @[%s]", r.OutPath.GetErrorPath()) + } + if metadata.Exists() { + if metadata.Size() > r.maxPayloadSize { + return false, errors.Wrapf(err, "error file @[%s] is too large [%d] bytes, max allowed [%d] bytes", r.OutPath.GetErrorPath(), metadata.Size(), r.maxPayloadSize) + } + return true, nil + } + return false, nil +} + +func (r RemoteFileOutputReader) ReadError(ctx context.Context) (io.ExecutionError, error) { + errorDoc := &core.ErrorDocument{} + err := 
r.store.ReadProtobuf(ctx, r.OutPath.GetErrorPath(), errorDoc) + if err != nil { + if storage.IsNotFound(err) { + return io.ExecutionError{ + IsRecoverable: true, + ExecutionError: &core.ExecutionError{ + Code: "ErrorFileNotFound", + Message: err.Error(), + Kind: core.ExecutionError_SYSTEM, + }, + }, nil + } + return io.ExecutionError{}, errors.Wrapf(err, "failed to read error data from task @[%s]", r.OutPath.GetErrorPath()) + } + + if errorDoc.Error == nil { + return io.ExecutionError{ + IsRecoverable: true, + ExecutionError: &core.ExecutionError{ + Code: "ErrorFileBadFormat", + Message: fmt.Sprintf("error not formatted correctly, nil error @path [%s]", r.OutPath.GetErrorPath()), + Kind: core.ExecutionError_SYSTEM, + }, + }, nil + } + + ee := io.ExecutionError{ + ExecutionError: &core.ExecutionError{ + Code: errorDoc.Error.Code, + Message: errorDoc.Error.Message, + Kind: errorDoc.Error.Origin, + }, + } + + if errorDoc.Error.Kind == core.ContainerError_RECOVERABLE { + ee.IsRecoverable = true + } + + return ee, nil +} + +func (r RemoteFileOutputReader) Exists(ctx context.Context) (bool, error) { + md, err := r.store.Head(ctx, r.OutPath.GetOutputPath()) + if err != nil { + return false, err + } + if md.Exists() { + if md.Size() > r.maxPayloadSize { + return false, errors.Errorf("output file @[%s] is too large [%d] bytes, max allowed [%d] bytes", r.OutPath.GetOutputPath(), md.Size(), r.maxPayloadSize) + } + return true, nil + } + return false, nil +} + +func (r RemoteFileOutputReader) Read(ctx context.Context) (*core.LiteralMap, *io.ExecutionError, error) { + + d := &core.LiteralMap{} + if err := r.store.ReadProtobuf(ctx, r.OutPath.GetOutputPath(), d); err != nil { + // TODO change flytestdlib to return protobuf unmarshal errors separately. As this can indicate malformed output and we should catch that + return nil, nil, fmt.Errorf("failed to read data from dataDir [%v]. 
Error: %v", r.OutPath.GetOutputPath(), err) + } + + if d.Literals == nil { + return nil, &io.ExecutionError{ + IsRecoverable: true, + ExecutionError: &core.ExecutionError{ + Code: "No outputs produced", + Message: fmt.Sprintf("outputs not found at [%s]", r.OutPath.GetOutputPath()), + }, + }, nil + } + + return d, nil, nil +} + +func (r RemoteFileOutputReader) IsFile(ctx context.Context) bool { + return true +} + +func (r RemoteFileOutputReader) DeckExists(ctx context.Context) (bool, error) { + md, err := r.store.Head(ctx, r.OutPath.GetDeckPath()) + if err != nil { + return false, err + } + + return md.Exists(), nil +} + +func NewRemoteFileOutputReader(_ context.Context, store storage.ComposedProtobufStore, outPaths io.OutputFilePaths, maxDatasetSize int64) RemoteFileOutputReader { + // Note: even though the data store retrieval checks against GetLimitMegabytes, there might be external + // storage implementations, so we keep this check here as well. + maxPayloadSize := maxDatasetSize + if maxPayloadSize == 0 { + maxPayloadSize = storage.GetConfig().Limits.GetLimitMegabytes * 1024 * 1024 + } + return RemoteFileOutputReader{ + OutPath: outPaths, + store: store, + maxPayloadSize: maxPayloadSize, + } +} diff --git a/flyteplugins/go/tasks/pluginmachinery/ioutils/remote_file_output_reader_test.go b/flyteplugins/go/tasks/pluginmachinery/ioutils/remote_file_output_reader_test.go new file mode 100644 index 0000000000..154f9cae06 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/ioutils/remote_file_output_reader_test.go @@ -0,0 +1,109 @@ +package ioutils + +import ( + "context" + "testing" + + pluginsIOMock "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/io/mocks" + "github.com/flyteorg/flyte/v2/flytestdlib/storage" + storageMocks "github.com/flyteorg/flyte/v2/flytestdlib/storage/mocks" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + 
	"google.golang.org/protobuf/runtime/protoiface"
)

// MemoryMetadata is a simple in-memory storage metadata stub for tests.
type MemoryMetadata struct {
	exists     bool
	size       int64
	etag       string
	contentMD5 string
}

// ContentMD5 returns the stubbed content MD5.
func (m MemoryMetadata) ContentMD5() string {
	return m.contentMD5
}

// Size returns the stubbed size in bytes.
func (m MemoryMetadata) Size() int64 {
	return m.size
}

// Exists returns the stubbed existence flag.
func (m MemoryMetadata) Exists() bool {
	return m.exists
}

// Etag returns the stubbed etag.
func (m MemoryMetadata) Etag() string {
	return m.etag
}

// TestReadOrigin checks that ReadError surfaces the error origin (USER vs SYSTEM) and maps the
// RECOVERABLE container-error kind onto IsRecoverable.
func TestReadOrigin(t *testing.T) {
	ctx := context.TODO()

	opath := &pluginsIOMock.OutputFilePaths{}
	opath.OnGetErrorPath().Return("")
	deckPath := "deck.html"
	opath.OnGetDeckPath().Return(storage.DataReference(deckPath))

	t.Run("user", func(t *testing.T) {
		errorDoc := &core.ErrorDocument{
			Error: &core.ContainerError{
				Code:    "red",
				Message: "hi",
				Kind:    core.ContainerError_NON_RECOVERABLE,
				Origin:  core.ExecutionError_USER,
			},
		}
		store := &storageMocks.ComposedProtobufStore{}
		// Simulate reading the error document by mutating the message passed to ReadProtobuf.
		store.EXPECT().ReadProtobuf(mock.Anything, mock.Anything, mock.Anything).Run(func(ctx context.Context, ref storage.DataReference, msg protoiface.MessageV1) {
			assert.NotNil(t, msg)
			casted := msg.(*core.ErrorDocument)
			casted.Error = errorDoc.Error
		}).Return(nil)

		store.EXPECT().Head(ctx, storage.DataReference("deck.html")).Return(MemoryMetadata{
			exists: true,
		}, nil)

		r := RemoteFileOutputReader{
			OutPath:        opath,
			store:          store,
			maxPayloadSize: 0,
		}

		ee, err := r.ReadError(ctx)
		assert.NoError(t, err)
		assert.Equal(t, core.ExecutionError_USER, ee.Kind)
		// NON_RECOVERABLE container errors must not be marked recoverable.
		assert.False(t, ee.IsRecoverable)
		exists, err := r.DeckExists(ctx)
		assert.NoError(t, err)
		assert.True(t, exists)
	})

	t.Run("system", func(t *testing.T) {
		errorDoc := &core.ErrorDocument{
			Error: &core.ContainerError{
				Code:    "red",
				Message: "hi",
				Kind:    core.ContainerError_RECOVERABLE,
				Origin:  core.ExecutionError_SYSTEM,
			},
		}
		store := &storageMocks.ComposedProtobufStore{}
		store.EXPECT().ReadProtobuf(mock.Anything, mock.Anything, mock.Anything).Run(func(ctx context.Context, ref storage.DataReference, msg protoiface.MessageV1) {
			assert.NotNil(t, msg)
			casted := msg.(*core.ErrorDocument)
			casted.Error = errorDoc.Error
		}).Return(nil)

		r := RemoteFileOutputReader{
			OutPath:        opath,
			store:          store,
			maxPayloadSize: 0,
		}

		ee, err := r.ReadError(ctx)
		assert.NoError(t, err)
		assert.Equal(t, core.ExecutionError_SYSTEM, ee.Kind)
		// RECOVERABLE container errors are surfaced as recoverable execution errors.
		assert.True(t, ee.IsRecoverable)
	})
}

// ---- flyteplugins/go/tasks/pluginmachinery/ioutils/remote_file_output_writer.go ----

package ioutils

import (
	"context"
	"fmt"

	"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/io"
	"github.com/flyteorg/flyte/v2/flytestdlib/storage"
	"github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core"
)

var (
	// Compile-time interface conformance checks.
	_ io.OutputWriter    = RemoteFileOutputWriter{}
	_ io.OutputFilePaths = RemoteCheckpointPaths{}
)

// RemoteFileOutputPaths records all metadata output paths / keys on a remote storage system, e.g. S3 / GCS or any other
// key-value store. Theoretically if the storage.DataReference can support BigTable, this will work with it.
type RemoteFileOutputPaths struct {
	// All returned paths for the metadata outputs (inputs.pb and outputs.pb) of an input are constructed
	// under this prefix.
	outputPrefix storage.DataReference
	// Implementation of the ReferenceConstructor, used to construct the actual paths.
	store storage.ReferenceConstructor
	// Arbitrarily supplied RawOutputPaths implementation.
	io.RawOutputPaths
}

// GetOutputPrefixPath returns the prefix under which all metadata output paths are constructed.
func (w RemoteFileOutputPaths) GetOutputPrefixPath() storage.DataReference {
	return w.outputPrefix
}

// GetOutputPath returns the path of the outputs file under the output prefix.
func (w RemoteFileOutputPaths) GetOutputPath() storage.DataReference {
	return constructPath(w.store, w.outputPrefix, OutputsSuffix)
}

// GetDeckPath returns the path of the deck file; the filename comes from configuration.
func (w RemoteFileOutputPaths) GetDeckPath() storage.DataReference {
	deckSuffix := GetConfig().RemoteFileOutputPathsConfig.DeckFilename
	return constructPath(w.store, w.outputPrefix, deckSuffix)
}

// GetErrorPath returns the path of the error file under the output prefix.
func (w RemoteFileOutputPaths) GetErrorPath() storage.DataReference {
	return constructPath(w.store, w.outputPrefix, ErrorsSuffix)
}

// GetFuturesPath returns the path of the futures file under the output prefix.
func (w RemoteFileOutputPaths) GetFuturesPath() storage.DataReference {
	return constructPath(w.store, w.outputPrefix, FuturesSuffix)
}
// RemoteFileOutputWriter adds storage Write APIs to output paths / keys. In retrospect, the `path` should be
// generally replaced with keys.
type RemoteFileOutputWriter struct {
	io.OutputFilePaths
	store storage.ProtobufStore
}

// Put persists the result held by reader: an error document if the read produced an execution
// error, otherwise the output literals. Returns an error if the reader fails or there is nothing
// to write.
func (w RemoteFileOutputWriter) Put(ctx context.Context, reader io.OutputReader) error {
	literals, executionErr, err := reader.Read(ctx)
	if err != nil {
		return err
	}

	if executionErr != nil {
		// Map the recoverability flag onto the ContainerError kind persisted in the error document.
		errorKind := core.ContainerError_RECOVERABLE
		if !executionErr.IsRecoverable {
			errorKind = core.ContainerError_NON_RECOVERABLE
		}

		errDoc := &core.ErrorDocument{
			Error: &core.ContainerError{
				Code:    executionErr.Code,
				Message: executionErr.Message,
				Kind:    errorKind,
			},
		}

		return w.store.WriteProtobuf(ctx, w.GetErrorPath(), storage.Options{}, errDoc)
	}

	if literals != nil {
		return w.store.WriteProtobuf(ctx, w.GetOutputPath(), storage.Options{}, literals)
	}

	return fmt.Errorf("no data found to write")
}

// RemoteCheckpointPaths implements the CheckpointPaths interface and adds on top of the OutputFilePaths interface.
type RemoteCheckpointPaths struct {
	RemoteFileOutputPaths

	// previousPath is the checkpoint prefix of the previous attempt, or "" for the first attempt.
	previousPath storage.DataReference

	store storage.ReferenceConstructor
}

// GetPreviousCheckpointsPrefix returns the prefix path for checkpoints for the previous attempt, or "" if this is
// the first attempt.
func (r RemoteCheckpointPaths) GetPreviousCheckpointsPrefix() storage.DataReference {
	return r.previousPath
}

// GetCheckpointPrefix returns a new checkpoint path under the raw output prefix.
func (r RemoteCheckpointPaths) GetCheckpointPrefix() storage.DataReference {
	return ConstructCheckpointPath(r.store, r.GetRawOutputPrefix())
}

// NewRemoteFileOutputPaths returns a RemoteFileOutputPaths object, where all the paths are configured using the given
// outputPrefix and constructed using the storage.ReferenceConstructor.
func NewRemoteFileOutputPaths(_ context.Context, store storage.ReferenceConstructor, outputPrefix storage.DataReference, sandbox io.RawOutputPaths) RemoteFileOutputPaths {
	return RemoteFileOutputPaths{
		store:          store,
		outputPrefix:   outputPrefix,
		RawOutputPaths: sandbox,
	}
}

// NewCheckpointRemoteFilePaths returns a new object constructed with an optional previousCheckpointPath and derives a
// new checkpointPath from the outputPrefix.
func NewCheckpointRemoteFilePaths(ctx context.Context, store storage.ReferenceConstructor, outputPrefix storage.DataReference, sandbox io.RawOutputPaths, previousCheckpointPath storage.DataReference) RemoteCheckpointPaths {
	return RemoteCheckpointPaths{
		previousPath:          previousCheckpointPath,
		store:                 store,
		RemoteFileOutputPaths: NewRemoteFileOutputPaths(ctx, store, outputPrefix, sandbox),
	}
}

// NewReadOnlyOutputFilePaths can be used when data is only to be read from an existing remote location.
func NewReadOnlyOutputFilePaths(ctx context.Context, store storage.ReferenceConstructor, outputPrefix storage.DataReference) RemoteCheckpointPaths {
	return NewCheckpointRemoteFilePaths(ctx, store, outputPrefix, nil, "")
}
// NewRemoteFileOutputWriter returns a writer that records all outputs to remote files / objects. Given outputs,
// it will automatically write it to the outputFile / key that is configured.
func NewRemoteFileOutputWriter(_ context.Context, store storage.ProtobufStore, outputFilePaths io.OutputFilePaths) RemoteFileOutputWriter {
	return RemoteFileOutputWriter{
		OutputFilePaths: outputFilePaths,
		store:           store,
	}
}

// ---- flyteplugins/go/tasks/pluginmachinery/ioutils/remote_file_output_writer_test.go ----

package ioutils

import (
	"context"
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/flyteorg/flyte/v2/flytestdlib/promutils"
	"github.com/flyteorg/flyte/v2/flytestdlib/storage"
)

// TestRemoteFileOutputWriter checks that checkpoint/output/deck/error/futures paths are all derived
// from the configured prefixes via constructPath.
func TestRemoteFileOutputWriter(t *testing.T) {
	ctx := context.TODO()
	memStore, err := storage.NewDataStore(&storage.Config{Type: storage.TypeMemory}, promutils.NewTestScope())
	assert.Nil(t, err)

	outputPrefix := storage.DataReference("output")
	rawOutputPrefix := storage.DataReference("sandbox")
	previousCheckpointPath := storage.DataReference("checkpoint")

	checkpointPath := NewCheckpointRemoteFilePaths(
		ctx,
		memStore,
		outputPrefix,
		NewRawOutputPaths(ctx, rawOutputPrefix),
		previousCheckpointPath,
	)

	t.Run("Test NewCheckpointRemoteFilePaths", func(t *testing.T) {
		assert.Equal(t, previousCheckpointPath, checkpointPath.GetPreviousCheckpointsPrefix())
		assert.Equal(t, outputPrefix, checkpointPath.GetOutputPrefixPath())

		// Checkpoints hang off the raw output prefix; metadata files hang off the output prefix.
		assert.Equal(t, constructPath(memStore, rawOutputPrefix, CheckpointPrefix), checkpointPath.GetCheckpointPrefix())
		assert.Equal(t, constructPath(memStore, outputPrefix, OutputsSuffix), checkpointPath.GetOutputPath())
		assert.Equal(t, constructPath(memStore, outputPrefix, "deck.html"), checkpointPath.GetDeckPath())
		assert.Equal(t, constructPath(memStore, outputPrefix, ErrorsSuffix), checkpointPath.GetErrorPath())
		assert.Equal(t, constructPath(memStore, outputPrefix, FuturesSuffix), checkpointPath.GetFuturesPath())
	})

	t.Run("Test NewRemoteFileOutputWriter", func(t *testing.T) {
		p := NewRemoteFileOutputWriter(ctx, memStore, checkpointPath)

		assert.Equal(t, constructPath(memStore, rawOutputPrefix, CheckpointPrefix), p.GetCheckpointPrefix())
		assert.Equal(t, constructPath(memStore, outputPrefix, OutputsSuffix), p.GetOutputPath())
		assert.Equal(t, constructPath(memStore, outputPrefix, "deck.html"), p.GetDeckPath())
		assert.Equal(t, constructPath(memStore, outputPrefix, ErrorsSuffix), p.GetErrorPath())
	})
}

// ---- flyteplugins/go/tasks/pluginmachinery/ioutils/task_reader.go ----

package ioutils

import (
	"context"

	"github.com/pkg/errors"

	pluginsCore "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core"
	"github.com/flyteorg/flyte/v2/flytestdlib/atomic"
	"github.com/flyteorg/flyte/v2/flytestdlib/storage"
	"github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core"
)

var (
	// Compile-time interface conformance check.
	_ pluginsCore.TaskReader = &lazyUploadingTaskReader{}
)

// SimpleTaskReader provides only the TaskReader interface. This is created to conveniently use the uploading
// taskreader interface.
type SimpleTaskReader interface {
	Read(ctx context.Context) (*core.TaskTemplate, error)
}

// lazyUploadingTaskReader provides a lazy interface that uploads the core.TaskTemplate to a configured location,
// only if the location is accessed.
// lazyUploadingTaskReader provides a lazy interface that uploads the core.TaskTemplate to a configured location,
// only if the location is accessed. This reduces the potential overhead of writing the template.
type lazyUploadingTaskReader struct {
	SimpleTaskReader
	// uploaded flags whether the template has already been written to remotePath.
	uploaded   atomic.Bool
	store      storage.ProtobufStore
	remotePath storage.DataReference
}

// Path lazily uploads the task template on first access and returns the remote path it was written to.
func (r *lazyUploadingTaskReader) Path(ctx context.Context) (storage.DataReference, error) {
	// We are using atomic because it is ok to re-upload in some cases. We know that most of the plugins are
	// executed in a single go-routine, so chances of a race condition are minimal.
	if !r.uploaded.Load() {
		t, err := r.SimpleTaskReader.Read(ctx)
		if err != nil {
			return "", err
		}
		err = r.store.WriteProtobuf(ctx, r.remotePath, storage.Options{}, t)
		if err != nil {
			return "", errors.Wrapf(err, "failed to store task template to remote path [%s]", r.remotePath)
		}
		r.uploaded.Store(true)
	}
	return r.remotePath, nil
}

// NewLazyUploadingTaskReader decorates an existing TaskReader and adds a functionality to allow lazily uploading
// the task template to a remote location, only when the location information is accessed.
func NewLazyUploadingTaskReader(baseTaskReader SimpleTaskReader, remotePath storage.DataReference, store storage.ProtobufStore) pluginsCore.TaskReader {
	return &lazyUploadingTaskReader{
		SimpleTaskReader: baseTaskReader,
		uploaded:         atomic.NewBool(false),
		store:            store,
		remotePath:       remotePath,
	}
}

// ---- flyteplugins/go/tasks/pluginmachinery/ioutils/task_reader_test.go (header continues on the next source line) ----

package ioutils

import (
	"context"
	"fmt"
	"testing"

	"github.com/golang/protobuf/proto"
	"github.com/stretchr/testify/assert"

	"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core/mocks"
	"github.com/flyteorg/flyte/v2/flytestdlib/contextutils"
	"github.com/flyteorg/flyte/v2/flytestdlib/promutils"
	"github.com/flyteorg/flyte/v2/flytestdlib/promutils/labeled"
	"github.com/flyteorg/flyte/v2/flytestdlib/storage"
	"github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core"
)

// dummyPath is the remote reference used by the lazy-uploading task reader tests.
const dummyPath = storage.DataReference("test")

// TestLazyUploadingTaskReader_Happy verifies that Read passes through to the base reader and that
// Path uploads the template to the remote path.
func TestLazyUploadingTaskReader_Happy(t *testing.T) {
	ttm := &core.TaskTemplate{}

	ctx := context.TODO()
	tr := &mocks.TaskReader{}
	tr.OnRead(ctx).Return(ttm, nil)

	ds, err := storage.NewDataStore(&storage.Config{
		Type: storage.TypeMemory,
	}, promutils.NewTestScope())
	assert.NoError(t, err)

	ltr := NewLazyUploadingTaskReader(tr, dummyPath, ds)

	x, err := ltr.Read(ctx)
	assert.NoError(t, err)
	assert.Equal(t, x, ttm)

	p, err := ltr.Path(ctx)
	assert.NoError(t, err)
	assert.Equal(t, p, dummyPath)

	// The template must now exist at the remote path.
	v, err := ds.Head(ctx, dummyPath)
	assert.NoError(t, err)
	assert.True(t, v.Exists())
}

// failingProtoStore is a storage.ProtobufStore stub whose writes always fail, used to exercise
// the upload-failure path.
type failingProtoStore struct {
	storage.ProtobufStore
}

// WriteProtobuf always returns an error.
func (d *failingProtoStore) WriteProtobuf(ctx context.Context, reference storage.DataReference, opts storage.Options, msg proto.Message) error {
	return fmt.Errorf("failed")
}

// TestLazyUploadingTaskReader_TaskWriteFailure verifies Path propagates template upload failures.
func TestLazyUploadingTaskReader_TaskWriteFailure(t *testing.T) {
	ttm := &core.TaskTemplate{}

	ctx := context.TODO()
	tr := &mocks.TaskReader{}
	tr.OnRead(ctx).Return(ttm, nil)

	ltr := NewLazyUploadingTaskReader(tr, dummyPath, &failingProtoStore{})

	x, err := ltr.Read(ctx)
	assert.NoError(t, err)
	assert.Equal(t, x, ttm)

	p, err := ltr.Path(ctx)
	assert.Error(t, err)
	assert.Equal(t, p, storage.DataReference(""))
}

// TestLazyUploadingTaskReader_TaskReadFailure verifies Path propagates base-reader failures and
// uploads nothing.
func TestLazyUploadingTaskReader_TaskReadFailure(t *testing.T) {

	ctx := context.TODO()
	tr := &mocks.TaskReader{}
	tr.OnRead(ctx).Return(nil, fmt.Errorf("read fail"))

	ds, err := storage.NewDataStore(&storage.Config{
		Type: storage.TypeMemory,
	}, promutils.NewTestScope())
	assert.NoError(t, err)

	ltr := NewLazyUploadingTaskReader(tr, dummyPath, ds)

	x, err := ltr.Read(ctx)
	assert.Error(t, err)
	assert.Nil(t, x)

	p, err := ltr.Path(ctx)
	assert.Error(t, err)
	assert.Equal(t, p, storage.DataReference(""))

	// Nothing should have been written to the store.
	v, err := ds.Head(ctx, dummyPath)
	assert.NoError(t, err)
	assert.False(t, v.Exists())
}

// init registers the metric keys required by labeled metrics used in these tests.
func init() {
	labeled.SetMetricKeys(contextutils.ExecIDKey)
}

// ---- flyteplugins/go/tasks/pluginmachinery/k8s/client.go ----

// Simple implementation of a KubeClient that caches reads and falls back
// to make direct API calls on failure. Write calls are not cached.
package k8s

import (
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/client-go/rest"
	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/apiutil"

	"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core"
)

// kubeClient bundles a controller-runtime client with its backing cache.
type kubeClient struct {
	client client.Client
	cache  cache.Cache
}

// GetClient returns the underlying controller-runtime client.
func (k *kubeClient) GetClient() client.Client {
	return k.client
}

// GetCache returns the underlying informer cache.
func (k *kubeClient) GetCache() cache.Cache {
	return k.cache
}

// newKubeClient wraps a client and cache into a core.KubeClient.
func newKubeClient(c client.Client, cache cache.Cache) core.KubeClient {
	return &kubeClient{client: c, cache: cache}
}

// Options allows callers to override the REST mapper, cache options, and client options used when
// constructing a KubeClient; nil fields are filled with defaults.
type Options struct {
	MapperProvider func(*rest.Config) (meta.RESTMapper, error)
	CacheOptions   *cache.Options
	ClientOptions  *client.Options
}
+func NewKubeClient(config *rest.Config, options Options) (core.KubeClient, error) { + httpClient, err := rest.HTTPClientFor(config) + if err != nil { + return nil, err + } + + if options.MapperProvider == nil { + options.MapperProvider = func(c *rest.Config) (meta.RESTMapper, error) { + return apiutil.NewDynamicRESTMapper(config, httpClient) + } + } + + mapper, err := options.MapperProvider(config) + if err != nil { + return nil, err + } + + if options.CacheOptions == nil { + options.CacheOptions = &cache.Options{ + HTTPClient: httpClient, + Mapper: mapper, + } + } + + cache, err := cache.New(config, *options.CacheOptions) + if err != nil { + return nil, err + } + + if options.ClientOptions == nil { + options.ClientOptions = &client.Options{ + HTTPClient: httpClient, + Mapper: mapper, + } + } + + client, err := client.New(config, *options.ClientOptions) + if err != nil { + return nil, err + } + + return newKubeClient(client, cache), nil +} + +// NewDefaultKubeClient creates a new KubeClient with default options set. +// This client caches reads and falls back to make API calls on failure. Write calls are not cached. 
+func NewDefaultKubeClient(config *rest.Config) (core.KubeClient, error) { + return NewKubeClient(config, Options{}) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/k8s/config.go b/flyteplugins/go/tasks/pluginmachinery/k8s/config.go new file mode 100644 index 0000000000..43ad682276 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/k8s/config.go @@ -0,0 +1,58 @@ +package k8s + +import ( + "fmt" + "io/ioutil" + + "github.com/pkg/errors" + restclient "k8s.io/client-go/rest" +) + +type ClusterConfig struct { + Name string `json:"name" pflag:",Friendly name of the remote cluster"` + Endpoint string `json:"endpoint" pflag:", Remote K8s cluster endpoint"` + Auth Auth `json:"auth" pflag:"-, Auth setting for the cluster"` + Enabled bool `json:"enabled" pflag:", Boolean flag to enable or disable"` +} + +type Auth struct { + TokenPath string `json:"tokenPath" pflag:", Token path"` + CaCertPath string `json:"caCertPath" pflag:", Certificate path"` +} + +func (auth Auth) GetCA() ([]byte, error) { + cert, err := ioutil.ReadFile(auth.CaCertPath) + if err != nil { + return nil, errors.Wrap(err, "failed to read k8s CA cert from configured path") + } + return cert, nil +} + +func (auth Auth) GetToken() (string, error) { + token, err := ioutil.ReadFile(auth.TokenPath) + if err != nil { + return "", errors.Wrap(err, "failed to read k8s bearer token from configured path") + } + return string(token), nil +} + +// KubeClientConfig ... 
+func KubeClientConfig(host string, auth Auth) (*restclient.Config, error) { + tokenString, err := auth.GetToken() + if err != nil { + return nil, errors.New(fmt.Sprintf("Failed to get auth token: %+v", err)) + } + + caCert, err := auth.GetCA() + if err != nil { + return nil, errors.New(fmt.Sprintf("Failed to get auth CA: %+v", err)) + } + + tlsClientConfig := restclient.TLSClientConfig{} + tlsClientConfig.CAData = caCert + return &restclient.Config{ + Host: host, + TLSClientConfig: tlsClientConfig, + BearerToken: tokenString, + }, nil +} diff --git a/flyteplugins/go/tasks/pluginmachinery/k8s/mocks/plugin.go b/flyteplugins/go/tasks/pluginmachinery/k8s/mocks/plugin.go new file mode 100644 index 0000000000..50312925fc --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/k8s/mocks/plugin.go @@ -0,0 +1,173 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. + +package mocks + +import ( + context "context" + + client "sigs.k8s.io/controller-runtime/pkg/client" + + core "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + + k8s "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/k8s" + + mock "github.com/stretchr/testify/mock" +) + +// Plugin is an autogenerated mock type for the Plugin type +type Plugin struct { + mock.Mock +} + +type Plugin_BuildIdentityResource struct { + *mock.Call +} + +func (_m Plugin_BuildIdentityResource) Return(_a0 client.Object, _a1 error) *Plugin_BuildIdentityResource { + return &Plugin_BuildIdentityResource{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *Plugin) OnBuildIdentityResource(ctx context.Context, taskCtx core.TaskExecutionMetadata) *Plugin_BuildIdentityResource { + c_call := _m.On("BuildIdentityResource", ctx, taskCtx) + return &Plugin_BuildIdentityResource{Call: c_call} +} + +func (_m *Plugin) OnBuildIdentityResourceMatch(matchers ...interface{}) *Plugin_BuildIdentityResource { + c_call := _m.On("BuildIdentityResource", matchers...) 
+ return &Plugin_BuildIdentityResource{Call: c_call} +} + +// BuildIdentityResource provides a mock function with given fields: ctx, taskCtx +func (_m *Plugin) BuildIdentityResource(ctx context.Context, taskCtx core.TaskExecutionMetadata) (client.Object, error) { + ret := _m.Called(ctx, taskCtx) + + var r0 client.Object + if rf, ok := ret.Get(0).(func(context.Context, core.TaskExecutionMetadata) client.Object); ok { + r0 = rf(ctx, taskCtx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(client.Object) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, core.TaskExecutionMetadata) error); ok { + r1 = rf(ctx, taskCtx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type Plugin_BuildResource struct { + *mock.Call +} + +func (_m Plugin_BuildResource) Return(_a0 client.Object, _a1 error) *Plugin_BuildResource { + return &Plugin_BuildResource{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *Plugin) OnBuildResource(ctx context.Context, taskCtx core.TaskExecutionContext) *Plugin_BuildResource { + c_call := _m.On("BuildResource", ctx, taskCtx) + return &Plugin_BuildResource{Call: c_call} +} + +func (_m *Plugin) OnBuildResourceMatch(matchers ...interface{}) *Plugin_BuildResource { + c_call := _m.On("BuildResource", matchers...) 
+ return &Plugin_BuildResource{Call: c_call} +} + +// BuildResource provides a mock function with given fields: ctx, taskCtx +func (_m *Plugin) BuildResource(ctx context.Context, taskCtx core.TaskExecutionContext) (client.Object, error) { + ret := _m.Called(ctx, taskCtx) + + var r0 client.Object + if rf, ok := ret.Get(0).(func(context.Context, core.TaskExecutionContext) client.Object); ok { + r0 = rf(ctx, taskCtx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(client.Object) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, core.TaskExecutionContext) error); ok { + r1 = rf(ctx, taskCtx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type Plugin_GetProperties struct { + *mock.Call +} + +func (_m Plugin_GetProperties) Return(_a0 k8s.PluginProperties) *Plugin_GetProperties { + return &Plugin_GetProperties{Call: _m.Call.Return(_a0)} +} + +func (_m *Plugin) OnGetProperties() *Plugin_GetProperties { + c_call := _m.On("GetProperties") + return &Plugin_GetProperties{Call: c_call} +} + +func (_m *Plugin) OnGetPropertiesMatch(matchers ...interface{}) *Plugin_GetProperties { + c_call := _m.On("GetProperties", matchers...) 
+ return &Plugin_GetProperties{Call: c_call} +} + +// GetProperties provides a mock function with given fields: +func (_m *Plugin) GetProperties() k8s.PluginProperties { + ret := _m.Called() + + var r0 k8s.PluginProperties + if rf, ok := ret.Get(0).(func() k8s.PluginProperties); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(k8s.PluginProperties) + } + + return r0 +} + +type Plugin_GetTaskPhase struct { + *mock.Call +} + +func (_m Plugin_GetTaskPhase) Return(_a0 core.PhaseInfo, _a1 error) *Plugin_GetTaskPhase { + return &Plugin_GetTaskPhase{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *Plugin) OnGetTaskPhase(ctx context.Context, pluginContext k8s.PluginContext, resource client.Object) *Plugin_GetTaskPhase { + c_call := _m.On("GetTaskPhase", ctx, pluginContext, resource) + return &Plugin_GetTaskPhase{Call: c_call} +} + +func (_m *Plugin) OnGetTaskPhaseMatch(matchers ...interface{}) *Plugin_GetTaskPhase { + c_call := _m.On("GetTaskPhase", matchers...) + return &Plugin_GetTaskPhase{Call: c_call} +} + +// GetTaskPhase provides a mock function with given fields: ctx, pluginContext, resource +func (_m *Plugin) GetTaskPhase(ctx context.Context, pluginContext k8s.PluginContext, resource client.Object) (core.PhaseInfo, error) { + ret := _m.Called(ctx, pluginContext, resource) + + var r0 core.PhaseInfo + if rf, ok := ret.Get(0).(func(context.Context, k8s.PluginContext, client.Object) core.PhaseInfo); ok { + r0 = rf(ctx, pluginContext, resource) + } else { + r0 = ret.Get(0).(core.PhaseInfo) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, k8s.PluginContext, client.Object) error); ok { + r1 = rf(ctx, pluginContext, resource) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/flyteplugins/go/tasks/pluginmachinery/k8s/mocks/plugin_abort_override.go b/flyteplugins/go/tasks/pluginmachinery/k8s/mocks/plugin_abort_override.go new file mode 100644 index 0000000000..0f2bbe00c9 --- /dev/null +++ 
b/flyteplugins/go/tasks/pluginmachinery/k8s/mocks/plugin_abort_override.go @@ -0,0 +1,59 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. + +package mocks + +import ( + context "context" + + client "sigs.k8s.io/controller-runtime/pkg/client" + + core "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + + k8s "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/k8s" + + mock "github.com/stretchr/testify/mock" +) + +// PluginAbortOverride is an autogenerated mock type for the PluginAbortOverride type +type PluginAbortOverride struct { + mock.Mock +} + +type PluginAbortOverride_OnAbort struct { + *mock.Call +} + +func (_m PluginAbortOverride_OnAbort) Return(behavior k8s.AbortBehavior, err error) *PluginAbortOverride_OnAbort { + return &PluginAbortOverride_OnAbort{Call: _m.Call.Return(behavior, err)} +} + +func (_m *PluginAbortOverride) OnOnAbort(ctx context.Context, tCtx core.TaskExecutionContext, resource client.Object) *PluginAbortOverride_OnAbort { + c_call := _m.On("OnAbort", ctx, tCtx, resource) + return &PluginAbortOverride_OnAbort{Call: c_call} +} + +func (_m *PluginAbortOverride) OnOnAbortMatch(matchers ...interface{}) *PluginAbortOverride_OnAbort { + c_call := _m.On("OnAbort", matchers...) 
+ return &PluginAbortOverride_OnAbort{Call: c_call} +} + +// OnAbort provides a mock function with given fields: ctx, tCtx, resource +func (_m *PluginAbortOverride) OnAbort(ctx context.Context, tCtx core.TaskExecutionContext, resource client.Object) (k8s.AbortBehavior, error) { + ret := _m.Called(ctx, tCtx, resource) + + var r0 k8s.AbortBehavior + if rf, ok := ret.Get(0).(func(context.Context, core.TaskExecutionContext, client.Object) k8s.AbortBehavior); ok { + r0 = rf(ctx, tCtx, resource) + } else { + r0 = ret.Get(0).(k8s.AbortBehavior) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, core.TaskExecutionContext, client.Object) error); ok { + r1 = rf(ctx, tCtx, resource) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/flyteplugins/go/tasks/pluginmachinery/k8s/mocks/plugin_context.go b/flyteplugins/go/tasks/pluginmachinery/k8s/mocks/plugin_context.go new file mode 100644 index 0000000000..4a5426e2ef --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/k8s/mocks/plugin_context.go @@ -0,0 +1,256 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. 
+ +package mocks + +import ( + core "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + io "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/io" + client "sigs.k8s.io/controller-runtime/pkg/client" + + mock "github.com/stretchr/testify/mock" + + storage "github.com/flyteorg/flyte/v2/flytestdlib/storage" +) + +// PluginContext is an autogenerated mock type for the PluginContext type +type PluginContext struct { + mock.Mock +} + +type PluginContext_DataStore struct { + *mock.Call +} + +func (_m PluginContext_DataStore) Return(_a0 *storage.DataStore) *PluginContext_DataStore { + return &PluginContext_DataStore{Call: _m.Call.Return(_a0)} +} + +func (_m *PluginContext) OnDataStore() *PluginContext_DataStore { + c_call := _m.On("DataStore") + return &PluginContext_DataStore{Call: c_call} +} + +func (_m *PluginContext) OnDataStoreMatch(matchers ...interface{}) *PluginContext_DataStore { + c_call := _m.On("DataStore", matchers...) + return &PluginContext_DataStore{Call: c_call} +} + +// DataStore provides a mock function with given fields: +func (_m *PluginContext) DataStore() *storage.DataStore { + ret := _m.Called() + + var r0 *storage.DataStore + if rf, ok := ret.Get(0).(func() *storage.DataStore); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*storage.DataStore) + } + } + + return r0 +} + +type PluginContext_InputReader struct { + *mock.Call +} + +func (_m PluginContext_InputReader) Return(_a0 io.InputReader) *PluginContext_InputReader { + return &PluginContext_InputReader{Call: _m.Call.Return(_a0)} +} + +func (_m *PluginContext) OnInputReader() *PluginContext_InputReader { + c_call := _m.On("InputReader") + return &PluginContext_InputReader{Call: c_call} +} + +func (_m *PluginContext) OnInputReaderMatch(matchers ...interface{}) *PluginContext_InputReader { + c_call := _m.On("InputReader", matchers...) 
+ return &PluginContext_InputReader{Call: c_call} +} + +// InputReader provides a mock function with given fields: +func (_m *PluginContext) InputReader() io.InputReader { + ret := _m.Called() + + var r0 io.InputReader + if rf, ok := ret.Get(0).(func() io.InputReader); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(io.InputReader) + } + } + + return r0 +} + +type PluginContext_K8sReader struct { + *mock.Call +} + +func (_m PluginContext_K8sReader) Return(_a0 client.Reader) *PluginContext_K8sReader { + return &PluginContext_K8sReader{Call: _m.Call.Return(_a0)} +} + +func (_m *PluginContext) OnK8sReader() *PluginContext_K8sReader { + c_call := _m.On("K8sReader") + return &PluginContext_K8sReader{Call: c_call} +} + +func (_m *PluginContext) OnK8sReaderMatch(matchers ...interface{}) *PluginContext_K8sReader { + c_call := _m.On("K8sReader", matchers...) + return &PluginContext_K8sReader{Call: c_call} +} + +// K8sReader provides a mock function with given fields: +func (_m *PluginContext) K8sReader() client.Reader { + ret := _m.Called() + + var r0 client.Reader + if rf, ok := ret.Get(0).(func() client.Reader); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(client.Reader) + } + } + + return r0 +} + +type PluginContext_OutputWriter struct { + *mock.Call +} + +func (_m PluginContext_OutputWriter) Return(_a0 io.OutputWriter) *PluginContext_OutputWriter { + return &PluginContext_OutputWriter{Call: _m.Call.Return(_a0)} +} + +func (_m *PluginContext) OnOutputWriter() *PluginContext_OutputWriter { + c_call := _m.On("OutputWriter") + return &PluginContext_OutputWriter{Call: c_call} +} + +func (_m *PluginContext) OnOutputWriterMatch(matchers ...interface{}) *PluginContext_OutputWriter { + c_call := _m.On("OutputWriter", matchers...) 
+ return &PluginContext_OutputWriter{Call: c_call} +} + +// OutputWriter provides a mock function with given fields: +func (_m *PluginContext) OutputWriter() io.OutputWriter { + ret := _m.Called() + + var r0 io.OutputWriter + if rf, ok := ret.Get(0).(func() io.OutputWriter); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(io.OutputWriter) + } + } + + return r0 +} + +type PluginContext_PluginStateReader struct { + *mock.Call +} + +func (_m PluginContext_PluginStateReader) Return(_a0 core.PluginStateReader) *PluginContext_PluginStateReader { + return &PluginContext_PluginStateReader{Call: _m.Call.Return(_a0)} +} + +func (_m *PluginContext) OnPluginStateReader() *PluginContext_PluginStateReader { + c_call := _m.On("PluginStateReader") + return &PluginContext_PluginStateReader{Call: c_call} +} + +func (_m *PluginContext) OnPluginStateReaderMatch(matchers ...interface{}) *PluginContext_PluginStateReader { + c_call := _m.On("PluginStateReader", matchers...) + return &PluginContext_PluginStateReader{Call: c_call} +} + +// PluginStateReader provides a mock function with given fields: +func (_m *PluginContext) PluginStateReader() core.PluginStateReader { + ret := _m.Called() + + var r0 core.PluginStateReader + if rf, ok := ret.Get(0).(func() core.PluginStateReader); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(core.PluginStateReader) + } + } + + return r0 +} + +type PluginContext_TaskExecutionMetadata struct { + *mock.Call +} + +func (_m PluginContext_TaskExecutionMetadata) Return(_a0 core.TaskExecutionMetadata) *PluginContext_TaskExecutionMetadata { + return &PluginContext_TaskExecutionMetadata{Call: _m.Call.Return(_a0)} +} + +func (_m *PluginContext) OnTaskExecutionMetadata() *PluginContext_TaskExecutionMetadata { + c_call := _m.On("TaskExecutionMetadata") + return &PluginContext_TaskExecutionMetadata{Call: c_call} +} + +func (_m *PluginContext) OnTaskExecutionMetadataMatch(matchers ...interface{}) 
*PluginContext_TaskExecutionMetadata { + c_call := _m.On("TaskExecutionMetadata", matchers...) + return &PluginContext_TaskExecutionMetadata{Call: c_call} +} + +// TaskExecutionMetadata provides a mock function with given fields: +func (_m *PluginContext) TaskExecutionMetadata() core.TaskExecutionMetadata { + ret := _m.Called() + + var r0 core.TaskExecutionMetadata + if rf, ok := ret.Get(0).(func() core.TaskExecutionMetadata); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(core.TaskExecutionMetadata) + } + } + + return r0 +} + +type PluginContext_TaskReader struct { + *mock.Call +} + +func (_m PluginContext_TaskReader) Return(_a0 core.TaskReader) *PluginContext_TaskReader { + return &PluginContext_TaskReader{Call: _m.Call.Return(_a0)} +} + +func (_m *PluginContext) OnTaskReader() *PluginContext_TaskReader { + c_call := _m.On("TaskReader") + return &PluginContext_TaskReader{Call: c_call} +} + +func (_m *PluginContext) OnTaskReaderMatch(matchers ...interface{}) *PluginContext_TaskReader { + c_call := _m.On("TaskReader", matchers...) + return &PluginContext_TaskReader{Call: c_call} +} + +// TaskReader provides a mock function with given fields: +func (_m *PluginContext) TaskReader() core.TaskReader { + ret := _m.Called() + + var r0 core.TaskReader + if rf, ok := ret.Get(0).(func() core.TaskReader); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(core.TaskReader) + } + } + + return r0 +} diff --git a/flyteplugins/go/tasks/pluginmachinery/k8s/mocks/resource.go b/flyteplugins/go/tasks/pluginmachinery/k8s/mocks/resource.go new file mode 100644 index 0000000000..85bfa9d21f --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/k8s/mocks/resource.go @@ -0,0 +1,730 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. 
+ +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + runtime "k8s.io/apimachinery/pkg/runtime" + + schema "k8s.io/apimachinery/pkg/runtime/schema" + + types "k8s.io/apimachinery/pkg/types" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Resource is an autogenerated mock type for the Resource type +type Resource struct { + mock.Mock +} + +type Resource_DeepCopyObject struct { + *mock.Call +} + +func (_m Resource_DeepCopyObject) Return(_a0 runtime.Object) *Resource_DeepCopyObject { + return &Resource_DeepCopyObject{Call: _m.Call.Return(_a0)} +} + +func (_m *Resource) OnDeepCopyObject() *Resource_DeepCopyObject { + c := _m.On("DeepCopyObject") + return &Resource_DeepCopyObject{Call: c} +} + +func (_m *Resource) OnDeepCopyObjectMatch(matchers ...interface{}) *Resource_DeepCopyObject { + c := _m.On("DeepCopyObject", matchers...) + return &Resource_DeepCopyObject{Call: c} +} + +// DeepCopyObject provides a mock function with given fields: +func (_m *Resource) DeepCopyObject() runtime.Object { + ret := _m.Called() + + var r0 runtime.Object + if rf, ok := ret.Get(0).(func() runtime.Object); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(runtime.Object) + } + } + + return r0 +} + +type Resource_GetAnnotations struct { + *mock.Call +} + +func (_m Resource_GetAnnotations) Return(_a0 map[string]string) *Resource_GetAnnotations { + return &Resource_GetAnnotations{Call: _m.Call.Return(_a0)} +} + +func (_m *Resource) OnGetAnnotations() *Resource_GetAnnotations { + c := _m.On("GetAnnotations") + return &Resource_GetAnnotations{Call: c} +} + +func (_m *Resource) OnGetAnnotationsMatch(matchers ...interface{}) *Resource_GetAnnotations { + c := _m.On("GetAnnotations", matchers...) 
+ return &Resource_GetAnnotations{Call: c} +} + +// GetAnnotations provides a mock function with given fields: +func (_m *Resource) GetAnnotations() map[string]string { + ret := _m.Called() + + var r0 map[string]string + if rf, ok := ret.Get(0).(func() map[string]string); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]string) + } + } + + return r0 +} + +type Resource_GetClusterName struct { + *mock.Call +} + +func (_m Resource_GetClusterName) Return(_a0 string) *Resource_GetClusterName { + return &Resource_GetClusterName{Call: _m.Call.Return(_a0)} +} + +func (_m *Resource) OnGetClusterName() *Resource_GetClusterName { + c := _m.On("GetClusterName") + return &Resource_GetClusterName{Call: c} +} + +func (_m *Resource) OnGetClusterNameMatch(matchers ...interface{}) *Resource_GetClusterName { + c := _m.On("GetClusterName", matchers...) + return &Resource_GetClusterName{Call: c} +} + +// GetClusterName provides a mock function with given fields: +func (_m *Resource) GetClusterName() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +type Resource_GetCreationTimestamp struct { + *mock.Call +} + +func (_m Resource_GetCreationTimestamp) Return(_a0 v1.Time) *Resource_GetCreationTimestamp { + return &Resource_GetCreationTimestamp{Call: _m.Call.Return(_a0)} +} + +func (_m *Resource) OnGetCreationTimestamp() *Resource_GetCreationTimestamp { + c := _m.On("GetCreationTimestamp") + return &Resource_GetCreationTimestamp{Call: c} +} + +func (_m *Resource) OnGetCreationTimestampMatch(matchers ...interface{}) *Resource_GetCreationTimestamp { + c := _m.On("GetCreationTimestamp", matchers...) 
+ return &Resource_GetCreationTimestamp{Call: c} +} + +// GetCreationTimestamp provides a mock function with given fields: +func (_m *Resource) GetCreationTimestamp() v1.Time { + ret := _m.Called() + + var r0 v1.Time + if rf, ok := ret.Get(0).(func() v1.Time); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(v1.Time) + } + + return r0 +} + +type Resource_GetDeletionGracePeriodSeconds struct { + *mock.Call +} + +func (_m Resource_GetDeletionGracePeriodSeconds) Return(_a0 *int64) *Resource_GetDeletionGracePeriodSeconds { + return &Resource_GetDeletionGracePeriodSeconds{Call: _m.Call.Return(_a0)} +} + +func (_m *Resource) OnGetDeletionGracePeriodSeconds() *Resource_GetDeletionGracePeriodSeconds { + c := _m.On("GetDeletionGracePeriodSeconds") + return &Resource_GetDeletionGracePeriodSeconds{Call: c} +} + +func (_m *Resource) OnGetDeletionGracePeriodSecondsMatch(matchers ...interface{}) *Resource_GetDeletionGracePeriodSeconds { + c := _m.On("GetDeletionGracePeriodSeconds", matchers...) + return &Resource_GetDeletionGracePeriodSeconds{Call: c} +} + +// GetDeletionGracePeriodSeconds provides a mock function with given fields: +func (_m *Resource) GetDeletionGracePeriodSeconds() *int64 { + ret := _m.Called() + + var r0 *int64 + if rf, ok := ret.Get(0).(func() *int64); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*int64) + } + } + + return r0 +} + +type Resource_GetDeletionTimestamp struct { + *mock.Call +} + +func (_m Resource_GetDeletionTimestamp) Return(_a0 *v1.Time) *Resource_GetDeletionTimestamp { + return &Resource_GetDeletionTimestamp{Call: _m.Call.Return(_a0)} +} + +func (_m *Resource) OnGetDeletionTimestamp() *Resource_GetDeletionTimestamp { + c := _m.On("GetDeletionTimestamp") + return &Resource_GetDeletionTimestamp{Call: c} +} + +func (_m *Resource) OnGetDeletionTimestampMatch(matchers ...interface{}) *Resource_GetDeletionTimestamp { + c := _m.On("GetDeletionTimestamp", matchers...) 
+ return &Resource_GetDeletionTimestamp{Call: c} +} + +// GetDeletionTimestamp provides a mock function with given fields: +func (_m *Resource) GetDeletionTimestamp() *v1.Time { + ret := _m.Called() + + var r0 *v1.Time + if rf, ok := ret.Get(0).(func() *v1.Time); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*v1.Time) + } + } + + return r0 +} + +type Resource_GetFinalizers struct { + *mock.Call +} + +func (_m Resource_GetFinalizers) Return(_a0 []string) *Resource_GetFinalizers { + return &Resource_GetFinalizers{Call: _m.Call.Return(_a0)} +} + +func (_m *Resource) OnGetFinalizers() *Resource_GetFinalizers { + c := _m.On("GetFinalizers") + return &Resource_GetFinalizers{Call: c} +} + +func (_m *Resource) OnGetFinalizersMatch(matchers ...interface{}) *Resource_GetFinalizers { + c := _m.On("GetFinalizers", matchers...) + return &Resource_GetFinalizers{Call: c} +} + +// GetFinalizers provides a mock function with given fields: +func (_m *Resource) GetFinalizers() []string { + ret := _m.Called() + + var r0 []string + if rf, ok := ret.Get(0).(func() []string); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]string) + } + } + + return r0 +} + +type Resource_GetGenerateName struct { + *mock.Call +} + +func (_m Resource_GetGenerateName) Return(_a0 string) *Resource_GetGenerateName { + return &Resource_GetGenerateName{Call: _m.Call.Return(_a0)} +} + +func (_m *Resource) OnGetGenerateName() *Resource_GetGenerateName { + c := _m.On("GetGenerateName") + return &Resource_GetGenerateName{Call: c} +} + +func (_m *Resource) OnGetGenerateNameMatch(matchers ...interface{}) *Resource_GetGenerateName { + c := _m.On("GetGenerateName", matchers...) 
+ return &Resource_GetGenerateName{Call: c} +} + +// GetGenerateName provides a mock function with given fields: +func (_m *Resource) GetGenerateName() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +type Resource_GetGeneration struct { + *mock.Call +} + +func (_m Resource_GetGeneration) Return(_a0 int64) *Resource_GetGeneration { + return &Resource_GetGeneration{Call: _m.Call.Return(_a0)} +} + +func (_m *Resource) OnGetGeneration() *Resource_GetGeneration { + c := _m.On("GetGeneration") + return &Resource_GetGeneration{Call: c} +} + +func (_m *Resource) OnGetGenerationMatch(matchers ...interface{}) *Resource_GetGeneration { + c := _m.On("GetGeneration", matchers...) + return &Resource_GetGeneration{Call: c} +} + +// GetGeneration provides a mock function with given fields: +func (_m *Resource) GetGeneration() int64 { + ret := _m.Called() + + var r0 int64 + if rf, ok := ret.Get(0).(func() int64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int64) + } + + return r0 +} + +type Resource_GetLabels struct { + *mock.Call +} + +func (_m Resource_GetLabels) Return(_a0 map[string]string) *Resource_GetLabels { + return &Resource_GetLabels{Call: _m.Call.Return(_a0)} +} + +func (_m *Resource) OnGetLabels() *Resource_GetLabels { + c := _m.On("GetLabels") + return &Resource_GetLabels{Call: c} +} + +func (_m *Resource) OnGetLabelsMatch(matchers ...interface{}) *Resource_GetLabels { + c := _m.On("GetLabels", matchers...) 
+ return &Resource_GetLabels{Call: c} +} + +// GetLabels provides a mock function with given fields: +func (_m *Resource) GetLabels() map[string]string { + ret := _m.Called() + + var r0 map[string]string + if rf, ok := ret.Get(0).(func() map[string]string); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]string) + } + } + + return r0 +} + +type Resource_GetManagedFields struct { + *mock.Call +} + +func (_m Resource_GetManagedFields) Return(_a0 []v1.ManagedFieldsEntry) *Resource_GetManagedFields { + return &Resource_GetManagedFields{Call: _m.Call.Return(_a0)} +} + +func (_m *Resource) OnGetManagedFields() *Resource_GetManagedFields { + c := _m.On("GetManagedFields") + return &Resource_GetManagedFields{Call: c} +} + +func (_m *Resource) OnGetManagedFieldsMatch(matchers ...interface{}) *Resource_GetManagedFields { + c := _m.On("GetManagedFields", matchers...) + return &Resource_GetManagedFields{Call: c} +} + +// GetManagedFields provides a mock function with given fields: +func (_m *Resource) GetManagedFields() []v1.ManagedFieldsEntry { + ret := _m.Called() + + var r0 []v1.ManagedFieldsEntry + if rf, ok := ret.Get(0).(func() []v1.ManagedFieldsEntry); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]v1.ManagedFieldsEntry) + } + } + + return r0 +} + +type Resource_GetName struct { + *mock.Call +} + +func (_m Resource_GetName) Return(_a0 string) *Resource_GetName { + return &Resource_GetName{Call: _m.Call.Return(_a0)} +} + +func (_m *Resource) OnGetName() *Resource_GetName { + c := _m.On("GetName") + return &Resource_GetName{Call: c} +} + +func (_m *Resource) OnGetNameMatch(matchers ...interface{}) *Resource_GetName { + c := _m.On("GetName", matchers...) 
+ return &Resource_GetName{Call: c} +} + +// GetName provides a mock function with given fields: +func (_m *Resource) GetName() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +type Resource_GetNamespace struct { + *mock.Call +} + +func (_m Resource_GetNamespace) Return(_a0 string) *Resource_GetNamespace { + return &Resource_GetNamespace{Call: _m.Call.Return(_a0)} +} + +func (_m *Resource) OnGetNamespace() *Resource_GetNamespace { + c := _m.On("GetNamespace") + return &Resource_GetNamespace{Call: c} +} + +func (_m *Resource) OnGetNamespaceMatch(matchers ...interface{}) *Resource_GetNamespace { + c := _m.On("GetNamespace", matchers...) + return &Resource_GetNamespace{Call: c} +} + +// GetNamespace provides a mock function with given fields: +func (_m *Resource) GetNamespace() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +type Resource_GetObjectKind struct { + *mock.Call +} + +func (_m Resource_GetObjectKind) Return(_a0 schema.ObjectKind) *Resource_GetObjectKind { + return &Resource_GetObjectKind{Call: _m.Call.Return(_a0)} +} + +func (_m *Resource) OnGetObjectKind() *Resource_GetObjectKind { + c := _m.On("GetObjectKind") + return &Resource_GetObjectKind{Call: c} +} + +func (_m *Resource) OnGetObjectKindMatch(matchers ...interface{}) *Resource_GetObjectKind { + c := _m.On("GetObjectKind", matchers...) 
+ return &Resource_GetObjectKind{Call: c} +} + +// GetObjectKind provides a mock function with given fields: +func (_m *Resource) GetObjectKind() schema.ObjectKind { + ret := _m.Called() + + var r0 schema.ObjectKind + if rf, ok := ret.Get(0).(func() schema.ObjectKind); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(schema.ObjectKind) + } + } + + return r0 +} + +type Resource_GetOwnerReferences struct { + *mock.Call +} + +func (_m Resource_GetOwnerReferences) Return(_a0 []v1.OwnerReference) *Resource_GetOwnerReferences { + return &Resource_GetOwnerReferences{Call: _m.Call.Return(_a0)} +} + +func (_m *Resource) OnGetOwnerReferences() *Resource_GetOwnerReferences { + c := _m.On("GetOwnerReferences") + return &Resource_GetOwnerReferences{Call: c} +} + +func (_m *Resource) OnGetOwnerReferencesMatch(matchers ...interface{}) *Resource_GetOwnerReferences { + c := _m.On("GetOwnerReferences", matchers...) + return &Resource_GetOwnerReferences{Call: c} +} + +// GetOwnerReferences provides a mock function with given fields: +func (_m *Resource) GetOwnerReferences() []v1.OwnerReference { + ret := _m.Called() + + var r0 []v1.OwnerReference + if rf, ok := ret.Get(0).(func() []v1.OwnerReference); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]v1.OwnerReference) + } + } + + return r0 +} + +type Resource_GetResourceVersion struct { + *mock.Call +} + +func (_m Resource_GetResourceVersion) Return(_a0 string) *Resource_GetResourceVersion { + return &Resource_GetResourceVersion{Call: _m.Call.Return(_a0)} +} + +func (_m *Resource) OnGetResourceVersion() *Resource_GetResourceVersion { + c := _m.On("GetResourceVersion") + return &Resource_GetResourceVersion{Call: c} +} + +func (_m *Resource) OnGetResourceVersionMatch(matchers ...interface{}) *Resource_GetResourceVersion { + c := _m.On("GetResourceVersion", matchers...) 
+ return &Resource_GetResourceVersion{Call: c} +} + +// GetResourceVersion provides a mock function with given fields: +func (_m *Resource) GetResourceVersion() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +type Resource_GetSelfLink struct { + *mock.Call +} + +func (_m Resource_GetSelfLink) Return(_a0 string) *Resource_GetSelfLink { + return &Resource_GetSelfLink{Call: _m.Call.Return(_a0)} +} + +func (_m *Resource) OnGetSelfLink() *Resource_GetSelfLink { + c := _m.On("GetSelfLink") + return &Resource_GetSelfLink{Call: c} +} + +func (_m *Resource) OnGetSelfLinkMatch(matchers ...interface{}) *Resource_GetSelfLink { + c := _m.On("GetSelfLink", matchers...) + return &Resource_GetSelfLink{Call: c} +} + +// GetSelfLink provides a mock function with given fields: +func (_m *Resource) GetSelfLink() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +type Resource_GetUID struct { + *mock.Call +} + +func (_m Resource_GetUID) Return(_a0 types.UID) *Resource_GetUID { + return &Resource_GetUID{Call: _m.Call.Return(_a0)} +} + +func (_m *Resource) OnGetUID() *Resource_GetUID { + c := _m.On("GetUID") + return &Resource_GetUID{Call: c} +} + +func (_m *Resource) OnGetUIDMatch(matchers ...interface{}) *Resource_GetUID { + c := _m.On("GetUID", matchers...) 
+ return &Resource_GetUID{Call: c} +} + +// GetUID provides a mock function with given fields: +func (_m *Resource) GetUID() types.UID { + ret := _m.Called() + + var r0 types.UID + if rf, ok := ret.Get(0).(func() types.UID); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(types.UID) + } + + return r0 +} + +type Resource_GroupVersionKind struct { + *mock.Call +} + +func (_m Resource_GroupVersionKind) Return(_a0 schema.GroupVersionKind) *Resource_GroupVersionKind { + return &Resource_GroupVersionKind{Call: _m.Call.Return(_a0)} +} + +func (_m *Resource) OnGroupVersionKind() *Resource_GroupVersionKind { + c := _m.On("GroupVersionKind") + return &Resource_GroupVersionKind{Call: c} +} + +func (_m *Resource) OnGroupVersionKindMatch(matchers ...interface{}) *Resource_GroupVersionKind { + c := _m.On("GroupVersionKind", matchers...) + return &Resource_GroupVersionKind{Call: c} +} + +// GroupVersionKind provides a mock function with given fields: +func (_m *Resource) GroupVersionKind() schema.GroupVersionKind { + ret := _m.Called() + + var r0 schema.GroupVersionKind + if rf, ok := ret.Get(0).(func() schema.GroupVersionKind); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(schema.GroupVersionKind) + } + + return r0 +} + +// SetAnnotations provides a mock function with given fields: annotations +func (_m *Resource) SetAnnotations(annotations map[string]string) { + _m.Called(annotations) +} + +// SetClusterName provides a mock function with given fields: clusterName +func (_m *Resource) SetClusterName(clusterName string) { + _m.Called(clusterName) +} + +// SetCreationTimestamp provides a mock function with given fields: timestamp +func (_m *Resource) SetCreationTimestamp(timestamp v1.Time) { + _m.Called(timestamp) +} + +// SetDeletionGracePeriodSeconds provides a mock function with given fields: _a0 +func (_m *Resource) SetDeletionGracePeriodSeconds(_a0 *int64) { + _m.Called(_a0) +} + +// SetDeletionTimestamp provides a mock function with given fields: timestamp +func (_m 
*Resource) SetDeletionTimestamp(timestamp *v1.Time) { + _m.Called(timestamp) +} + +// SetFinalizers provides a mock function with given fields: finalizers +func (_m *Resource) SetFinalizers(finalizers []string) { + _m.Called(finalizers) +} + +// SetGenerateName provides a mock function with given fields: name +func (_m *Resource) SetGenerateName(name string) { + _m.Called(name) +} + +// SetGeneration provides a mock function with given fields: generation +func (_m *Resource) SetGeneration(generation int64) { + _m.Called(generation) +} + +// SetGroupVersionKind provides a mock function with given fields: kind +func (_m *Resource) SetGroupVersionKind(kind schema.GroupVersionKind) { + _m.Called(kind) +} + +// SetLabels provides a mock function with given fields: labels +func (_m *Resource) SetLabels(labels map[string]string) { + _m.Called(labels) +} + +// SetManagedFields provides a mock function with given fields: managedFields +func (_m *Resource) SetManagedFields(managedFields []v1.ManagedFieldsEntry) { + _m.Called(managedFields) +} + +// SetName provides a mock function with given fields: name +func (_m *Resource) SetName(name string) { + _m.Called(name) +} + +// SetNamespace provides a mock function with given fields: namespace +func (_m *Resource) SetNamespace(namespace string) { + _m.Called(namespace) +} + +// SetOwnerReferences provides a mock function with given fields: _a0 +func (_m *Resource) SetOwnerReferences(_a0 []v1.OwnerReference) { + _m.Called(_a0) +} + +// SetResourceVersion provides a mock function with given fields: version +func (_m *Resource) SetResourceVersion(version string) { + _m.Called(version) +} + +// SetSelfLink provides a mock function with given fields: selfLink +func (_m *Resource) SetSelfLink(selfLink string) { + _m.Called(selfLink) +} + +// SetUID provides a mock function with given fields: uid +func (_m *Resource) SetUID(uid types.UID) { + _m.Called(uid) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/k8s/plugin.go 
b/flyteplugins/go/tasks/pluginmachinery/k8s/plugin.go new file mode 100644 index 0000000000..eadb9f3093 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/k8s/plugin.go @@ -0,0 +1,192 @@ +package k8s + +import ( + "context" + + "sigs.k8s.io/controller-runtime/pkg/client" + + pluginsCore "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/io" + "github.com/flyteorg/flyte/v2/flytestdlib/storage" +) + +//go:generate mockery -all -case=underscore + +// PluginEntry is a structure that is used to indicate to the system a K8s plugin +type PluginEntry struct { + // ID/Name of the plugin. This will be used to identify this plugin and has to be unique in the entire system + // All functions like enabling and disabling a plugin use this ID + ID pluginsCore.TaskType + // A list of all the task types for which this plugin is applicable. + RegisteredTaskTypes []pluginsCore.TaskType + // An instance of the kubernetes resource this plugin is responsible for, for example v1.Pod{} + ResourceToWatch client.Object + // An instance of the plugin + Plugin Plugin + // Boolean that indicates if this plugin can be used as the default for unknown task types. There can only be + // one default in the system + IsDefault bool + // Returns a new KubeClient to be used instead of the internal controller-runtime client. + CustomKubeClient func(ctx context.Context) (pluginsCore.KubeClient, error) +} + +// System level properties that this Plugin supports +type PluginProperties struct { + // Disables the inclusion of OwnerReferences in kubernetes resources that this plugin is responsible for. + // Disabling is only useful if resources will be created in a remote cluster. + DisableInjectOwnerReferences bool + // Boolean that indicates if finalizer injection should be disabled for resources that this plugin is + // responsible for. 
+	DisableInjectFinalizer bool
+	// Specifies the length of TaskExecutionID generated name. default: 50
+	GeneratedNameMaxLength *int
+	// DisableDeleteResourceOnFinalize disables deleting the created resource on finalize. That behavior is controllable
+	// on the base K8sPluginConfig level but can be disabled for individual plugins. Plugins should generally not
+	// override that behavior unless the resource that gets created for this plugin does not consume resources (cluster's
+	// cpu/memory... etc. or external resources) once the plugin's Plugin.GetTaskPhase() returns a terminal phase.
+	DisableDeleteResourceOnFinalize bool
+}
+
+// Special context passed in to plugins when checking task phase
+type PluginContext interface {
+	// Returns a TaskReader, to retrieve task details
+	TaskReader() pluginsCore.TaskReader
+
+	// Returns an input reader to retrieve input data
+	InputReader() io.InputReader
+
+	// Provides an output sync of type io.OutputWriter
+	OutputWriter() io.OutputWriter
+
+	// Returns a handle to the currently configured storage backend that can be used to communicate with the tasks or write metadata
+	DataStore() *storage.DataStore
+
+	// Returns a handle to the Task's execution metadata.
+	TaskExecutionMetadata() pluginsCore.TaskExecutionMetadata
+
+	// Returns a reader that retrieves previously stored plugin internal state. The state itself is immutable.
+	PluginStateReader() pluginsCore.PluginStateReader
+
+	// K8sReader returns a read-only k8s client that can fetch pod(s) for given node execution
+	K8sReader() client.Reader
+}
+
+// PluginState defines the state of a k8s plugin. This information must be maintained between propeller evaluations to
+// determine if there have been any updates since the previous evaluation.
+type PluginState struct {
+	// Phase is the plugin phase.
+	Phase pluginsCore.Phase
+	// PhaseVersion is a number used to indicate reportable changes to state that have the same phase.
+ PhaseVersion uint32 + // Reason is the message explaining the purpose for being in the reported state. + Reason string +} + +// Defines a simplified interface to author plugins for k8s resources. +type Plugin interface { + // Defines a func to create a query object (typically just object and type meta portions) that's used to query k8s + // resources. + BuildIdentityResource(ctx context.Context, taskCtx pluginsCore.TaskExecutionMetadata) (client.Object, error) + + // Defines a func to create the full resource object that will be posted to k8s. + BuildResource(ctx context.Context, taskCtx pluginsCore.TaskExecutionContext) (client.Object, error) + + // Analyses the k8s resource and reports the status as TaskPhase. This call is expected to be relatively fast, + // any operations that might take a long time (limits are configured system-wide) should be offloaded to the + // background. + GetTaskPhase(ctx context.Context, pluginContext PluginContext, resource client.Object) (pluginsCore.PhaseInfo, error) + + // Properties desired by the plugin + GetProperties() PluginProperties +} + +// An optional interface a Plugin can implement to override its default OnAbort finalizer (deletion of the underlying resource). +type PluginAbortOverride interface { + OnAbort(ctx context.Context, tCtx pluginsCore.TaskExecutionContext, resource client.Object) (behavior AbortBehavior, err error) +} + +// Defines the overridden OnAbort behavior. The resource (by default, the underlying resource, although this +// can be overridden) can be either patched, updated, or deleted. +type AbortBehavior struct { + // Optional override to the default k8s Resource being acted on. 
+ Resource client.Object + DeleteResource bool + Update *UpdateResourceOperation + Patch *PatchResourceOperation + // Determines whether to delete the Resource if the specified operations return an error + DeleteOnErr bool +} + +// Defines a Patch operation on a Resource +type PatchResourceOperation struct { + Patch client.Patch + Options []client.PatchOption +} + +// Defines an Update operation on a Resource +type UpdateResourceOperation struct { + Options []client.UpdateOption +} + +// AbortBehavior that patches the default resource +func AbortBehaviorPatchDefaultResource(patchOperation PatchResourceOperation, deleteOnErr bool) AbortBehavior { + return AbortBehaviorPatch(patchOperation, deleteOnErr, nil) +} + +// AbortBehavior that patches the specified resource +func AbortBehaviorPatch(patchOperation PatchResourceOperation, deleteOnErr bool, resource client.Object) AbortBehavior { + return AbortBehavior{ + Resource: resource, + Patch: &patchOperation, + DeleteOnErr: deleteOnErr, + } +} + +// AbortBehavior that updates the default resource +func AbortBehaviorUpdateDefaultResource(updateOperation UpdateResourceOperation, deleteOnErr bool) AbortBehavior { + return AbortBehaviorUpdate(updateOperation, deleteOnErr, nil) +} + +// AbortBehavior that updates the specified resource +func AbortBehaviorUpdate(updateOperation UpdateResourceOperation, deleteOnErr bool, resource client.Object) AbortBehavior { + return AbortBehavior{ + Resource: resource, + Update: &updateOperation, + DeleteOnErr: deleteOnErr, + } +} + +// AbortBehavior that deletes the default resource +func AbortBehaviorDeleteDefaultResource() AbortBehavior { + return AbortBehaviorDelete(nil) +} + +// AbortBehavior that deletes the specified resource +func AbortBehaviorDelete(resource client.Object) AbortBehavior { + return AbortBehavior{ + Resource: resource, + DeleteResource: true, + } +} + +// if we have the same Phase as the previous evaluation and updated the Reason but not the PhaseVersion we must +// 
update the PhaseVersion so an event is sent to reflect the Reason update. this does not handle the Running +// Phase because the legacy used `DefaultPhaseVersion + 1` which will only increment to 1. + +func MaybeUpdatePhaseVersion(phaseInfo *pluginsCore.PhaseInfo, pluginState *PluginState) { + if phaseInfo.Phase() == pluginState.Phase && + phaseInfo.Version() <= pluginState.PhaseVersion && phaseInfo.Reason() != pluginState.Reason { + + *phaseInfo = phaseInfo.WithVersion(pluginState.PhaseVersion + 1) + } +} + +func MaybeUpdatePhaseVersionFromPluginContext(phaseInfo *pluginsCore.PhaseInfo, pluginContext *PluginContext) error { + pluginState := PluginState{} + _, err := (*pluginContext).PluginStateReader().Get(&pluginState) + if err != nil { + return err + } + MaybeUpdatePhaseVersion(phaseInfo, &pluginState) + return nil +} diff --git a/flyteplugins/go/tasks/pluginmachinery/registry.go b/flyteplugins/go/tasks/pluginmachinery/registry.go new file mode 100644 index 0000000000..520cf12ba1 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/registry.go @@ -0,0 +1,110 @@ +package pluginmachinery + +import ( + "context" + "sync" + + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + internalRemote "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/internal/webapi" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/k8s" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/webapi" + "github.com/flyteorg/flyte/v2/flytestdlib/logger" +) + +type taskPluginRegistry struct { + m sync.Mutex + k8sPlugin []k8s.PluginEntry + corePlugin []core.PluginEntry +} + +// A singleton variable that maintains a registry of all plugins. 
The framework uses this to access all plugins
+var pluginRegistry = &taskPluginRegistry{}
+
+func PluginRegistry() TaskPluginRegistry {
+	return pluginRegistry
+}
+
+func (p *taskPluginRegistry) RegisterRemotePlugin(info webapi.PluginEntry) {
+	ctx := context.Background()
+	if info.ID == "" {
+		logger.Panicf(ctx, "ID is required attribute for remote plugin")
+	}
+
+	if len(info.SupportedTaskTypes) == 0 {
+		logger.Panicf(ctx, "AsyncPlugin should be registered to handle at least one task type")
+	}
+
+	if info.PluginLoader == nil {
+		logger.Panicf(ctx, "PluginLoader cannot be nil")
+	}
+
+	p.m.Lock()
+	defer p.m.Unlock()
+	p.corePlugin = append(p.corePlugin, internalRemote.CreateRemotePlugin(info))
+}
+
+func CreateRemotePlugin(pluginEntry webapi.PluginEntry) core.PluginEntry {
+	return internalRemote.CreateRemotePlugin(pluginEntry)
+}
+
+// Use this method to register Kubernetes Plugins
+func (p *taskPluginRegistry) RegisterK8sPlugin(info k8s.PluginEntry) {
+	if info.ID == "" {
+		logger.Panicf(context.TODO(), "ID is required attribute for k8s plugin")
+	}
+
+	if len(info.RegisteredTaskTypes) == 0 {
+		logger.Panicf(context.TODO(), "K8s AsyncPlugin should be registered to handle at least one task type")
+	}
+
+	if info.Plugin == nil {
+		logger.Panicf(context.TODO(), "K8s AsyncPlugin cannot be nil")
+	}
+
+	if info.ResourceToWatch == nil {
+		logger.Panicf(context.TODO(), "The framework requires a K8s resource to watch, for valid plugin registration")
+	}
+
+	p.m.Lock()
+	defer p.m.Unlock()
+	p.k8sPlugin = append(p.k8sPlugin, info)
+}
+
+// Use this method to register core plugins
+func (p *taskPluginRegistry) RegisterCorePlugin(info core.PluginEntry) {
+	if info.ID == "" {
+		logger.Panicf(context.TODO(), "ID is required attribute for core plugin")
+	}
+	if len(info.RegisteredTaskTypes) == 0 {
+		logger.Panicf(context.TODO(), "AsyncPlugin should be registered to handle at least one task type")
+	}
+	if info.LoadPlugin == nil {
+		logger.Panicf(context.TODO(), "PluginLoader 
cannot be nil")
+	}
+
+	p.m.Lock()
+	defer p.m.Unlock()
+	p.corePlugin = append(p.corePlugin, info)
+}
+
+// Returns a snapshot of all the registered core plugins.
+func (p *taskPluginRegistry) GetCorePlugins() []core.PluginEntry {
+	p.m.Lock()
+	defer p.m.Unlock()
+	return append(p.corePlugin[:0:0], p.corePlugin...)
+}
+
+// Returns a snapshot of all registered K8s plugins
+func (p *taskPluginRegistry) GetK8sPlugins() []k8s.PluginEntry {
+	p.m.Lock()
+	defer p.m.Unlock()
+	return append(p.k8sPlugin[:0:0], p.k8sPlugin...)
+}
+
+type TaskPluginRegistry interface {
+	RegisterK8sPlugin(info k8s.PluginEntry)
+	RegisterCorePlugin(info core.PluginEntry)
+	RegisterRemotePlugin(info webapi.PluginEntry)
+	GetCorePlugins() []core.PluginEntry
+	GetK8sPlugins() []k8s.PluginEntry
+}
diff --git a/flyteplugins/go/tasks/pluginmachinery/tasklog/azure_plugin.go b/flyteplugins/go/tasks/pluginmachinery/tasklog/azure_plugin.go
new file mode 100644
index 0000000000..22b473a560
--- /dev/null
+++ b/flyteplugins/go/tasks/pluginmachinery/tasklog/azure_plugin.go
@@ -0,0 +1,70 @@
+package tasklog
+
+import (
+	"bytes"
+	"compress/gzip"
+	"encoding/base64"
+	"fmt"
+	"net/url"
+
+	"github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core"
+)
+
+const (
+	defaultName        = "Azure Logs"
+	defaultQueryFormat = `let StartTime = datetime_add('hour', -1, datetime("{{.podRFC3339StartTime}}"));
+let FinishTime = datetime_add('hour', 1, datetime("{{.podRFC3339FinishTime}}"));
+ContainerLogV2
+| where TimeGenerated between (StartTime .. FinishTime)
+  and ContainerName == "{{.containerName}}"
+  and PodName == "{{.podName}}"
+  and PodNamespace == "{{.namespace}}"`
+)
+
+// Azure Logs specific templater.
+// Azure encodes two parts of the URI in two distinct ways.
+// The first half the URI is usually composed of Azure tenant ID, subscription ID, resource group name, etc.
+// The second half is the query itself, which is gzipped, base64, and then URL encoded.
+type AzureLogsTemplatePlugin struct {
+	TemplateLogPlugin `json:",squash"` //nolint
+
+	QueryFormat *string `json:"queryFormat" pflag:",The plain text query to use for Azure Logs."`
+}
+
+func (t AzureLogsTemplatePlugin) GetTaskLogs(input Input) (Output, error) {
+	taskLogs := make([]*core.TaskLog, 0)
+
+	var rawQuery string
+	if t.QueryFormat == nil {
+		rawQuery = defaultQueryFormat
+	} else {
+		rawQuery = *t.QueryFormat
+	}
+
+	query := replaceAll(rawQuery, input.templateVars())
+	var compressedBuffer bytes.Buffer
+	gzipWriter := gzip.NewWriter(&compressedBuffer)
+	_, err := gzipWriter.Write([]byte(query))
+	if err != nil {
+		return Output{TaskLogs: taskLogs}, fmt.Errorf("gzip compression failed: %v", err)
+	}
+	err = gzipWriter.Close()
+	if err != nil {
+		return Output{TaskLogs: taskLogs}, fmt.Errorf("gzip writer close failed: %v", err)
+	}
+
+	// Base64 and URL encoding
+	base64Encoded := base64.StdEncoding.EncodeToString(compressedBuffer.Bytes())
+	urlEncoded := url.QueryEscape(base64Encoded)
+
+	for _, baseURL := range t.TemplateURIs {
+		completeURL := fmt.Sprintf("%s%s", baseURL, urlEncoded)
+		taskLogs = append(taskLogs, &core.TaskLog{
+			Name:          t.DisplayName + input.LogName,
+			Uri:           completeURL,
+			MessageFormat: core.TaskLog_JSON,
+		})
+	}
+
+	return Output{TaskLogs: taskLogs}, nil
+}
diff --git a/flyteplugins/go/tasks/pluginmachinery/tasklog/azure_plugin_test.go b/flyteplugins/go/tasks/pluginmachinery/tasklog/azure_plugin_test.go
new file mode 100644
index 0000000000..7caf40fc73
--- /dev/null
+++ b/flyteplugins/go/tasks/pluginmachinery/tasklog/azure_plugin_test.go
@@ -0,0 +1,67 @@
+package tasklog
+
+import (
+	"reflect"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+
+	"github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core"
+)
+
+func TestAzureTemplateLogPlugin(t *testing.T) {
+	type args struct {
+		input Input
+	}
+	tests := []struct {
+		name   string
+		plugin AzureLogsTemplatePlugin
+		args   args
+		want   Output
+	}{
+		{
+			"test azure template log plugin",
AzureLogsTemplatePlugin{ + TemplateLogPlugin: TemplateLogPlugin{ + Name: "Azure Logs", + DisplayName: "Azure Logs", + TemplateURIs: []TemplateURI{"https://portal.azure.com#@test-tenantID/blade/Microsoft_OperationsManagementSuite_Workspace/Logs.ReactView/resourceId/%%2Fsubscriptions%%2Ftest-subscriptionID%%2FresourceGroups%%2Ftest-resourceGroupName/source/LogsBlade.AnalyticsShareLinkToQuery/q/"}, + }, + }, + args{ + input: Input{ + HostName: "test-host", + PodName: "test-pod", + Namespace: "test-namespace", + ContainerName: "test-container", + ContainerID: "test-containerID", + LogName: "main_logs", + PodRFC3339StartTime: "1970-01-01T01:02:03+01:00", + PodRFC3339FinishTime: "1970-01-01T04:25:45+01:00", + PodUnixStartTime: 123, + PodUnixFinishTime: 12345, + TaskExecutionID: dummyTaskExecID(), + }, + }, + Output{ + TaskLogs: []*core.TaskLog{ + { + Name: "Azure Logsmain_logs", + Uri: "https://portal.azure.com#@test-tenantID/blade/Microsoft_OperationsManagementSuite_Workspace/Logs.ReactView/resourceId/%%2Fsubscriptions%%2Ftest-subscriptionID%%2FresourceGroups%%2Ftest-resourceGroupName/source/LogsBlade.AnalyticsShareLinkToQuery/q/H4sIAAAAAAAA%2F3yPwUrFMBBF9%2F2KIZvX4ktJaosY6UrQjYhgcStjM9iATUo60o0fL0Fa24XuhnsuhzsfxPDMGLlzI0ELFpnYjfSK1uanIXzG0xmkPm8gF%2Fr6SkmlpdKd0kZVRl1epEOJorjJkvDOeTcP%2Fxn%2FFNamakzd7IS3wTM6T%2FEhvL9U2RcsA0WCZL8nTxGZLLwRL0Qe8t9fynK3o8gAvYXN9YhpWwuCaWbZr7H4qT0FeyxMwR7RPGG%2F436NxHcAAAD%2F%2F4NTt6FQAQAA", + MessageFormat: core.TaskLog_JSON, + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := tt.plugin.GetTaskLogs(tt.args.input) + assert.NoError(t, err) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("GetTaskLogs() got = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/flyteplugins/go/tasks/pluginmachinery/tasklog/plugin.go b/flyteplugins/go/tasks/pluginmachinery/tasklog/plugin.go new file mode 100644 index 0000000000..054acbc588 --- /dev/null +++ 
b/flyteplugins/go/tasks/pluginmachinery/tasklog/plugin.go @@ -0,0 +1,71 @@ +package tasklog + +import ( + "regexp" + + pluginsCore "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +//go:generate enumer --type=TemplateScheme --trimprefix=TemplateScheme -json -yaml + +type TemplateScheme int + +const ( + TemplateSchemePod TemplateScheme = iota + TemplateSchemeTaskExecution +) + +// TemplateURI is a URI that accepts templates. See: go/tasks/pluginmachinery/tasklog/template.go for available templates. +type TemplateURI = string + +type TemplateVar struct { + Regex *regexp.Regexp + Value string +} + +// Input contains all available information about task's execution that a log plugin can use to construct task's +// log links. +type Input struct { + HostName string + PodName string + Namespace string + ContainerName string + ContainerID string + LogName string + PodRFC3339StartTime string + PodRFC3339FinishTime string + PodUnixStartTime int64 + PodUnixFinishTime int64 + PodUID string + TaskExecutionID pluginsCore.TaskExecutionID + ExtraTemplateVars []TemplateVar + TaskTemplate *core.TaskTemplate + EnableVscode bool + AgentID string + ConnectorID string +} + +// Output contains all task logs a plugin generates for a given Input. +type Output struct { + TaskLogs []*core.TaskLog +} + +// Plugin represents an interface for task log plugins to implement to plug generated task log links into task events. 
+type Plugin interface { + // Generates a TaskLog object given necessary computation information + GetTaskLogs(i Input) (logs Output, err error) +} + +type TemplateLogPlugin struct { + Name string `json:"name" pflag:",Name of the plugin."` + DisplayName string `json:"displayName" pflag:",Display name for the generated log when displayed in the console."` + TemplateURIs []TemplateURI `json:"templateUris" pflag:",URI Templates for generating task log links."` + DynamicTemplateURIs []TemplateURI `json:"dynamicTemplateUris" pflag:",URI Templates for generating dynamic task log links."` + MessageFormat core.TaskLog_MessageFormat `json:"messageFormat" pflag:"-,Log Message Format."` + // Deprecated: Please, do not use + DeprecatedScheme TemplateScheme `json:"scheme" pflag:",Templating scheme to use. Supported values are Pod and TaskExecution."` + ShowWhilePending bool `json:"showWhilePending" pflag:",If true, the log link will be shown even if the task is in a pending state."` + HideOnceFinished bool `json:"hideOnceFinished" pflag:",If true, the log link will be hidden once the task has finished."` + LinkType string `json:"linkType" pflag:",Type of the log. (external, dashboard, or ide). 
This is used to distinguish between different log links."` +} diff --git a/flyteplugins/go/tasks/pluginmachinery/tasklog/template.go b/flyteplugins/go/tasks/pluginmachinery/tasklog/template.go new file mode 100644 index 0000000000..ff67a27eaa --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/tasklog/template.go @@ -0,0 +1,281 @@ +package tasklog + +import ( + "fmt" + "regexp" + "slices" + "strconv" + "strings" + + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +const vscode = "vscode" + +func MustCreateRegex(varName string) *regexp.Regexp { + return regexp.MustCompile(fmt.Sprintf(`(?i){{\s*[\.$]%s\s*}}`, varName)) +} + +var taskConfigVarRegex = regexp.MustCompile(`(?i){{\s*.taskConfig[\.$]([a-zA-Z_]+)\s*}}`) + +func MustCreateDynamicLogRegex(varName string) *regexp.Regexp { + return regexp.MustCompile(fmt.Sprintf(`(?i){{\s*.taskConfig[\.$]%s\s*}}`, varName)) +} + +type templateRegexes struct { + LogName *regexp.Regexp + PodName *regexp.Regexp + PodUID *regexp.Regexp + Namespace *regexp.Regexp + ContainerName *regexp.Regexp + ContainerID *regexp.Regexp + Hostname *regexp.Regexp + PodRFC3339StartTime *regexp.Regexp + PodRFC3339FinishTime *regexp.Regexp + PodUnixStartTime *regexp.Regexp + PodUnixFinishTime *regexp.Regexp + TaskID *regexp.Regexp + TaskVersion *regexp.Regexp + TaskOrg *regexp.Regexp + TaskProject *regexp.Regexp + TaskDomain *regexp.Regexp + TaskRetryAttempt *regexp.Regexp + NodeID *regexp.Regexp + ExecutionName *regexp.Regexp + ExecutionProject *regexp.Regexp + ExecutionDomain *regexp.Regexp + ExecutionOrg *regexp.Regexp + GeneratedName *regexp.Regexp + AgentID *regexp.Regexp + ConnectorID *regexp.Regexp +} + +func initDefaultRegexes() templateRegexes { + return templateRegexes{ + MustCreateRegex("logName"), + MustCreateRegex("podName"), + MustCreateRegex("podUID"), + MustCreateRegex("namespace"), + MustCreateRegex("containerName"), + MustCreateRegex("containerID"), + MustCreateRegex("hostname"), + 
MustCreateRegex("podRFC3339StartTime"), + MustCreateRegex("podRFC3339FinishTime"), + MustCreateRegex("podUnixStartTime"), + MustCreateRegex("podUnixFinishTime"), + MustCreateRegex("taskID"), + MustCreateRegex("taskVersion"), + MustCreateRegex("taskOrg"), + MustCreateRegex("taskProject"), + MustCreateRegex("taskDomain"), + MustCreateRegex("taskRetryAttempt"), + MustCreateRegex("nodeID"), + MustCreateRegex("executionName"), + MustCreateRegex("executionProject"), + MustCreateRegex("executionDomain"), + MustCreateRegex("executionOrg"), + MustCreateRegex("generatedName"), + MustCreateRegex("agentID"), + MustCreateRegex("connectorID"), + } +} + +var defaultRegexes = initDefaultRegexes() + +func replaceAll(template string, vars []TemplateVar) string { + for _, v := range vars { + if len(v.Value) > 0 { + template = v.Regex.ReplaceAllLiteralString(template, v.Value) + } + } + return template +} + +func (input Input) templateVars() []TemplateVar { + vars := []TemplateVar{ + TemplateVar{defaultRegexes.LogName, input.LogName}, + } + + gotExtraTemplateVars := input.ExtraTemplateVars != nil + if gotExtraTemplateVars { + vars = append(vars, input.ExtraTemplateVars...) + } + + // Container IDs are prefixed with docker://, cri-o://, etc. which is stripped by fluentd before pushing to a log + // stream. Therefore, we must also strip the prefix. 
+ containerID := input.ContainerID + stripDelimiter := "://" + if split := strings.Split(input.ContainerID, stripDelimiter); len(split) > 1 { + containerID = split[1] + } + vars = append( + vars, + TemplateVar{defaultRegexes.PodName, input.PodName}, + TemplateVar{defaultRegexes.PodUID, input.PodUID}, + TemplateVar{defaultRegexes.Namespace, input.Namespace}, + TemplateVar{defaultRegexes.ContainerName, input.ContainerName}, + TemplateVar{defaultRegexes.ContainerID, containerID}, + TemplateVar{defaultRegexes.Hostname, input.HostName}, + ) + + if input.AgentID != "" { + vars = append(vars, TemplateVar{defaultRegexes.AgentID, input.AgentID}) + } + if input.ConnectorID != "" { + vars = append(vars, TemplateVar{defaultRegexes.ConnectorID, input.ConnectorID}) + } + + if input.TaskExecutionID != nil { + taskExecutionIdentifier := input.TaskExecutionID.GetID() + vars = append( + vars, + TemplateVar{ + defaultRegexes.NodeID, + input.TaskExecutionID.GetUniqueNodeID(), + }, + TemplateVar{ + defaultRegexes.GeneratedName, + input.TaskExecutionID.GetGeneratedName(), + }, + TemplateVar{ + defaultRegexes.TaskRetryAttempt, + strconv.FormatUint(uint64(taskExecutionIdentifier.RetryAttempt), 10), + }, + ) + if taskExecutionIdentifier.TaskId != nil { + vars = append( + vars, + TemplateVar{ + defaultRegexes.TaskID, + taskExecutionIdentifier.TaskId.Name, + }, + TemplateVar{ + defaultRegexes.TaskVersion, + taskExecutionIdentifier.TaskId.Version, + }, + TemplateVar{ + defaultRegexes.TaskOrg, + taskExecutionIdentifier.TaskId.Org, + }, + TemplateVar{ + defaultRegexes.TaskProject, + taskExecutionIdentifier.TaskId.Project, + }, + TemplateVar{ + defaultRegexes.TaskDomain, + taskExecutionIdentifier.TaskId.Domain, + }, + ) + } + if taskExecutionIdentifier.NodeExecutionId != nil && taskExecutionIdentifier.NodeExecutionId.ExecutionId != nil { + vars = append( + vars, + TemplateVar{ + defaultRegexes.ExecutionName, + taskExecutionIdentifier.NodeExecutionId.ExecutionId.Name, + }, + TemplateVar{ + 
defaultRegexes.ExecutionProject, + taskExecutionIdentifier.NodeExecutionId.ExecutionId.Project, + }, + TemplateVar{ + defaultRegexes.ExecutionDomain, + taskExecutionIdentifier.NodeExecutionId.ExecutionId.Domain, + }, + TemplateVar{ + defaultRegexes.ExecutionOrg, + taskExecutionIdentifier.NodeExecutionId.ExecutionId.Org, + }, + ) + } + } + + vars = append( + vars, + TemplateVar{defaultRegexes.PodRFC3339StartTime, input.PodRFC3339StartTime}, + TemplateVar{defaultRegexes.PodRFC3339FinishTime, input.PodRFC3339FinishTime}, + TemplateVar{ + defaultRegexes.PodUnixStartTime, + strconv.FormatInt(input.PodUnixStartTime, 10), + }, + TemplateVar{ + defaultRegexes.PodUnixFinishTime, + strconv.FormatInt(input.PodUnixFinishTime, 10), + }, + ) + + return vars +} + +func getDynamicLogLinkTypes(input Input) []string { + var dynamicLogLinkTypes []string + if input.EnableVscode { + dynamicLogLinkTypes = []string{vscode} + } + + if input.TaskTemplate == nil { + return dynamicLogLinkTypes + } + + config := input.TaskTemplate.GetConfig() + if config == nil { + return dynamicLogLinkTypes + } + linkType := config["link_type"] + if linkType == "" { + return dynamicLogLinkTypes + } + logLinkTypes := append(strings.Split(linkType, ","), dynamicLogLinkTypes...) 
+	slices.Sort(logLinkTypes)
+	return slices.Compact(logLinkTypes)
+}
+
+func (p TemplateLogPlugin) GetTaskLogs(input Input) (Output, error) {
+	templateVars := input.templateVars()
+	linkType := core.TaskLog_EXTERNAL
+	if len(p.LinkType) != 0 {
+		linkType = core.TaskLog_LinkType(core.TaskLog_LinkType_value[strings.ToUpper(p.LinkType)])
+	}
+
+	taskLogs := make([]*core.TaskLog, 0, len(p.TemplateURIs))
+	for _, templateURI := range p.TemplateURIs {
+		taskLogs = append(taskLogs, &core.TaskLog{
+			Uri:              replaceAll(templateURI, templateVars),
+			Name:             replaceAll(p.DisplayName, templateVars) + replaceAll(input.LogName, templateVars),
+			MessageFormat:    p.MessageFormat,
+			ShowWhilePending: p.ShowWhilePending,
+			HideOnceFinished: p.HideOnceFinished,
+			LinkType:         linkType,
+			Ready:            true,
+		})
+	}
+
+	for _, dynamicLogLinkType := range getDynamicLogLinkTypes(input) {
+		for _, dynamicTemplateURI := range p.DynamicTemplateURIs {
+			if p.Name == dynamicLogLinkType {
+				for _, match := range taskConfigVarRegex.FindAllStringSubmatch(dynamicTemplateURI, -1) {
+					if len(match) > 1 {
+						if value, found := input.TaskTemplate.GetConfig()[match[1]]; found {
+							templateVars = append(templateVars, TemplateVar{MustCreateDynamicLogRegex(match[1]), value})
+						}
+					}
+				}
+				dynamicLinkType := linkType
+				if dynamicLogLinkType == vscode {
+					dynamicLinkType = core.TaskLog_IDE
+				}
+				taskLogs = append(taskLogs, &core.TaskLog{
+					Uri:              replaceAll(dynamicTemplateURI, templateVars),
+					Name:             p.DisplayName + input.LogName,
+					MessageFormat:    p.MessageFormat,
+					ShowWhilePending: p.ShowWhilePending,
+					HideOnceFinished: p.HideOnceFinished,
+					LinkType:         dynamicLinkType,
+					Ready:            true,
+				})
+			}
+		}
+	}
+
+	return Output{TaskLogs: taskLogs}, nil
+}
diff --git a/flyteplugins/go/tasks/pluginmachinery/tasklog/template_test.go b/flyteplugins/go/tasks/pluginmachinery/tasklog/template_test.go
new file mode 100644
index 0000000000..5c5bd16e5a
--- /dev/null
+++ b/flyteplugins/go/tasks/pluginmachinery/tasklog/template_test.go
@@ -0,0 +1,681 @@
+package tasklog
+
+import (
+	
"reflect" + "regexp" + "testing" + + "github.com/stretchr/testify/assert" + + pluginCore "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + coreMocks "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core/mocks" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +// Latest Run: Benchmark_mustInitTemplateRegexes-16 45960 26914 ns/op +func Benchmark_initDefaultRegexes(b *testing.B) { + for i := 0; i < b.N; i++ { + initDefaultRegexes() + } +} + +func dummyTaskExecID() pluginCore.TaskExecutionID { + tID := &coreMocks.TaskExecutionID{} + tID.OnGetGeneratedName().Return("generated-name") + tID.OnGetID().Return(core.TaskExecutionIdentifier{ + TaskId: &core.Identifier{ + ResourceType: core.ResourceType_TASK, + Name: "my-task-name", + Project: "my-task-project", + Domain: "my-task-domain", + Version: "1", + Org: "my-task-org", + }, + NodeExecutionId: &core.NodeExecutionIdentifier{ + ExecutionId: &core.WorkflowExecutionIdentifier{ + Name: "my-execution-name", + Project: "my-execution-project", + Domain: "my-execution-domain", + Org: "my-execution-org", + }, + }, + RetryAttempt: 1, + }) + tID.OnGetUniqueNodeID().Return("n0-0-n0") + return tID +} + +func Test_Input_templateVars(t *testing.T) { + testRegexes := struct { + Foo *regexp.Regexp + Bar *regexp.Regexp + Baz *regexp.Regexp + Ham *regexp.Regexp + Spam *regexp.Regexp + LinkType *regexp.Regexp + Port *regexp.Regexp + }{ + MustCreateRegex("foo"), + MustCreateRegex("bar"), + MustCreateRegex("baz"), + MustCreateRegex("ham"), + MustCreateRegex("spam"), + MustCreateDynamicLogRegex("link_type"), + MustCreateDynamicLogRegex("port"), + } + podBase := Input{ + HostName: "my-host", + PodName: "my-pod", + PodUID: "my-pod-uid", + Namespace: "my-namespace", + ContainerName: "my-container", + ContainerID: "docker://containerID", + LogName: "main_logs", + PodRFC3339StartTime: "1970-01-01T01:02:03+01:00", + PodRFC3339FinishTime: "1970-01-01T04:25:45+01:00", + PodUnixStartTime: 123, + 
PodUnixFinishTime: 12345, + } + taskExecutionBase := Input{ + LogName: "main_logs", + TaskExecutionID: dummyTaskExecID(), + PodRFC3339StartTime: "1970-01-01T01:02:03+01:00", + PodRFC3339FinishTime: "1970-01-01T04:25:45+01:00", + PodUnixStartTime: 123, + PodUnixFinishTime: 12345, + } + + tests := []struct { + name string + baseVars Input + extraVars []TemplateVar + exact []TemplateVar + contains []TemplateVar + notContains []TemplateVar + }{ + { + "pod happy path", + podBase, + nil, + []TemplateVar{ + {defaultRegexes.LogName, "main_logs"}, + {defaultRegexes.PodName, "my-pod"}, + {defaultRegexes.PodUID, "my-pod-uid"}, + {defaultRegexes.Namespace, "my-namespace"}, + {defaultRegexes.ContainerName, "my-container"}, + {defaultRegexes.ContainerID, "containerID"}, + {defaultRegexes.Hostname, "my-host"}, + {defaultRegexes.PodRFC3339StartTime, "1970-01-01T01:02:03+01:00"}, + {defaultRegexes.PodRFC3339FinishTime, "1970-01-01T04:25:45+01:00"}, + {defaultRegexes.PodUnixStartTime, "123"}, + {defaultRegexes.PodUnixFinishTime, "12345"}, + }, + nil, + nil, + }, + { + "pod with extra vars", + podBase, + []TemplateVar{ + {testRegexes.Foo, "foo"}, + {testRegexes.Bar, "bar"}, + {testRegexes.Baz, "baz"}, + }, + nil, + []TemplateVar{ + {testRegexes.Foo, "foo"}, + {testRegexes.Bar, "bar"}, + {testRegexes.Baz, "baz"}, + }, + nil, + }, + { + "task execution happy path", + taskExecutionBase, + nil, + []TemplateVar{ + {defaultRegexes.LogName, "main_logs"}, + {defaultRegexes.PodName, ""}, + {defaultRegexes.PodUID, ""}, + {defaultRegexes.Namespace, ""}, + {defaultRegexes.ContainerName, ""}, + {defaultRegexes.ContainerID, ""}, + {defaultRegexes.Hostname, ""}, + {defaultRegexes.NodeID, "n0-0-n0"}, + {defaultRegexes.GeneratedName, "generated-name"}, + {defaultRegexes.TaskRetryAttempt, "1"}, + {defaultRegexes.TaskID, "my-task-name"}, + {defaultRegexes.TaskVersion, "1"}, + {defaultRegexes.TaskOrg, "my-task-org"}, + {defaultRegexes.TaskProject, "my-task-project"}, + {defaultRegexes.TaskDomain, 
"my-task-domain"}, + {defaultRegexes.ExecutionName, "my-execution-name"}, + {defaultRegexes.ExecutionProject, "my-execution-project"}, + {defaultRegexes.ExecutionDomain, "my-execution-domain"}, + {defaultRegexes.ExecutionOrg, "my-execution-org"}, + {defaultRegexes.PodRFC3339StartTime, "1970-01-01T01:02:03+01:00"}, + {defaultRegexes.PodRFC3339FinishTime, "1970-01-01T04:25:45+01:00"}, + {defaultRegexes.PodUnixStartTime, "123"}, + {defaultRegexes.PodUnixFinishTime, "12345"}, + }, + nil, + nil, + }, + { + "task execution with extra vars", + taskExecutionBase, + []TemplateVar{ + {testRegexes.Foo, "foo"}, + {testRegexes.Bar, "bar"}, + {testRegexes.Baz, "baz"}, + }, + nil, + []TemplateVar{ + {testRegexes.Foo, "foo"}, + {testRegexes.Bar, "bar"}, + {testRegexes.Baz, "baz"}, + }, + nil, + }, + { + "pod with port not affected", + podBase, + nil, + nil, + nil, + []TemplateVar{ + {testRegexes.Port, "1234"}, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + base := tt.baseVars + base.ExtraTemplateVars = tt.extraVars + got := base.templateVars() + if tt.exact != nil { + assert.Equal(t, got, tt.exact) + } + if tt.contains != nil { + for _, c := range tt.contains { + assert.Contains(t, got, c) + } + } + if tt.notContains != nil { + for _, c := range tt.notContains { + assert.NotContains(t, got, c) + } + } + }) + } +} + +func TestTemplateLogPlugin(t *testing.T) { + type args struct { + input Input + } + tests := []struct { + name string + plugin TemplateLogPlugin + args args + want Output + }{ + { + "cloudwatch", + TemplateLogPlugin{ + TemplateURIs: []TemplateURI{"https://console.aws.amazon.com/cloudwatch/home?region=us-east-1#logEventViewer:group=/flyte-production/kubernetes;stream=var.log.containers.{{.podName}}_{{.namespace}}_{{.containerName}}-{{.containerId}}.log"}, + MessageFormat: core.TaskLog_JSON, + }, + args{ + input: Input{ + PodName: "f-uuid-driver", + PodUID: "pod-uid", + Namespace: "flyteexamples-production", + ContainerName: 
"spark-kubernetes-driver", + ContainerID: "cri-o://abc", + LogName: "main_logs", + PodRFC3339StartTime: "1970-01-01T01:02:03+01:00", + PodRFC3339FinishTime: "1970-01-01T04:25:45+01:00", + PodUnixStartTime: 123, + PodUnixFinishTime: 12345, + }, + }, + Output{TaskLogs: []*core.TaskLog{{ + Uri: "https://console.aws.amazon.com/cloudwatch/home?region=us-east-1#logEventViewer:group=/flyte-production/kubernetes;stream=var.log.containers.f-uuid-driver_flyteexamples-production_spark-kubernetes-driver-abc.log", + MessageFormat: core.TaskLog_JSON, + Name: "main_logs", + Ready: true, + }}}, + }, + { + "stackdriver", + TemplateLogPlugin{ + TemplateURIs: []TemplateURI{"https://console.cloud.google.com/logs/viewer?project=test-gcp-project&angularJsUrl=%2Flogs%2Fviewer%3Fproject%3Dtest-gcp-project&resource=aws_ec2_instance&advancedFilter=resource.labels.pod_name%3D{{.podName}}"}, + MessageFormat: core.TaskLog_JSON, + }, + args{ + input: Input{ + PodName: "podName", + PodUID: "pod-uid", + Namespace: "flyteexamples-production", + ContainerName: "spark-kubernetes-driver", + ContainerID: "cri-o://abc", + LogName: "main_logs", + PodRFC3339StartTime: "1970-01-01T01:02:03+01:00", + PodRFC3339FinishTime: "1970-01-01T04:25:45+01:00", + PodUnixStartTime: 123, + PodUnixFinishTime: 12345, + }, + }, + Output{TaskLogs: []*core.TaskLog{{ + Uri: "https://console.cloud.google.com/logs/viewer?project=test-gcp-project&angularJsUrl=%2Flogs%2Fviewer%3Fproject%3Dtest-gcp-project&resource=aws_ec2_instance&advancedFilter=resource.labels.pod_name%3DpodName", + MessageFormat: core.TaskLog_JSON, + Name: "main_logs", + Ready: true, + }}}, + }, + { + "kubernetes", + TemplateLogPlugin{ + TemplateURIs: []TemplateURI{"https://dashboard.k8s.net/#!/log/{{.namespace}}/{{.podName}}/pod?namespace={{.namespace}}"}, + MessageFormat: core.TaskLog_JSON, + }, + args{ + input: Input{ + PodName: "flyteexamples-development-task-name", + PodUID: "pod-uid", + Namespace: "flyteexamples-development", + ContainerName: "ignore", + 
ContainerID: "ignore", + LogName: "main_logs", + PodRFC3339StartTime: "1970-01-01T01:02:03+01:00", + PodRFC3339FinishTime: "1970-01-01T04:25:45+01:00", + PodUnixStartTime: 123, + PodUnixFinishTime: 12345, + }, + }, + Output{TaskLogs: []*core.TaskLog{{ + Uri: "https://dashboard.k8s.net/#!/log/flyteexamples-development/flyteexamples-development-task-name/pod?namespace=flyteexamples-development", + MessageFormat: core.TaskLog_JSON, + Name: "main_logs", + Ready: true, + }}}, + }, + { + "splunk", + TemplateLogPlugin{ + TemplateURIs: []TemplateURI{"https://prd-p-ighar.splunkcloud.com/en-US/app/search/search?q=search%20container_name%3D%22{{ .containerName }}%22"}, + MessageFormat: core.TaskLog_JSON, + }, + args{ + input: Input{ + HostName: "my-host", + PodName: "my-pod", + Namespace: "my-namespace", + ContainerName: "my-container", + ContainerID: "ignore", + LogName: "main_logs", + PodRFC3339StartTime: "1970-01-01T01:02:03+01:00", + PodRFC3339FinishTime: "1970-01-01T04:25:45+01:00", + PodUnixStartTime: 123, + PodUnixFinishTime: 12345, + }, + }, + Output{ + TaskLogs: []*core.TaskLog{ + { + Uri: "https://prd-p-ighar.splunkcloud.com/en-US/app/search/search?q=search%20container_name%3D%22my-container%22", + MessageFormat: core.TaskLog_JSON, + Name: "main_logs", + Ready: true, + }, + }, + }, + }, + { + "ddog", + TemplateLogPlugin{ + TemplateURIs: []TemplateURI{"https://app.datadoghq.com/logs?event&from_ts={{ .podUnixStartTime }}&live=true&query=pod_name%3A{{ .podName }}&to_ts={{ .podUnixFinishTime }}"}, + MessageFormat: core.TaskLog_JSON, + }, + args{ + input: Input{ + HostName: "my-host", + PodName: "my-pod", + Namespace: "my-namespace", + ContainerName: "my-container", + ContainerID: "ignore", + LogName: "main_logs", + PodRFC3339StartTime: "1970-01-01T01:02:03+01:00", + PodRFC3339FinishTime: "1970-01-01T04:25:45+01:00", + PodUnixStartTime: 123, + PodUnixFinishTime: 12345, + }, + }, + Output{ + TaskLogs: []*core.TaskLog{ + { + Uri: 
"https://app.datadoghq.com/logs?event&from_ts=123&live=true&query=pod_name%3Amy-pod&to_ts=12345", + MessageFormat: core.TaskLog_JSON, + Name: "main_logs", + Ready: true, + }, + }, + }, + }, + { + "stackdriver-with-rfc3339-timestamp", + TemplateLogPlugin{ + TemplateURIs: []TemplateURI{"https://console.cloud.google.com/logs/viewer?project=test-gcp-project&angularJsUrl=%2Flogs%2Fviewer%3Fproject%3Dtest-gcp-project&resource=aws_ec2_instance&advancedFilter=resource.labels.pod_name%3D{{.podName}}%20%22{{.podRFC3339StartTime}}%22"}, + MessageFormat: core.TaskLog_JSON, + }, + args{ + input: Input{ + HostName: "my-host", + PodName: "my-pod", + Namespace: "my-namespace", + ContainerName: "my-container", + ContainerID: "ignore", + LogName: "main_logs", + PodRFC3339StartTime: "1970-01-01T01:02:03+01:00", + PodRFC3339FinishTime: "1970-01-01T04:25:45+01:00", + PodUnixStartTime: 123, + PodUnixFinishTime: 12345, + }, + }, + Output{ + TaskLogs: []*core.TaskLog{ + { + Uri: "https://console.cloud.google.com/logs/viewer?project=test-gcp-project&angularJsUrl=%2Flogs%2Fviewer%3Fproject%3Dtest-gcp-project&resource=aws_ec2_instance&advancedFilter=resource.labels.pod_name%3Dmy-pod%20%221970-01-01T01:02:03+01:00%22", + MessageFormat: core.TaskLog_JSON, + Name: "main_logs", + Ready: true, + }, + }, + }, + }, + { + "task-with-task-execution-identifier", + TemplateLogPlugin{ + TemplateURIs: []TemplateURI{"https://flyte.corp.net/console/projects/{{ .executionProject }}/domains/{{ .executionDomain }}/executions/{{ .executionName }}/nodeId/{{ .nodeID }}/taskId/{{ .taskID }}/attempt/{{ .taskRetryAttempt }}/view/logs"}, + MessageFormat: core.TaskLog_JSON, + }, + args{ + input: Input{ + HostName: "my-host", + PodName: "my-pod", + Namespace: "my-namespace", + ContainerName: "my-container", + ContainerID: "ignore", + LogName: "main_logs", + PodRFC3339StartTime: "1970-01-01T01:02:03+01:00", + PodRFC3339FinishTime: "1970-01-01T04:25:45+01:00", + PodUnixStartTime: 123, + PodUnixFinishTime: 12345, + 
TaskExecutionID: dummyTaskExecID(), + }, + }, + Output{ + TaskLogs: []*core.TaskLog{ + { + Uri: "https://flyte.corp.net/console/projects/my-execution-project/domains/my-execution-domain/executions/my-execution-name/nodeId/n0-0-n0/taskId/my-task-name/attempt/1/view/logs", + MessageFormat: core.TaskLog_JSON, + Name: "main_logs", + Ready: true, + }, + }, + }, + }, + { + "task-with-task-execution-identifier-with-org", + TemplateLogPlugin{ + TemplateURIs: []TemplateURI{"https://flyte.corp.net/console/org/{{ .executionOrg }}/projects/{{ .executionProject }}/domains/{{ .executionDomain }}/executions/{{ .executionName }}/nodeId/{{ .nodeID }}/taskId/{{ .taskID }}/taskOrg/{{ .taskOrg }}/attempt/{{ .taskRetryAttempt }}/view/logs"}, + MessageFormat: core.TaskLog_JSON, + }, + args{ + input: Input{ + HostName: "my-host", + PodName: "my-pod", + Namespace: "my-namespace", + ContainerName: "my-container", + ContainerID: "ignore", + LogName: "main_logs", + PodRFC3339StartTime: "1970-01-01T01:02:03+01:00", + PodRFC3339FinishTime: "1970-01-01T04:25:45+01:00", + PodUnixStartTime: 123, + PodUnixFinishTime: 12345, + TaskExecutionID: dummyTaskExecID(), + }, + }, + Output{ + TaskLogs: []*core.TaskLog{ + { + Uri: "https://flyte.corp.net/console/org/my-execution-org/projects/my-execution-project/domains/my-execution-domain/executions/my-execution-name/nodeId/n0-0-n0/taskId/my-task-name/taskOrg/my-task-org/attempt/1/view/logs", + MessageFormat: core.TaskLog_JSON, + Name: "main_logs", + Ready: true, + }, + }, + }, + }, + { + "mapped-task-with-task-execution-identifier", + TemplateLogPlugin{ + TemplateURIs: []TemplateURI{"https://flyte.corp.net/console/projects/{{ .executionProject }}/domains/{{ .executionDomain }}/executions/{{ .executionName }}/nodeId/{{ .nodeID }}/taskId/{{ .taskID }}/attempt/{{ .subtaskParentRetryAttempt }}/mappedIndex/{{ .subtaskExecutionIndex }}/mappedAttempt/{{ .subtaskRetryAttempt }}/view/logs"}, + MessageFormat: core.TaskLog_JSON, + }, + args{ + input: Input{ + 
HostName: "my-host", + PodName: "my-pod", + Namespace: "my-namespace", + ContainerName: "my-container", + ContainerID: "ignore", + LogName: "main_logs", + PodRFC3339StartTime: "1970-01-01T01:02:03+01:00", + PodRFC3339FinishTime: "1970-01-01T04:25:45+01:00", + PodUnixStartTime: 123, + PodUnixFinishTime: 12345, + TaskExecutionID: dummyTaskExecID(), + ExtraTemplateVars: []TemplateVar{ + {MustCreateRegex("subtaskExecutionIndex"), "1"}, + {MustCreateRegex("subtaskRetryAttempt"), "1"}, + {MustCreateRegex("subtaskParentRetryAttempt"), "0"}, + }, + }, + }, + Output{ + TaskLogs: []*core.TaskLog{ + { + Uri: "https://flyte.corp.net/console/projects/my-execution-project/domains/my-execution-domain/executions/my-execution-name/nodeId/n0-0-n0/taskId/my-task-name/attempt/0/mappedIndex/1/mappedAttempt/1/view/logs", + MessageFormat: core.TaskLog_JSON, + Name: "main_logs", + Ready: true, + }, + }, + }, + }, + { + "flyteinteractive", + TemplateLogPlugin{ + Name: vscode, + DynamicTemplateURIs: []TemplateURI{"vscode://flyteinteractive:{{ .taskConfig.port }}/{{ .podName }}"}, + MessageFormat: core.TaskLog_JSON, + }, + args{ + input: Input{ + PodName: "my-pod-name", + TaskTemplate: &core.TaskTemplate{ + Config: map[string]string{ + "link_type": vscode, + "port": "1234", + }, + }, + }, + }, + Output{ + TaskLogs: []*core.TaskLog{ + { + Uri: "vscode://flyteinteractive:1234/my-pod-name", + MessageFormat: core.TaskLog_JSON, + LinkType: core.TaskLog_IDE, + Ready: true, + }, + }, + }, + }, + { + "flyteinteractive", + TemplateLogPlugin{ + Name: "vscode", + DynamicTemplateURIs: []TemplateURI{"vscode://flyteinteractive:{{ .taskConfig.port }}/{{ .podName }}"}, + MessageFormat: core.TaskLog_JSON, + HideOnceFinished: true, + ShowWhilePending: true, + }, + args{ + input: Input{ + PodName: "my-pod-name", + TaskTemplate: &core.TaskTemplate{ + Config: map[string]string{ + "link_type": "vscode", + "port": "1234", + }, + }, + }, + }, + Output{ + TaskLogs: []*core.TaskLog{ + { + Uri: 
"vscode://flyteinteractive:1234/my-pod-name", + MessageFormat: core.TaskLog_JSON, + ShowWhilePending: true, + HideOnceFinished: true, + LinkType: core.TaskLog_IDE, + Ready: true, + }, + }, + }, + }, + { + "flyteinteractive - no link_type in task template", + TemplateLogPlugin{ + Name: vscode, + DynamicTemplateURIs: []TemplateURI{"vscode://flyteinteractive:{{ .taskConfig.port }}/{{ .podName }}"}, + MessageFormat: core.TaskLog_JSON, + DisplayName: "Flyteinteractive Logs", + }, + args{ + input: Input{ + PodName: "my-pod-name", + }, + }, + Output{ + TaskLogs: []*core.TaskLog{}, + }, + }, + { + "kubernetes", + TemplateLogPlugin{ + TemplateURIs: []TemplateURI{"https://dashboard.k8s.net/#!/log/{{.namespace}}/{{.podName}}/pod?namespace={{.namespace}}"}, + MessageFormat: core.TaskLog_JSON, + }, + args{ + input: Input{ + PodName: "flyteexamples-development-task-name", + PodUID: "pod-uid", + Namespace: "flyteexamples-development", + ContainerName: "ignore", + ContainerID: "ignore", + LogName: "main_logs", + PodRFC3339StartTime: "1970-01-01T01:02:03+01:00", + PodRFC3339FinishTime: "1970-01-01T04:25:45+01:00", + PodUnixStartTime: 123, + PodUnixFinishTime: 12345, + }, + }, + Output{TaskLogs: []*core.TaskLog{{ + Uri: "https://dashboard.k8s.net/#!/log/flyteexamples-development/flyteexamples-development-task-name/pod?namespace=flyteexamples-development", + MessageFormat: core.TaskLog_JSON, + Name: "main_logs", + Ready: true, + }}}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := tt.plugin.GetTaskLogs(tt.args.input) + assert.NoError(t, err) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("GetTaskLogs() got = %v, want %v", got, tt.want) + } + }) + } +} + +func TestGetDynamicLogLinkTypes(t *testing.T) { + linkTypes := getDynamicLogLinkTypes(Input{}) + assert.Nil(t, linkTypes) + + linkTypes = getDynamicLogLinkTypes(Input{ + PodName: "my-pod-name", + TaskTemplate: &core.TaskTemplate{}, + }) + assert.Nil(t, linkTypes) + + // Test that empty 
input with vscode enabled returns vscode + linkTypes = getDynamicLogLinkTypes(Input{ + EnableVscode: true, + }) + assert.Equal(t, []string{vscode}, linkTypes) + + // Test that nil TaskTemplate returns dynamicLogLinkTypes when vscode enabled + linkTypes = getDynamicLogLinkTypes(Input{ + EnableVscode: true, + TaskTemplate: nil, + }) + assert.Equal(t, []string{vscode}, linkTypes) + + linkTypes = getDynamicLogLinkTypes(Input{ + EnableVscode: true, + TaskTemplate: &core.TaskTemplate{ + Config: map[string]string{ + "link_type": vscode, + "port": "8080", + }, + }, + }) + assert.Equal(t, []string{vscode}, linkTypes) + + linkTypes = getDynamicLogLinkTypes(Input{ + EnableVscode: true, + TaskTemplate: &core.TaskTemplate{ + Config: map[string]string{ + "link_type": "vscode,vscode", + }, + }, + }) + assert.Equal(t, []string{vscode}, linkTypes) + + linkTypes = getDynamicLogLinkTypes(Input{ + PodName: "my-pod-name", + TaskTemplate: &core.TaskTemplate{ + Config: map[string]string{ + "link_type": vscode, + "port": "8080", + }, + }, + }) + assert.Equal(t, []string{vscode}, linkTypes) + linkTypes = getDynamicLogLinkTypes(Input{ + PodName: "my-pod-name", + TaskTemplate: &core.TaskTemplate{ + Config: map[string]string{ + "link_type": "wandb", + "port": "8080", + }, + }, + EnableVscode: true, + }) + assert.Equal(t, []string{vscode, "wandb"}, linkTypes) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/tasklog/templatescheme_enumer.go b/flyteplugins/go/tasks/pluginmachinery/tasklog/templatescheme_enumer.go new file mode 100644 index 0000000000..70f15faf01 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/tasklog/templatescheme_enumer.go @@ -0,0 +1,84 @@ +// Code generated by "enumer --type=TemplateScheme --trimprefix=TemplateScheme -json -yaml"; DO NOT EDIT. 
+ +package tasklog + +import ( + "encoding/json" + "fmt" +) + +const _TemplateSchemeName = "PodTaskExecution" + +var _TemplateSchemeIndex = [...]uint8{0, 3, 16} + +func (i TemplateScheme) String() string { + if i < 0 || i >= TemplateScheme(len(_TemplateSchemeIndex)-1) { + return fmt.Sprintf("TemplateScheme(%d)", i) + } + return _TemplateSchemeName[_TemplateSchemeIndex[i]:_TemplateSchemeIndex[i+1]] +} + +var _TemplateSchemeValues = []TemplateScheme{0, 1} + +var _TemplateSchemeNameToValueMap = map[string]TemplateScheme{ + _TemplateSchemeName[0:3]: 0, + _TemplateSchemeName[3:16]: 1, +} + +// TemplateSchemeString retrieves an enum value from the enum constants string name. +// Throws an error if the param is not part of the enum. +func TemplateSchemeString(s string) (TemplateScheme, error) { + if val, ok := _TemplateSchemeNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to TemplateScheme values", s) +} + +// TemplateSchemeValues returns all values of the enum +func TemplateSchemeValues() []TemplateScheme { + return _TemplateSchemeValues +} + +// IsATemplateScheme returns "true" if the value is listed in the enum definition. 
"false" otherwise +func (i TemplateScheme) IsATemplateScheme() bool { + for _, v := range _TemplateSchemeValues { + if i == v { + return true + } + } + return false +} + +// MarshalJSON implements the json.Marshaler interface for TemplateScheme +func (i TemplateScheme) MarshalJSON() ([]byte, error) { + return json.Marshal(i.String()) +} + +// UnmarshalJSON implements the json.Unmarshaler interface for TemplateScheme +func (i *TemplateScheme) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return fmt.Errorf("TemplateScheme should be a string, got %s", data) + } + + var err error + *i, err = TemplateSchemeString(s) + return err +} + +// MarshalYAML implements a YAML Marshaler for TemplateScheme +func (i TemplateScheme) MarshalYAML() (interface{}, error) { + return i.String(), nil +} + +// UnmarshalYAML implements a YAML Unmarshaler for TemplateScheme +func (i *TemplateScheme) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + if err := unmarshal(&s); err != nil { + return err + } + + var err error + *i, err = TemplateSchemeString(s) + return err +} diff --git a/flyteplugins/go/tasks/pluginmachinery/utils/dns.go b/flyteplugins/go/tasks/pluginmachinery/utils/dns.go new file mode 100644 index 0000000000..fa7f7ae405 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/utils/dns.go @@ -0,0 +1,39 @@ +package utils + +import ( + "regexp" + "strings" + + "k8s.io/apimachinery/pkg/util/validation" + + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/encoding" + "github.com/flyteorg/flyte/v2/flytestdlib/utils" +) + +var dns1123InvalidRegex = regexp.MustCompile("[^-.a-z0-9]") +var camelCaseRegex = regexp.MustCompile("([a-z0-9])([A-Z])") + +// ConvertToDNS1123SubdomainCompatibleString converts a string that doesn't conform to the definition of a subdomain in DNS (RFC 1123) to a string that conforms. It doesn't do well on labels (separated by dots) starting or ending with hyphens. 
+func ConvertToDNS1123SubdomainCompatibleString(name string) string { + if errs := validation.IsDNS1123Subdomain(name); len(errs) == 0 { + return name + } + name = ConvertCamelCaseToKebabCase(name) // best effort to preserve readability for Java class name + name = strings.ToLower(name) + name = dns1123InvalidRegex.ReplaceAllString(name, "") + name = strings.Trim(name, ".-") + if len(name) > validation.DNS1123SubdomainMaxLength { + fixedLengthID, err := encoding.FixedLengthUniqueID(name, utils.MaxUniqueIDLength) + if err == nil { + name = name[:validation.DNS1123SubdomainMaxLength-utils.MaxUniqueIDLength-1] + "-" + fixedLengthID + } else { + name = name[:validation.DNS1123SubdomainMaxLength] + } + } + return name +} + +// ConvertCamelCaseToKebabCase rewrites a string written in camel case (e.g. PenPineappleApplePen) in kebab case (pen-pineapple-apple-pen) +func ConvertCamelCaseToKebabCase(name string) string { + return strings.ToLower(camelCaseRegex.ReplaceAllString(name, "${1}-${2}")) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/utils/dns_test.go b/flyteplugins/go/tasks/pluginmachinery/utils/dns_test.go new file mode 100644 index 0000000000..b33c2ef1b0 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/utils/dns_test.go @@ -0,0 +1,109 @@ +package utils + +import ( + "testing" + + "k8s.io/apimachinery/pkg/util/validation" +) + +func TestConvertToDNS1123CompatibleString(t *testing.T) { + type args struct { + name string + } + tests := []struct { + name string + args args + want string + }{ + { + name: "flytekit-java task execution", + args: args{"orgflyteexamplesHelloWorldTask-0"}, + want: "orgflyteexamples-hello-world-task-0", + }, + { + name: "good pod name", + args: args{"t7vyqhzju1-fib-5-0"}, + want: "t7vyqhzju1-fib-5-0", + }, + { + name: "good pod name with dots", + args: args{"t7v.yqh.zju1-fib-5-0"}, + want: "t7v.yqh.zju1-fib-5-0", + }, + { + name: "leading hyphen", + args: args{"-t7vyqhzju1-fib-5-0"}, + want: "t7vyqhzju1-fib-5-0", + }, + { + 
name: "leading dot", + args: args{".t7vyqhzju1-fib-5-0"}, + want: "t7vyqhzju1-fib-5-0", + }, + { + name: "trailing hyphen", + args: args{"t7vyqhzju1-fib-5-0-"}, + want: "t7vyqhzju1-fib-5-0", + }, + { + name: "trailing dot", + args: args{"t7vyqhzju1-fib-5-0."}, + want: "t7vyqhzju1-fib-5-0", + }, + { + name: "long name", + args: args{"0123456789012345678901234567890123456789012345678901234567890123456789"}, + want: "0123456789012345678901234567890123456789012345678901234567890123456789", + }, + { + name: "longer than max len (253)", + args: args{"0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789"}, + want: "012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901-fbbrvh4i", + }, + { + name: "very invalid name", + args: args{"---..t7vyqhzjJcI==u1-HelloWorldTask[].-.-."}, + want: "t7vyqhzj-jc-iu1-hello-world-task", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := ConvertToDNS1123SubdomainCompatibleString(tt.args.name) + if errs := validation.IsDNS1123Subdomain(got); len(errs) > 0 { + t.Errorf("ConvertToDNS1123SubdomainCompatibleString() = %v, which is not DNS-1123 subdomain compatible", got) + } + if got != tt.want { + t.Errorf("ConvertToDNS1123SubdomainCompatibleString() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestConvertCamelCaseToKebabCase(t *testing.T) { + type args struct { + name string + } + tests := []struct { + name string + args args + want string + }{ + { + name: "flytekit-java task execution", + args: args{"orgflyteexamplesHelloWorldTask"}, + want: "orgflyteexamples-hello-world-task", + }, + { + name: "good pod name", 
+ args: args{"t7vyqhzju1-fib-5-0"}, + want: "t7vyqhzju1-fib-5-0", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := ConvertCamelCaseToKebabCase(tt.args.name); got != tt.want { + t.Errorf("ConvertCamelCaseToKebabCase() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/flyteplugins/go/tasks/pluginmachinery/utils/error_collection.go b/flyteplugins/go/tasks/pluginmachinery/utils/error_collection.go new file mode 100644 index 0000000000..f833b994c2 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/utils/error_collection.go @@ -0,0 +1,19 @@ +package utils + +import ( + "fmt" + "strings" +) + +type ErrorCollection struct { + Errors []error +} + +func (e ErrorCollection) Error() string { + sb := strings.Builder{} + for idx, err := range e.Errors { + sb.WriteString(fmt.Sprintf("%v: %v\r\n", idx, err)) + } + + return sb.String() +} diff --git a/flyteplugins/go/tasks/pluginmachinery/utils/error_collection_test.go b/flyteplugins/go/tasks/pluginmachinery/utils/error_collection_test.go new file mode 100644 index 0000000000..dd53182511 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/utils/error_collection_test.go @@ -0,0 +1,22 @@ +package utils + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestErrorCollection(t *testing.T) { + ec := ErrorCollection{} + + assert.Empty(t, ec.Error()) + + ec.Errors = append(ec.Errors, fmt.Errorf("error1")) + assert.NotEmpty(t, ec.Error()) + + ec.Errors = append(ec.Errors, fmt.Errorf("error2")) + assert.NotEmpty(t, ec.Error()) + + assert.Equal(t, "0: error1\r\n1: error2\r\n", ec.Error()) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/utils/maps.go b/flyteplugins/go/tasks/pluginmachinery/utils/maps.go new file mode 100644 index 0000000000..2e0f9dbb91 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/utils/maps.go @@ -0,0 +1,19 @@ +package utils + +// This function unions a list of maps (each can be nil or populated) by allocating a new 
map. +// Conflicting keys will always defer to the later input map's corresponding value. +func UnionMaps(maps ...map[string]string) map[string]string { + size := 0 + for _, m := range maps { + size += len(m) + } + + composite := make(map[string]string, size) + for _, m := range maps { + for k, v := range m { + composite[k] = v + } + } + + return composite +} diff --git a/flyteplugins/go/tasks/pluginmachinery/utils/maps_test.go b/flyteplugins/go/tasks/pluginmachinery/utils/maps_test.go new file mode 100644 index 0000000000..485e811377 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/utils/maps_test.go @@ -0,0 +1,30 @@ +package utils + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestUnionMaps(t *testing.T) { + assert.EqualValues(t, map[string]string{ + "left": "only", + }, UnionMaps(map[string]string{ + "left": "only", + }, nil)) + + assert.EqualValues(t, map[string]string{ + "right": "only", + }, UnionMaps(nil, map[string]string{ + "right": "only", + })) + + assert.EqualValues(t, map[string]string{ + "left": "val", + "right": "val", + }, UnionMaps(map[string]string{ + "left": "val", + }, map[string]string{ + "right": "val", + })) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/utils/marshal_utils.go b/flyteplugins/go/tasks/pluginmachinery/utils/marshal_utils.go new file mode 100755 index 0000000000..d437b47a0e --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/utils/marshal_utils.go @@ -0,0 +1,92 @@ +package utils + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/golang/protobuf/jsonpb" + "github.com/golang/protobuf/proto" + structpb "github.com/golang/protobuf/ptypes/struct" +) + +var jsonPbMarshaler = jsonpb.Marshaler{} +var jsonPbUnmarshaler = &jsonpb.Unmarshaler{ + AllowUnknownFields: true, +} + +// Deprecated: Use flytestdlib/utils.UnmarshalStructToPb instead. 
+func UnmarshalStruct(structObj *structpb.Struct, msg proto.Message) error { + if structObj == nil { + return fmt.Errorf("nil Struct Object passed") + } + + jsonObj, err := jsonPbMarshaler.MarshalToString(structObj) + if err != nil { + return err + } + + if err = jsonPbUnmarshaler.Unmarshal(strings.NewReader(jsonObj), msg); err != nil { + return err + } + + return nil +} + +// Deprecated: Use flytestdlib/utils.MarshalPbToStruct instead. +func MarshalStruct(in proto.Message, out *structpb.Struct) error { + if out == nil { + return fmt.Errorf("nil Struct Object passed") + } + + jsonObj, err := jsonPbMarshaler.MarshalToString(in) + if err != nil { + return err + } + + if err = jsonpb.UnmarshalString(jsonObj, out); err != nil { + return err + } + + return nil +} + +// Deprecated: Use flytestdlib/utils.MarshalToString instead. +func MarshalToString(msg proto.Message) (string, error) { + return jsonPbMarshaler.MarshalToString(msg) +} + +// Deprecated: Use flytestdlib/utils.MarshalObjToStruct instead. +// Don't use this if input is a proto Message. +func MarshalObjToStruct(input interface{}) (*structpb.Struct, error) { + b, err := json.Marshal(input) + if err != nil { + return nil, err + } + + // Turn JSON into a protobuf struct + structObj := &structpb.Struct{} + if err := jsonpb.UnmarshalString(string(b), structObj); err != nil { + return nil, err + } + return structObj, nil +} + +// Deprecated: Use flytestdlib/utils.UnmarshalStructToObj instead. +// Don't use this if the unmarshalled obj is a proto message. 
+func UnmarshalStructToObj(structObj *structpb.Struct, obj interface{}) error { + if structObj == nil { + return fmt.Errorf("nil Struct Object passed") + } + + jsonObj, err := json.Marshal(structObj) + if err != nil { + return err + } + + if err = json.Unmarshal(jsonObj, obj); err != nil { + return err + } + + return nil +} diff --git a/flyteplugins/go/tasks/pluginmachinery/utils/marshal_utils_test.go b/flyteplugins/go/tasks/pluginmachinery/utils/marshal_utils_test.go new file mode 100644 index 0000000000..abe1b7d2a2 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/utils/marshal_utils_test.go @@ -0,0 +1,54 @@ +package utils + +import ( + "encoding/json" + "testing" + + "github.com/go-test/deep" + structpb "github.com/golang/protobuf/ptypes/struct" + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" +) + +func TestUnmarshalStructToObj(t *testing.T) { + t.Run("no nil structs allowed", func(t *testing.T) { + var podSpec v1.PodSpec + err := UnmarshalStructToObj(nil, &podSpec) + assert.EqualError(t, err, "nil Struct Object passed") + }) + podSpec := v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "a container", + }, + { + Name: "another container", + }, + }, + } + + b, err := json.Marshal(podSpec) + if err != nil { + t.Fatal(err) + } + + structObj := &structpb.Struct{} + if err := json.Unmarshal(b, structObj); err != nil { + t.Fatal(err) + } + + t.Run("no nil pointers as obj allowed", func(t *testing.T) { + var nilPodspec *v1.PodSpec + err := UnmarshalStructToObj(structObj, nilPodspec) + assert.EqualError(t, err, "json: Unmarshal(nil *v1.PodSpec)") + }) + + t.Run("happy case", func(t *testing.T) { + var podSpecObj v1.PodSpec + err := UnmarshalStructToObj(structObj, &podSpecObj) + assert.NoError(t, err) + if diff := deep.Equal(podSpecObj, podSpec); diff != nil { + t.Errorf("UnmarshalStructToObj() got = %v, want %v, diff: %v", podSpecObj, podSpec, diff) + } + }) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/utils/secrets/marshaler.go 
b/flyteplugins/go/tasks/pluginmachinery/utils/secrets/marshaler.go new file mode 100644 index 0000000000..162bd8a496 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/utils/secrets/marshaler.go @@ -0,0 +1,85 @@ +package secrets + +import ( + "fmt" + "strconv" + "strings" + + "github.com/golang/protobuf/proto" + + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/encoding" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +const ( + annotationPrefix = "flyte.secrets/s" + PodLabel = "inject-flyte-secrets" + PodLabelValue = "true" +) + +// Copied from: +// https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/validation/objectmeta.go#L36 +const totalAnnotationSizeLimitB int = 256 * (1 << 10) // 256 kB + +func encodeSecret(secretAsString string) string { + res := encoding.Base32Encoder.EncodeToString([]byte(secretAsString)) + return strings.TrimSuffix(res, "=") +} + +func decodeSecret(encoded string) (string, error) { + decodedRaw, err := encoding.Base32Encoder.DecodeString(encoded) + if err != nil { + return encoded, err + } + + return string(decodedRaw), nil +} + +func marshalSecret(s *core.Secret) string { + return encodeSecret(proto.MarshalTextString(s)) +} + +func unmarshalSecret(encoded string) (*core.Secret, error) { + decoded, err := decodeSecret(encoded) + if err != nil { + return nil, err + } + + s := &core.Secret{} + err = proto.UnmarshalText(decoded, s) + return s, err +} + +func MarshalSecretsToMapStrings(secrets []*core.Secret) (map[string]string, error) { + res := make(map[string]string, len(secrets)) + for index, s := range secrets { + if _, found := core.Secret_MountType_name[int32(s.MountRequirement)]; !found { + return nil, fmt.Errorf("invalid mount requirement [%v]", s.MountRequirement) + } + + encodedSecret := marshalSecret(s) + res[annotationPrefix+strconv.Itoa(index)] = encodedSecret + + if len(encodedSecret) > totalAnnotationSizeLimitB { + return nil, fmt.Errorf("secret 
descriptor cannot exceed [%v]", totalAnnotationSizeLimitB) + } + } + + return res, nil +} + +func UnmarshalStringMapToSecrets(m map[string]string) ([]*core.Secret, error) { + res := make([]*core.Secret, 0, len(m)) + for key, val := range m { + if strings.HasPrefix(key, annotationPrefix) { + s, err := unmarshalSecret(val) + if err != nil { + return nil, fmt.Errorf("error unmarshaling secret [%v]. Error: %w", key, err) + } + + res = append(res, s) + } + } + + return res, nil +} diff --git a/flyteplugins/go/tasks/pluginmachinery/utils/secrets/marshaler_test.go b/flyteplugins/go/tasks/pluginmachinery/utils/secrets/marshaler_test.go new file mode 100644 index 0000000000..b07899ee63 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/utils/secrets/marshaler_test.go @@ -0,0 +1,77 @@ +package secrets + +import ( + "reflect" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +func TestEncodeSecretGroup(t *testing.T) { + input := "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz01234567890._-/" + encoded := encodeSecret(input) + t.Log(input + " -> " + encoded) + decoded, err := decodeSecret(encoded) + assert.NoError(t, err) + assert.Equal(t, input, decoded) +} + +func TestMarshalSecretsToMapStrings(t *testing.T) { + type args struct { + secrets []*core.Secret + } + tests := []struct { + name string + args args + want map[string]string + wantErr bool + }{ + {name: "empty", args: args{secrets: []*core.Secret{}}, want: map[string]string{}, wantErr: false}, + {name: "nil", args: args{secrets: nil}, want: map[string]string{}, wantErr: false}, + {name: "forbidden characters", args: args{secrets: []*core.Secret{ + { + Group: ";':/\\", + }, + }}, want: map[string]string{ + "flyte.secrets/s0": "m4zg54lqhiqceozhhixvyxbcbi", + }, wantErr: false}, + {name: "Without group", args: args{secrets: []*core.Secret{ + { + Key: "my_key", + }, + }}, want: map[string]string{ + "flyte.secrets/s0": "nnsxsoraejwxsx2lmv3secq", + 
// CopyMap returns a shallow copy of o. A nil input yields a nil result
// (not an empty map), preserving the nil/empty distinction for callers.
func CopyMap(o map[string]string) map[string]string {
	if o == nil {
		return nil
	}
	r := make(map[string]string, len(o))
	for k, v := range o {
		r[k] = v
	}
	return r
}

// Contains reports whether e is an element of s.
func Contains(s []string, e string) bool {
	// Ranging over a nil slice iterates zero times, so the explicit nil
	// guard in the original was redundant and has been removed.
	for _, a := range s {
		if a == e {
			return true
		}
	}

	return false
}
"b")) + + assert.False(t, Contains([]string{"a", "b", "c"}, "spark")) + + assert.False(t, Contains([]string{}, "spark")) + + assert.False(t, Contains(nil, "b")) +} + +func TestCopyMap(t *testing.T) { + assert.Nil(t, CopyMap(nil)) + m := map[string]string{ + "l": "v", + } + assert.Equal(t, m, CopyMap(m)) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/webapi/example/config.go b/flyteplugins/go/tasks/pluginmachinery/webapi/example/config.go new file mode 100644 index 0000000000..9e330e4448 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/webapi/example/config.go @@ -0,0 +1,55 @@ +package example + +import ( + "time" + + pluginsConfig "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/config" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/webapi" + "github.com/flyteorg/flyte/v2/flytestdlib/config" +) + +//go:generate pflags Config --default-var=defaultConfig + +var ( + defaultConfig = Config{ + WebAPI: webapi.PluginConfig{ + ReadRateLimiter: webapi.RateLimiterConfig{ + Burst: 100, + QPS: 10, + }, + WriteRateLimiter: webapi.RateLimiterConfig{ + Burst: 100, + QPS: 10, + }, + Caching: webapi.CachingConfig{ + Size: 500000, + ResyncInterval: config.Duration{Duration: 30 * time.Second}, + Workers: 10, + }, + ResourceMeta: nil, + }, + + ResourceConstraints: core.ResourceConstraintsSpec{ + ProjectScopeResourceConstraint: &core.ResourceConstraint{ + Value: 100, + }, + NamespaceScopeResourceConstraint: &core.ResourceConstraint{ + Value: 50, + }, + }, + } + + configSection = pluginsConfig.MustRegisterSubSection("admin", &defaultConfig) +) + +// The config object for this plugin. +type Config struct { + // Contains the default configs needed for the webapi base implementation. 
+ WebAPI webapi.PluginConfig `json:"webApi" pflag:",Defines config for the base WebAPI plugin."` + ResourceConstraints core.ResourceConstraintsSpec `json:"resourceConstraints" pflag:"-,Defines resource constraints on how many executions to be created per project/overall at any given time."` +} + +func GetConfig() *Config { + return configSection.GetConfig().(*Config) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/webapi/example/config_flags.go b/flyteplugins/go/tasks/pluginmachinery/webapi/example/config_flags.go new file mode 100755 index 0000000000..e5b2f31a8f --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/webapi/example/config_flags.go @@ -0,0 +1,62 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package example + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. +func (Config) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (Config) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (Config) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in Config and its nested types. The format of the +// flags is json-name.json-sub-name... etc. 
+func (cfg Config) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("Config", pflag.ExitOnError) + cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "webApi.readRateLimiter.qps"), defaultConfig.WebAPI.ReadRateLimiter.QPS, "Defines the max rate of calls per second.") + cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "webApi.readRateLimiter.burst"), defaultConfig.WebAPI.ReadRateLimiter.Burst, "Defines the maximum burst size.") + cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "webApi.writeRateLimiter.qps"), defaultConfig.WebAPI.WriteRateLimiter.QPS, "Defines the max rate of calls per second.") + cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "webApi.writeRateLimiter.burst"), defaultConfig.WebAPI.WriteRateLimiter.Burst, "Defines the maximum burst size.") + cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "webApi.caching.size"), defaultConfig.WebAPI.Caching.Size, "Defines the maximum number of items to cache.") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "webApi.caching.resyncInterval"), defaultConfig.WebAPI.Caching.ResyncInterval.String(), "Defines the sync interval.") + cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "webApi.caching.workers"), defaultConfig.WebAPI.Caching.Workers, "Defines the number of workers to start up to process items.") + cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "webApi.caching.maxSystemFailures"), defaultConfig.WebAPI.Caching.MaxSystemFailures, "Defines the number of failures to fetch a task before failing the task.") + return cmdFlags +} diff --git a/flyteplugins/go/tasks/pluginmachinery/webapi/example/config_flags_test.go b/flyteplugins/go/tasks/pluginmachinery/webapi/example/config_flags_test.go new file mode 100755 index 0000000000..3db0fcf7a3 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/webapi/example/config_flags_test.go @@ -0,0 +1,214 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. 
+ +package example + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. +func canGetElementConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. +func jsonUnmarshalerHookConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. 
Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_Config(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_Config(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_Config(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_Config(val, result)) +} + +func testDecodeRaw_Config(t *testing.T, vStringSlice, result interface{}) { + assert.NoError(t, decode_Config(vStringSlice, result)) +} + +func TestConfig_GetPFlagSet(t *testing.T) { + val := Config{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) +} + +func TestConfig_SetFlags(t *testing.T) { + actual := Config{} + cmdFlags := actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_webApi.readRateLimiter.qps", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("webApi.readRateLimiter.qps", testValue) + if vInt, err := cmdFlags.GetInt("webApi.readRateLimiter.qps"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vInt), &actual.WebAPI.ReadRateLimiter.QPS) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_webApi.readRateLimiter.burst", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("webApi.readRateLimiter.burst", testValue) + 
if vInt, err := cmdFlags.GetInt("webApi.readRateLimiter.burst"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vInt), &actual.WebAPI.ReadRateLimiter.Burst) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_webApi.writeRateLimiter.qps", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("webApi.writeRateLimiter.qps", testValue) + if vInt, err := cmdFlags.GetInt("webApi.writeRateLimiter.qps"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vInt), &actual.WebAPI.WriteRateLimiter.QPS) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_webApi.writeRateLimiter.burst", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("webApi.writeRateLimiter.burst", testValue) + if vInt, err := cmdFlags.GetInt("webApi.writeRateLimiter.burst"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vInt), &actual.WebAPI.WriteRateLimiter.Burst) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_webApi.caching.size", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("webApi.caching.size", testValue) + if vInt, err := cmdFlags.GetInt("webApi.caching.size"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vInt), &actual.WebAPI.Caching.Size) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_webApi.caching.resyncInterval", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := defaultConfig.WebAPI.Caching.ResyncInterval.String() + + cmdFlags.Set("webApi.caching.resyncInterval", testValue) + if vString, err := cmdFlags.GetString("webApi.caching.resyncInterval"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.WebAPI.Caching.ResyncInterval) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_webApi.caching.workers", func(t *testing.T) { + + 
t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("webApi.caching.workers", testValue) + if vInt, err := cmdFlags.GetInt("webApi.caching.workers"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vInt), &actual.WebAPI.Caching.Workers) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_webApi.caching.maxSystemFailures", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("webApi.caching.maxSystemFailures", testValue) + if vInt, err := cmdFlags.GetInt("webApi.caching.maxSystemFailures"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vInt), &actual.WebAPI.Caching.MaxSystemFailures) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/webapi/example/config_test.go b/flyteplugins/go/tasks/pluginmachinery/webapi/example/config_test.go new file mode 100644 index 0000000000..6466d8179f --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/webapi/example/config_test.go @@ -0,0 +1,24 @@ +package example + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/flyteorg/flyte/v2/flytestdlib/config" + "github.com/flyteorg/flyte/v2/flytestdlib/config/viper" +) + +func TestGetConfig(t *testing.T) { + configAccessor := viper.NewAccessor(config.Options{ + StrictMode: true, + SearchPaths: []string{"testdata/admin_plugin.yaml"}, + }) + + err := configAccessor.UpdateConfig(context.TODO()) + assert.NoError(t, err) + + cfg := GetConfig() + assert.Len(t, cfg.WebAPI.ResourceQuotas, 1) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/webapi/example/plugin.go b/flyteplugins/go/tasks/pluginmachinery/webapi/example/plugin.go new file mode 100644 index 0000000000..acca2c87fa --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/webapi/example/plugin.go @@ -0,0 +1,120 @@ +package example + +import ( + "context" + "time" + + 
"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/webapi" + "github.com/flyteorg/flyte/v2/flytestdlib/errors" + "github.com/flyteorg/flyte/v2/flytestdlib/promutils" + idlCore "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +const ( + ErrRemoteSystem errors.ErrorCode = "RemoteSystem" + ErrRemoteUser errors.ErrorCode = "RemoteUser" + ErrSystem errors.ErrorCode = "System" +) + +type Plugin struct { + metricScope promutils.Scope + cfg *Config +} + +func (p Plugin) GetConfig() webapi.PluginConfig { + return GetConfig().WebAPI +} + +func (p Plugin) ResourceRequirements(_ context.Context, _ webapi.TaskExecutionContextReader) ( + namespace core.ResourceNamespace, constraints core.ResourceConstraintsSpec, err error) { + + // Resource requirements are assumed to be the same. + return "default", p.cfg.ResourceConstraints, nil +} + +func (p Plugin) Create(ctx context.Context, tCtx webapi.TaskExecutionContextReader) (resourceMeta webapi.ResourceMeta, + resource webapi.Resource, err error) { + + // In the create method, your code should understand the request and translate the flyte task template data into a + // format your webAPI expects then make the WebAPI call. + + // This snippet retrieves the TaskTemplate and unmarshals the custom field into a plugin-protobuf. + //task, err := tCtx.TaskReader().Read(ctx) + //if err != nil { + // return nil, nil, err + //} + + //custom := task.GetCustom() + //myPluginProtoStruct := &plugins.MyPluginProtoStruct{} + //err = utils.UnmarshalStructToPb(custom, myPluginProtoStruct) + //if err != nil { + // return nil, nil, err + //} + + // The system will invoke this API at least once. In cases when a network partition/failure causes the system to + // fail persisting this response, the system will call this API again. 
If it returns an error, it'll be called up to + // the system-wide defined limit of retries with exponential backoff and jitter in between these trials + + return "my-request-id", nil, nil +} + +// Get the resource that matches the keys. If the plugin hits any failure, it should stop and return the failure. +// This API will be called asynchronously and periodically to update the set of tasks currently in progress. It's +// acceptable if this API is blocking since it'll be called from a background go-routine. +// Best practices: +// 1. Instead of returning the entire response object retrieved from the WebAPI, construct a smaller object that +// has enough information to construct the status/phase, error and/or output. +// 2. This object will NOT be serialized/marshaled. It's, therefore, not a requirement to make it so. +// 3. There is already client-side throttling in place. If the WebAPI returns a throttling error, you should return +// it as is so that the appropriate metrics are updated and the system administrator can update throttling +// params accordingly. +func (p Plugin) Get(ctx context.Context, tCtx webapi.GetContext) (latest webapi.Resource, err error) { + return "my-resource", nil +} + +// Delete the object in the remote service using the resource key. Flyte will call this API at least once. If the +// resource has already been deleted, the API should not fail. +func (p Plugin) Delete(ctx context.Context, tCtx webapi.DeleteContext) error { + return nil +} + +// Status checks the status of a given resource and translates it to a Flyte-understandable PhaseInfo. This API +// should avoid making any network calls and should run very efficiently. 
+func (p Plugin) Status(ctx context.Context, tCtx webapi.StatusContext) (phase core.PhaseInfo, err error) { + tNow := time.Now() + return core.PhaseInfoSuccess(&core.TaskInfo{ + Logs: []*idlCore.TaskLog{ + { + Uri: "https://my-service/abc", + Name: "ServiceA Console", + }, + }, + OccurredAt: &tNow, + ExternalResources: []*core.ExternalResource{ + { + ExternalID: "abc", + }, + }, + }), nil +} + +func NewPlugin(ctx context.Context, cfg *Config, metricScope promutils.Scope) (Plugin, error) { + return Plugin{ + metricScope: metricScope, + cfg: cfg, + }, nil +} + +func init() { + pluginmachinery.PluginRegistry().RegisterRemotePlugin(webapi.PluginEntry{ + ID: "service-a", + SupportedTaskTypes: []core.TaskType{"my-task"}, + PluginLoader: func(ctx context.Context, iCtx webapi.PluginSetupContext) (webapi.AsyncPlugin, error) { + return NewPlugin(ctx, GetConfig(), iCtx.MetricsScope()) + }, + IsDefault: false, + DefaultForTaskTypes: []core.TaskType{"my-task"}, + }) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/webapi/example/testdata/admin_plugin.yaml b/flyteplugins/go/tasks/pluginmachinery/webapi/example/testdata/admin_plugin.yaml new file mode 100644 index 0000000000..9d8bb721a3 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/webapi/example/testdata/admin_plugin.yaml @@ -0,0 +1,5 @@ +plugins: + admin: + webApi: + resourceQuotas: + - "test": 1000 diff --git a/flyteplugins/go/tasks/pluginmachinery/webapi/mocks/async_plugin.go b/flyteplugins/go/tasks/pluginmachinery/webapi/mocks/async_plugin.go new file mode 100644 index 0000000000..90344cbd42 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/webapi/mocks/async_plugin.go @@ -0,0 +1,257 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + core "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + mock "github.com/stretchr/testify/mock" + + webapi "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/webapi" +) + +// AsyncPlugin is an autogenerated mock type for the AsyncPlugin type +type AsyncPlugin struct { + mock.Mock +} + +type AsyncPlugin_Create struct { + *mock.Call +} + +func (_m AsyncPlugin_Create) Return(resourceMeta interface{}, optionalResource interface{}, err error) *AsyncPlugin_Create { + return &AsyncPlugin_Create{Call: _m.Call.Return(resourceMeta, optionalResource, err)} +} + +func (_m *AsyncPlugin) OnCreate(ctx context.Context, tCtx webapi.TaskExecutionContextReader) *AsyncPlugin_Create { + c_call := _m.On("Create", ctx, tCtx) + return &AsyncPlugin_Create{Call: c_call} +} + +func (_m *AsyncPlugin) OnCreateMatch(matchers ...interface{}) *AsyncPlugin_Create { + c_call := _m.On("Create", matchers...) + return &AsyncPlugin_Create{Call: c_call} +} + +// Create provides a mock function with given fields: ctx, tCtx +func (_m *AsyncPlugin) Create(ctx context.Context, tCtx webapi.TaskExecutionContextReader) (interface{}, interface{}, error) { + ret := _m.Called(ctx, tCtx) + + var r0 interface{} + if rf, ok := ret.Get(0).(func(context.Context, webapi.TaskExecutionContextReader) interface{}); ok { + r0 = rf(ctx, tCtx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(interface{}) + } + } + + var r1 interface{} + if rf, ok := ret.Get(1).(func(context.Context, webapi.TaskExecutionContextReader) interface{}); ok { + r1 = rf(ctx, tCtx) + } else { + if ret.Get(1) != nil { + r1 = ret.Get(1).(interface{}) + } + } + + var r2 error + if rf, ok := ret.Get(2).(func(context.Context, webapi.TaskExecutionContextReader) error); ok { + r2 = rf(ctx, tCtx) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +type AsyncPlugin_Delete struct { + *mock.Call +} + +func (_m AsyncPlugin_Delete) Return(_a0 error) 
*AsyncPlugin_Delete { + return &AsyncPlugin_Delete{Call: _m.Call.Return(_a0)} +} + +func (_m *AsyncPlugin) OnDelete(ctx context.Context, tCtx webapi.DeleteContext) *AsyncPlugin_Delete { + c_call := _m.On("Delete", ctx, tCtx) + return &AsyncPlugin_Delete{Call: c_call} +} + +func (_m *AsyncPlugin) OnDeleteMatch(matchers ...interface{}) *AsyncPlugin_Delete { + c_call := _m.On("Delete", matchers...) + return &AsyncPlugin_Delete{Call: c_call} +} + +// Delete provides a mock function with given fields: ctx, tCtx +func (_m *AsyncPlugin) Delete(ctx context.Context, tCtx webapi.DeleteContext) error { + ret := _m.Called(ctx, tCtx) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, webapi.DeleteContext) error); ok { + r0 = rf(ctx, tCtx) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +type AsyncPlugin_Get struct { + *mock.Call +} + +func (_m AsyncPlugin_Get) Return(latest interface{}, err error) *AsyncPlugin_Get { + return &AsyncPlugin_Get{Call: _m.Call.Return(latest, err)} +} + +func (_m *AsyncPlugin) OnGet(ctx context.Context, tCtx webapi.GetContext) *AsyncPlugin_Get { + c_call := _m.On("Get", ctx, tCtx) + return &AsyncPlugin_Get{Call: c_call} +} + +func (_m *AsyncPlugin) OnGetMatch(matchers ...interface{}) *AsyncPlugin_Get { + c_call := _m.On("Get", matchers...) 
+ return &AsyncPlugin_Get{Call: c_call} +} + +// Get provides a mock function with given fields: ctx, tCtx +func (_m *AsyncPlugin) Get(ctx context.Context, tCtx webapi.GetContext) (interface{}, error) { + ret := _m.Called(ctx, tCtx) + + var r0 interface{} + if rf, ok := ret.Get(0).(func(context.Context, webapi.GetContext) interface{}); ok { + r0 = rf(ctx, tCtx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(interface{}) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, webapi.GetContext) error); ok { + r1 = rf(ctx, tCtx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type AsyncPlugin_GetConfig struct { + *mock.Call +} + +func (_m AsyncPlugin_GetConfig) Return(_a0 webapi.PluginConfig) *AsyncPlugin_GetConfig { + return &AsyncPlugin_GetConfig{Call: _m.Call.Return(_a0)} +} + +func (_m *AsyncPlugin) OnGetConfig() *AsyncPlugin_GetConfig { + c_call := _m.On("GetConfig") + return &AsyncPlugin_GetConfig{Call: c_call} +} + +func (_m *AsyncPlugin) OnGetConfigMatch(matchers ...interface{}) *AsyncPlugin_GetConfig { + c_call := _m.On("GetConfig", matchers...) 
+ return &AsyncPlugin_GetConfig{Call: c_call} +} + +// GetConfig provides a mock function with given fields: +func (_m *AsyncPlugin) GetConfig() webapi.PluginConfig { + ret := _m.Called() + + var r0 webapi.PluginConfig + if rf, ok := ret.Get(0).(func() webapi.PluginConfig); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(webapi.PluginConfig) + } + + return r0 +} + +type AsyncPlugin_ResourceRequirements struct { + *mock.Call +} + +func (_m AsyncPlugin_ResourceRequirements) Return(namespace core.ResourceNamespace, constraints core.ResourceConstraintsSpec, err error) *AsyncPlugin_ResourceRequirements { + return &AsyncPlugin_ResourceRequirements{Call: _m.Call.Return(namespace, constraints, err)} +} + +func (_m *AsyncPlugin) OnResourceRequirements(ctx context.Context, tCtx webapi.TaskExecutionContextReader) *AsyncPlugin_ResourceRequirements { + c_call := _m.On("ResourceRequirements", ctx, tCtx) + return &AsyncPlugin_ResourceRequirements{Call: c_call} +} + +func (_m *AsyncPlugin) OnResourceRequirementsMatch(matchers ...interface{}) *AsyncPlugin_ResourceRequirements { + c_call := _m.On("ResourceRequirements", matchers...) 
+ return &AsyncPlugin_ResourceRequirements{Call: c_call} +} + +// ResourceRequirements provides a mock function with given fields: ctx, tCtx +func (_m *AsyncPlugin) ResourceRequirements(ctx context.Context, tCtx webapi.TaskExecutionContextReader) (core.ResourceNamespace, core.ResourceConstraintsSpec, error) { + ret := _m.Called(ctx, tCtx) + + var r0 core.ResourceNamespace + if rf, ok := ret.Get(0).(func(context.Context, webapi.TaskExecutionContextReader) core.ResourceNamespace); ok { + r0 = rf(ctx, tCtx) + } else { + r0 = ret.Get(0).(core.ResourceNamespace) + } + + var r1 core.ResourceConstraintsSpec + if rf, ok := ret.Get(1).(func(context.Context, webapi.TaskExecutionContextReader) core.ResourceConstraintsSpec); ok { + r1 = rf(ctx, tCtx) + } else { + r1 = ret.Get(1).(core.ResourceConstraintsSpec) + } + + var r2 error + if rf, ok := ret.Get(2).(func(context.Context, webapi.TaskExecutionContextReader) error); ok { + r2 = rf(ctx, tCtx) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +type AsyncPlugin_Status struct { + *mock.Call +} + +func (_m AsyncPlugin_Status) Return(phase core.PhaseInfo, err error) *AsyncPlugin_Status { + return &AsyncPlugin_Status{Call: _m.Call.Return(phase, err)} +} + +func (_m *AsyncPlugin) OnStatus(ctx context.Context, tCtx webapi.StatusContext) *AsyncPlugin_Status { + c_call := _m.On("Status", ctx, tCtx) + return &AsyncPlugin_Status{Call: c_call} +} + +func (_m *AsyncPlugin) OnStatusMatch(matchers ...interface{}) *AsyncPlugin_Status { + c_call := _m.On("Status", matchers...) 
+ return &AsyncPlugin_Status{Call: c_call} +} + +// Status provides a mock function with given fields: ctx, tCtx +func (_m *AsyncPlugin) Status(ctx context.Context, tCtx webapi.StatusContext) (core.PhaseInfo, error) { + ret := _m.Called(ctx, tCtx) + + var r0 core.PhaseInfo + if rf, ok := ret.Get(0).(func(context.Context, webapi.StatusContext) core.PhaseInfo); ok { + r0 = rf(ctx, tCtx) + } else { + r0 = ret.Get(0).(core.PhaseInfo) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, webapi.StatusContext) error); ok { + r1 = rf(ctx, tCtx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/flyteplugins/go/tasks/pluginmachinery/webapi/mocks/delete_context.go b/flyteplugins/go/tasks/pluginmachinery/webapi/mocks/delete_context.go new file mode 100644 index 0000000000..c6be7e5c60 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/webapi/mocks/delete_context.go @@ -0,0 +1,76 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. + +package mocks + +import mock "github.com/stretchr/testify/mock" + +// DeleteContext is an autogenerated mock type for the DeleteContext type +type DeleteContext struct { + mock.Mock +} + +type DeleteContext_Reason struct { + *mock.Call +} + +func (_m DeleteContext_Reason) Return(_a0 string) *DeleteContext_Reason { + return &DeleteContext_Reason{Call: _m.Call.Return(_a0)} +} + +func (_m *DeleteContext) OnReason() *DeleteContext_Reason { + c_call := _m.On("Reason") + return &DeleteContext_Reason{Call: c_call} +} + +func (_m *DeleteContext) OnReasonMatch(matchers ...interface{}) *DeleteContext_Reason { + c_call := _m.On("Reason", matchers...) 
+ return &DeleteContext_Reason{Call: c_call} +} + +// Reason provides a mock function with given fields: +func (_m *DeleteContext) Reason() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +type DeleteContext_ResourceMeta struct { + *mock.Call +} + +func (_m DeleteContext_ResourceMeta) Return(_a0 interface{}) *DeleteContext_ResourceMeta { + return &DeleteContext_ResourceMeta{Call: _m.Call.Return(_a0)} +} + +func (_m *DeleteContext) OnResourceMeta() *DeleteContext_ResourceMeta { + c_call := _m.On("ResourceMeta") + return &DeleteContext_ResourceMeta{Call: c_call} +} + +func (_m *DeleteContext) OnResourceMetaMatch(matchers ...interface{}) *DeleteContext_ResourceMeta { + c_call := _m.On("ResourceMeta", matchers...) + return &DeleteContext_ResourceMeta{Call: c_call} +} + +// ResourceMeta provides a mock function with given fields: +func (_m *DeleteContext) ResourceMeta() interface{} { + ret := _m.Called() + + var r0 interface{} + if rf, ok := ret.Get(0).(func() interface{}); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(interface{}) + } + } + + return r0 +} diff --git a/flyteplugins/go/tasks/pluginmachinery/webapi/mocks/get_context.go b/flyteplugins/go/tasks/pluginmachinery/webapi/mocks/get_context.go new file mode 100644 index 0000000000..c0dcbcd19e --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/webapi/mocks/get_context.go @@ -0,0 +1,44 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. 
+ +package mocks + +import mock "github.com/stretchr/testify/mock" + +// GetContext is an autogenerated mock type for the GetContext type +type GetContext struct { + mock.Mock +} + +type GetContext_ResourceMeta struct { + *mock.Call +} + +func (_m GetContext_ResourceMeta) Return(_a0 interface{}) *GetContext_ResourceMeta { + return &GetContext_ResourceMeta{Call: _m.Call.Return(_a0)} +} + +func (_m *GetContext) OnResourceMeta() *GetContext_ResourceMeta { + c_call := _m.On("ResourceMeta") + return &GetContext_ResourceMeta{Call: c_call} +} + +func (_m *GetContext) OnResourceMetaMatch(matchers ...interface{}) *GetContext_ResourceMeta { + c_call := _m.On("ResourceMeta", matchers...) + return &GetContext_ResourceMeta{Call: c_call} +} + +// ResourceMeta provides a mock function with given fields: +func (_m *GetContext) ResourceMeta() interface{} { + ret := _m.Called() + + var r0 interface{} + if rf, ok := ret.Get(0).(func() interface{}); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(interface{}) + } + } + + return r0 +} diff --git a/flyteplugins/go/tasks/pluginmachinery/webapi/mocks/plugin.go b/flyteplugins/go/tasks/pluginmachinery/webapi/mocks/plugin.go new file mode 100644 index 0000000000..0367277c92 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/webapi/mocks/plugin.go @@ -0,0 +1,45 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. 
+ +package mocks + +import ( + webapi "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/webapi" + mock "github.com/stretchr/testify/mock" +) + +// Plugin is an autogenerated mock type for the Plugin type +type Plugin struct { + mock.Mock +} + +type Plugin_GetConfig struct { + *mock.Call +} + +func (_m Plugin_GetConfig) Return(_a0 webapi.PluginConfig) *Plugin_GetConfig { + return &Plugin_GetConfig{Call: _m.Call.Return(_a0)} +} + +func (_m *Plugin) OnGetConfig() *Plugin_GetConfig { + c_call := _m.On("GetConfig") + return &Plugin_GetConfig{Call: c_call} +} + +func (_m *Plugin) OnGetConfigMatch(matchers ...interface{}) *Plugin_GetConfig { + c_call := _m.On("GetConfig", matchers...) + return &Plugin_GetConfig{Call: c_call} +} + +// GetConfig provides a mock function with given fields: +func (_m *Plugin) GetConfig() webapi.PluginConfig { + ret := _m.Called() + + var r0 webapi.PluginConfig + if rf, ok := ret.Get(0).(func() webapi.PluginConfig); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(webapi.PluginConfig) + } + + return r0 +} diff --git a/flyteplugins/go/tasks/pluginmachinery/webapi/mocks/plugin_setup_context.go b/flyteplugins/go/tasks/pluginmachinery/webapi/mocks/plugin_setup_context.go new file mode 100644 index 0000000000..ce6d25c293 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/webapi/mocks/plugin_setup_context.go @@ -0,0 +1,47 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. 
+ +package mocks + +import ( + promutils "github.com/flyteorg/flyte/v2/flytestdlib/promutils" + mock "github.com/stretchr/testify/mock" +) + +// PluginSetupContext is an autogenerated mock type for the PluginSetupContext type +type PluginSetupContext struct { + mock.Mock +} + +type PluginSetupContext_MetricsScope struct { + *mock.Call +} + +func (_m PluginSetupContext_MetricsScope) Return(_a0 promutils.Scope) *PluginSetupContext_MetricsScope { + return &PluginSetupContext_MetricsScope{Call: _m.Call.Return(_a0)} +} + +func (_m *PluginSetupContext) OnMetricsScope() *PluginSetupContext_MetricsScope { + c_call := _m.On("MetricsScope") + return &PluginSetupContext_MetricsScope{Call: c_call} +} + +func (_m *PluginSetupContext) OnMetricsScopeMatch(matchers ...interface{}) *PluginSetupContext_MetricsScope { + c_call := _m.On("MetricsScope", matchers...) + return &PluginSetupContext_MetricsScope{Call: c_call} +} + +// MetricsScope provides a mock function with given fields: +func (_m *PluginSetupContext) MetricsScope() promutils.Scope { + ret := _m.Called() + + var r0 promutils.Scope + if rf, ok := ret.Get(0).(func() promutils.Scope); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(promutils.Scope) + } + } + + return r0 +} diff --git a/flyteplugins/go/tasks/pluginmachinery/webapi/mocks/remote_resource.go b/flyteplugins/go/tasks/pluginmachinery/webapi/mocks/remote_resource.go new file mode 100644 index 0000000000..5770fab126 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/webapi/mocks/remote_resource.go @@ -0,0 +1,10 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. 
+ +package mocks + +import mock "github.com/stretchr/testify/mock" + +// RemoteResource is an autogenerated mock type for the RemoteResource type +type RemoteResource struct { + mock.Mock +} diff --git a/flyteplugins/go/tasks/pluginmachinery/webapi/mocks/resource.go b/flyteplugins/go/tasks/pluginmachinery/webapi/mocks/resource.go new file mode 100644 index 0000000000..2b27b82324 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/webapi/mocks/resource.go @@ -0,0 +1,84 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. + +package mocks + +import ( + core "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + mock "github.com/stretchr/testify/mock" +) + +// Resource is an autogenerated mock type for the Resource type +type Resource struct { + mock.Mock +} + +type Resource_ID struct { + *mock.Call +} + +func (_m Resource_ID) Return(_a0 string) *Resource_ID { + return &Resource_ID{Call: _m.Call.Return(_a0)} +} + +func (_m *Resource) OnID() *Resource_ID { + c := _m.On("ID") + return &Resource_ID{Call: c} +} + +func (_m *Resource) OnIDMatch(matchers ...interface{}) *Resource_ID { + c := _m.On("ID", matchers...) + return &Resource_ID{Call: c} +} + +// ID provides a mock function with given fields: +func (_m *Resource) ID() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +type Resource_Status struct { + *mock.Call +} + +func (_m Resource_Status) Return(phase core.PhaseInfo, err error) *Resource_Status { + return &Resource_Status{Call: _m.Call.Return(phase, err)} +} + +func (_m *Resource) OnStatus() *Resource_Status { + c := _m.On("Status") + return &Resource_Status{Call: c} +} + +func (_m *Resource) OnStatusMatch(matchers ...interface{}) *Resource_Status { + c := _m.On("Status", matchers...) 
+ return &Resource_Status{Call: c} +} + +// Status provides a mock function with given fields: +func (_m *Resource) Status() (core.PhaseInfo, error) { + ret := _m.Called() + + var r0 core.PhaseInfo + if rf, ok := ret.Get(0).(func() core.PhaseInfo); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(core.PhaseInfo) + } + + var r1 error + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/flyteplugins/go/tasks/pluginmachinery/webapi/mocks/status_context.go b/flyteplugins/go/tasks/pluginmachinery/webapi/mocks/status_context.go new file mode 100644 index 0000000000..1c6e086a70 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/webapi/mocks/status_context.go @@ -0,0 +1,289 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. + +package mocks + +import ( + core "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + io "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/io" + + mock "github.com/stretchr/testify/mock" + + storage "github.com/flyteorg/flyte/v2/flytestdlib/storage" +) + +// StatusContext is an autogenerated mock type for the StatusContext type +type StatusContext struct { + mock.Mock +} + +type StatusContext_DataStore struct { + *mock.Call +} + +func (_m StatusContext_DataStore) Return(_a0 *storage.DataStore) *StatusContext_DataStore { + return &StatusContext_DataStore{Call: _m.Call.Return(_a0)} +} + +func (_m *StatusContext) OnDataStore() *StatusContext_DataStore { + c_call := _m.On("DataStore") + return &StatusContext_DataStore{Call: c_call} +} + +func (_m *StatusContext) OnDataStoreMatch(matchers ...interface{}) *StatusContext_DataStore { + c_call := _m.On("DataStore", matchers...) 
+ return &StatusContext_DataStore{Call: c_call} +} + +// DataStore provides a mock function with given fields: +func (_m *StatusContext) DataStore() *storage.DataStore { + ret := _m.Called() + + var r0 *storage.DataStore + if rf, ok := ret.Get(0).(func() *storage.DataStore); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*storage.DataStore) + } + } + + return r0 +} + +type StatusContext_InputReader struct { + *mock.Call +} + +func (_m StatusContext_InputReader) Return(_a0 io.InputReader) *StatusContext_InputReader { + return &StatusContext_InputReader{Call: _m.Call.Return(_a0)} +} + +func (_m *StatusContext) OnInputReader() *StatusContext_InputReader { + c_call := _m.On("InputReader") + return &StatusContext_InputReader{Call: c_call} +} + +func (_m *StatusContext) OnInputReaderMatch(matchers ...interface{}) *StatusContext_InputReader { + c_call := _m.On("InputReader", matchers...) + return &StatusContext_InputReader{Call: c_call} +} + +// InputReader provides a mock function with given fields: +func (_m *StatusContext) InputReader() io.InputReader { + ret := _m.Called() + + var r0 io.InputReader + if rf, ok := ret.Get(0).(func() io.InputReader); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(io.InputReader) + } + } + + return r0 +} + +type StatusContext_OutputWriter struct { + *mock.Call +} + +func (_m StatusContext_OutputWriter) Return(_a0 io.OutputWriter) *StatusContext_OutputWriter { + return &StatusContext_OutputWriter{Call: _m.Call.Return(_a0)} +} + +func (_m *StatusContext) OnOutputWriter() *StatusContext_OutputWriter { + c_call := _m.On("OutputWriter") + return &StatusContext_OutputWriter{Call: c_call} +} + +func (_m *StatusContext) OnOutputWriterMatch(matchers ...interface{}) *StatusContext_OutputWriter { + c_call := _m.On("OutputWriter", matchers...) 
+ return &StatusContext_OutputWriter{Call: c_call} +} + +// OutputWriter provides a mock function with given fields: +func (_m *StatusContext) OutputWriter() io.OutputWriter { + ret := _m.Called() + + var r0 io.OutputWriter + if rf, ok := ret.Get(0).(func() io.OutputWriter); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(io.OutputWriter) + } + } + + return r0 +} + +type StatusContext_Resource struct { + *mock.Call +} + +func (_m StatusContext_Resource) Return(_a0 interface{}) *StatusContext_Resource { + return &StatusContext_Resource{Call: _m.Call.Return(_a0)} +} + +func (_m *StatusContext) OnResource() *StatusContext_Resource { + c_call := _m.On("Resource") + return &StatusContext_Resource{Call: c_call} +} + +func (_m *StatusContext) OnResourceMatch(matchers ...interface{}) *StatusContext_Resource { + c_call := _m.On("Resource", matchers...) + return &StatusContext_Resource{Call: c_call} +} + +// Resource provides a mock function with given fields: +func (_m *StatusContext) Resource() interface{} { + ret := _m.Called() + + var r0 interface{} + if rf, ok := ret.Get(0).(func() interface{}); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(interface{}) + } + } + + return r0 +} + +type StatusContext_ResourceMeta struct { + *mock.Call +} + +func (_m StatusContext_ResourceMeta) Return(_a0 interface{}) *StatusContext_ResourceMeta { + return &StatusContext_ResourceMeta{Call: _m.Call.Return(_a0)} +} + +func (_m *StatusContext) OnResourceMeta() *StatusContext_ResourceMeta { + c_call := _m.On("ResourceMeta") + return &StatusContext_ResourceMeta{Call: c_call} +} + +func (_m *StatusContext) OnResourceMetaMatch(matchers ...interface{}) *StatusContext_ResourceMeta { + c_call := _m.On("ResourceMeta", matchers...) 
+ return &StatusContext_ResourceMeta{Call: c_call} +} + +// ResourceMeta provides a mock function with given fields: +func (_m *StatusContext) ResourceMeta() interface{} { + ret := _m.Called() + + var r0 interface{} + if rf, ok := ret.Get(0).(func() interface{}); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(interface{}) + } + } + + return r0 +} + +type StatusContext_SecretManager struct { + *mock.Call +} + +func (_m StatusContext_SecretManager) Return(_a0 core.SecretManager) *StatusContext_SecretManager { + return &StatusContext_SecretManager{Call: _m.Call.Return(_a0)} +} + +func (_m *StatusContext) OnSecretManager() *StatusContext_SecretManager { + c_call := _m.On("SecretManager") + return &StatusContext_SecretManager{Call: c_call} +} + +func (_m *StatusContext) OnSecretManagerMatch(matchers ...interface{}) *StatusContext_SecretManager { + c_call := _m.On("SecretManager", matchers...) + return &StatusContext_SecretManager{Call: c_call} +} + +// SecretManager provides a mock function with given fields: +func (_m *StatusContext) SecretManager() core.SecretManager { + ret := _m.Called() + + var r0 core.SecretManager + if rf, ok := ret.Get(0).(func() core.SecretManager); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(core.SecretManager) + } + } + + return r0 +} + +type StatusContext_TaskExecutionMetadata struct { + *mock.Call +} + +func (_m StatusContext_TaskExecutionMetadata) Return(_a0 core.TaskExecutionMetadata) *StatusContext_TaskExecutionMetadata { + return &StatusContext_TaskExecutionMetadata{Call: _m.Call.Return(_a0)} +} + +func (_m *StatusContext) OnTaskExecutionMetadata() *StatusContext_TaskExecutionMetadata { + c_call := _m.On("TaskExecutionMetadata") + return &StatusContext_TaskExecutionMetadata{Call: c_call} +} + +func (_m *StatusContext) OnTaskExecutionMetadataMatch(matchers ...interface{}) *StatusContext_TaskExecutionMetadata { + c_call := _m.On("TaskExecutionMetadata", matchers...) 
+ return &StatusContext_TaskExecutionMetadata{Call: c_call} +} + +// TaskExecutionMetadata provides a mock function with given fields: +func (_m *StatusContext) TaskExecutionMetadata() core.TaskExecutionMetadata { + ret := _m.Called() + + var r0 core.TaskExecutionMetadata + if rf, ok := ret.Get(0).(func() core.TaskExecutionMetadata); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(core.TaskExecutionMetadata) + } + } + + return r0 +} + +type StatusContext_TaskReader struct { + *mock.Call +} + +func (_m StatusContext_TaskReader) Return(_a0 core.TaskReader) *StatusContext_TaskReader { + return &StatusContext_TaskReader{Call: _m.Call.Return(_a0)} +} + +func (_m *StatusContext) OnTaskReader() *StatusContext_TaskReader { + c_call := _m.On("TaskReader") + return &StatusContext_TaskReader{Call: c_call} +} + +func (_m *StatusContext) OnTaskReaderMatch(matchers ...interface{}) *StatusContext_TaskReader { + c_call := _m.On("TaskReader", matchers...) + return &StatusContext_TaskReader{Call: c_call} +} + +// TaskReader provides a mock function with given fields: +func (_m *StatusContext) TaskReader() core.TaskReader { + ret := _m.Called() + + var r0 core.TaskReader + if rf, ok := ret.Get(0).(func() core.TaskReader); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(core.TaskReader) + } + } + + return r0 +} diff --git a/flyteplugins/go/tasks/pluginmachinery/webapi/mocks/sync_plugin.go b/flyteplugins/go/tasks/pluginmachinery/webapi/mocks/sync_plugin.go new file mode 100644 index 0000000000..d5934b21ba --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/webapi/mocks/sync_plugin.go @@ -0,0 +1,88 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + core "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + mock "github.com/stretchr/testify/mock" + + webapi "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/webapi" +) + +// SyncPlugin is an autogenerated mock type for the SyncPlugin type +type SyncPlugin struct { + mock.Mock +} + +type SyncPlugin_Do struct { + *mock.Call +} + +func (_m SyncPlugin_Do) Return(phase core.PhaseInfo, err error) *SyncPlugin_Do { + return &SyncPlugin_Do{Call: _m.Call.Return(phase, err)} +} + +func (_m *SyncPlugin) OnDo(ctx context.Context, tCtx webapi.TaskExecutionContext) *SyncPlugin_Do { + c_call := _m.On("Do", ctx, tCtx) + return &SyncPlugin_Do{Call: c_call} +} + +func (_m *SyncPlugin) OnDoMatch(matchers ...interface{}) *SyncPlugin_Do { + c_call := _m.On("Do", matchers...) + return &SyncPlugin_Do{Call: c_call} +} + +// Do provides a mock function with given fields: ctx, tCtx +func (_m *SyncPlugin) Do(ctx context.Context, tCtx webapi.TaskExecutionContext) (core.PhaseInfo, error) { + ret := _m.Called(ctx, tCtx) + + var r0 core.PhaseInfo + if rf, ok := ret.Get(0).(func(context.Context, webapi.TaskExecutionContext) core.PhaseInfo); ok { + r0 = rf(ctx, tCtx) + } else { + r0 = ret.Get(0).(core.PhaseInfo) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, webapi.TaskExecutionContext) error); ok { + r1 = rf(ctx, tCtx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +type SyncPlugin_GetConfig struct { + *mock.Call +} + +func (_m SyncPlugin_GetConfig) Return(_a0 webapi.PluginConfig) *SyncPlugin_GetConfig { + return &SyncPlugin_GetConfig{Call: _m.Call.Return(_a0)} +} + +func (_m *SyncPlugin) OnGetConfig() *SyncPlugin_GetConfig { + c_call := _m.On("GetConfig") + return &SyncPlugin_GetConfig{Call: c_call} +} + +func (_m *SyncPlugin) OnGetConfigMatch(matchers ...interface{}) *SyncPlugin_GetConfig { + c_call := _m.On("GetConfig", matchers...) 
+ return &SyncPlugin_GetConfig{Call: c_call} +} + +// GetConfig provides a mock function with given fields: +func (_m *SyncPlugin) GetConfig() webapi.PluginConfig { + ret := _m.Called() + + var r0 webapi.PluginConfig + if rf, ok := ret.Get(0).(func() webapi.PluginConfig); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(webapi.PluginConfig) + } + + return r0 +} diff --git a/flyteplugins/go/tasks/pluginmachinery/webapi/mocks/task_execution_context.go b/flyteplugins/go/tasks/pluginmachinery/webapi/mocks/task_execution_context.go new file mode 100644 index 0000000000..648a3d6bbd --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/webapi/mocks/task_execution_context.go @@ -0,0 +1,221 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. + +package mocks + +import ( + core "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + io "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/io" + + mock "github.com/stretchr/testify/mock" + + storage "github.com/flyteorg/flyte/v2/flytestdlib/storage" +) + +// TaskExecutionContext is an autogenerated mock type for the TaskExecutionContext type +type TaskExecutionContext struct { + mock.Mock +} + +type TaskExecutionContext_DataStore struct { + *mock.Call +} + +func (_m TaskExecutionContext_DataStore) Return(_a0 *storage.DataStore) *TaskExecutionContext_DataStore { + return &TaskExecutionContext_DataStore{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskExecutionContext) OnDataStore() *TaskExecutionContext_DataStore { + c_call := _m.On("DataStore") + return &TaskExecutionContext_DataStore{Call: c_call} +} + +func (_m *TaskExecutionContext) OnDataStoreMatch(matchers ...interface{}) *TaskExecutionContext_DataStore { + c_call := _m.On("DataStore", matchers...) 
+ return &TaskExecutionContext_DataStore{Call: c_call} +} + +// DataStore provides a mock function with given fields: +func (_m *TaskExecutionContext) DataStore() *storage.DataStore { + ret := _m.Called() + + var r0 *storage.DataStore + if rf, ok := ret.Get(0).(func() *storage.DataStore); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*storage.DataStore) + } + } + + return r0 +} + +type TaskExecutionContext_InputReader struct { + *mock.Call +} + +func (_m TaskExecutionContext_InputReader) Return(_a0 io.InputReader) *TaskExecutionContext_InputReader { + return &TaskExecutionContext_InputReader{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskExecutionContext) OnInputReader() *TaskExecutionContext_InputReader { + c_call := _m.On("InputReader") + return &TaskExecutionContext_InputReader{Call: c_call} +} + +func (_m *TaskExecutionContext) OnInputReaderMatch(matchers ...interface{}) *TaskExecutionContext_InputReader { + c_call := _m.On("InputReader", matchers...) + return &TaskExecutionContext_InputReader{Call: c_call} +} + +// InputReader provides a mock function with given fields: +func (_m *TaskExecutionContext) InputReader() io.InputReader { + ret := _m.Called() + + var r0 io.InputReader + if rf, ok := ret.Get(0).(func() io.InputReader); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(io.InputReader) + } + } + + return r0 +} + +type TaskExecutionContext_OutputWriter struct { + *mock.Call +} + +func (_m TaskExecutionContext_OutputWriter) Return(_a0 io.OutputWriter) *TaskExecutionContext_OutputWriter { + return &TaskExecutionContext_OutputWriter{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskExecutionContext) OnOutputWriter() *TaskExecutionContext_OutputWriter { + c_call := _m.On("OutputWriter") + return &TaskExecutionContext_OutputWriter{Call: c_call} +} + +func (_m *TaskExecutionContext) OnOutputWriterMatch(matchers ...interface{}) *TaskExecutionContext_OutputWriter { + c_call := _m.On("OutputWriter", matchers...) 
+ return &TaskExecutionContext_OutputWriter{Call: c_call} +} + +// OutputWriter provides a mock function with given fields: +func (_m *TaskExecutionContext) OutputWriter() io.OutputWriter { + ret := _m.Called() + + var r0 io.OutputWriter + if rf, ok := ret.Get(0).(func() io.OutputWriter); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(io.OutputWriter) + } + } + + return r0 +} + +type TaskExecutionContext_SecretManager struct { + *mock.Call +} + +func (_m TaskExecutionContext_SecretManager) Return(_a0 core.SecretManager) *TaskExecutionContext_SecretManager { + return &TaskExecutionContext_SecretManager{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskExecutionContext) OnSecretManager() *TaskExecutionContext_SecretManager { + c_call := _m.On("SecretManager") + return &TaskExecutionContext_SecretManager{Call: c_call} +} + +func (_m *TaskExecutionContext) OnSecretManagerMatch(matchers ...interface{}) *TaskExecutionContext_SecretManager { + c_call := _m.On("SecretManager", matchers...) 
+ return &TaskExecutionContext_SecretManager{Call: c_call} +} + +// SecretManager provides a mock function with given fields: +func (_m *TaskExecutionContext) SecretManager() core.SecretManager { + ret := _m.Called() + + var r0 core.SecretManager + if rf, ok := ret.Get(0).(func() core.SecretManager); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(core.SecretManager) + } + } + + return r0 +} + +type TaskExecutionContext_TaskExecutionMetadata struct { + *mock.Call +} + +func (_m TaskExecutionContext_TaskExecutionMetadata) Return(_a0 core.TaskExecutionMetadata) *TaskExecutionContext_TaskExecutionMetadata { + return &TaskExecutionContext_TaskExecutionMetadata{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskExecutionContext) OnTaskExecutionMetadata() *TaskExecutionContext_TaskExecutionMetadata { + c_call := _m.On("TaskExecutionMetadata") + return &TaskExecutionContext_TaskExecutionMetadata{Call: c_call} +} + +func (_m *TaskExecutionContext) OnTaskExecutionMetadataMatch(matchers ...interface{}) *TaskExecutionContext_TaskExecutionMetadata { + c_call := _m.On("TaskExecutionMetadata", matchers...) 
+ return &TaskExecutionContext_TaskExecutionMetadata{Call: c_call} +} + +// TaskExecutionMetadata provides a mock function with given fields: +func (_m *TaskExecutionContext) TaskExecutionMetadata() core.TaskExecutionMetadata { + ret := _m.Called() + + var r0 core.TaskExecutionMetadata + if rf, ok := ret.Get(0).(func() core.TaskExecutionMetadata); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(core.TaskExecutionMetadata) + } + } + + return r0 +} + +type TaskExecutionContext_TaskReader struct { + *mock.Call +} + +func (_m TaskExecutionContext_TaskReader) Return(_a0 core.TaskReader) *TaskExecutionContext_TaskReader { + return &TaskExecutionContext_TaskReader{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskExecutionContext) OnTaskReader() *TaskExecutionContext_TaskReader { + c_call := _m.On("TaskReader") + return &TaskExecutionContext_TaskReader{Call: c_call} +} + +func (_m *TaskExecutionContext) OnTaskReaderMatch(matchers ...interface{}) *TaskExecutionContext_TaskReader { + c_call := _m.On("TaskReader", matchers...) + return &TaskExecutionContext_TaskReader{Call: c_call} +} + +// TaskReader provides a mock function with given fields: +func (_m *TaskExecutionContext) TaskReader() core.TaskReader { + ret := _m.Called() + + var r0 core.TaskReader + if rf, ok := ret.Get(0).(func() core.TaskReader); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(core.TaskReader) + } + } + + return r0 +} diff --git a/flyteplugins/go/tasks/pluginmachinery/webapi/mocks/task_execution_context_reader.go b/flyteplugins/go/tasks/pluginmachinery/webapi/mocks/task_execution_context_reader.go new file mode 100644 index 0000000000..2832e2ef08 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/webapi/mocks/task_execution_context_reader.go @@ -0,0 +1,185 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. 
+ +package mocks + +import ( + core "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + io "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/io" + + mock "github.com/stretchr/testify/mock" +) + +// TaskExecutionContextReader is an autogenerated mock type for the TaskExecutionContextReader type +type TaskExecutionContextReader struct { + mock.Mock +} + +type TaskExecutionContextReader_InputReader struct { + *mock.Call +} + +func (_m TaskExecutionContextReader_InputReader) Return(_a0 io.InputReader) *TaskExecutionContextReader_InputReader { + return &TaskExecutionContextReader_InputReader{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskExecutionContextReader) OnInputReader() *TaskExecutionContextReader_InputReader { + c_call := _m.On("InputReader") + return &TaskExecutionContextReader_InputReader{Call: c_call} +} + +func (_m *TaskExecutionContextReader) OnInputReaderMatch(matchers ...interface{}) *TaskExecutionContextReader_InputReader { + c_call := _m.On("InputReader", matchers...) 
+ return &TaskExecutionContextReader_InputReader{Call: c_call} +} + +// InputReader provides a mock function with given fields: +func (_m *TaskExecutionContextReader) InputReader() io.InputReader { + ret := _m.Called() + + var r0 io.InputReader + if rf, ok := ret.Get(0).(func() io.InputReader); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(io.InputReader) + } + } + + return r0 +} + +type TaskExecutionContextReader_OutputWriter struct { + *mock.Call +} + +func (_m TaskExecutionContextReader_OutputWriter) Return(_a0 io.OutputWriter) *TaskExecutionContextReader_OutputWriter { + return &TaskExecutionContextReader_OutputWriter{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskExecutionContextReader) OnOutputWriter() *TaskExecutionContextReader_OutputWriter { + c_call := _m.On("OutputWriter") + return &TaskExecutionContextReader_OutputWriter{Call: c_call} +} + +func (_m *TaskExecutionContextReader) OnOutputWriterMatch(matchers ...interface{}) *TaskExecutionContextReader_OutputWriter { + c_call := _m.On("OutputWriter", matchers...) 
+ return &TaskExecutionContextReader_OutputWriter{Call: c_call} +} + +// OutputWriter provides a mock function with given fields: +func (_m *TaskExecutionContextReader) OutputWriter() io.OutputWriter { + ret := _m.Called() + + var r0 io.OutputWriter + if rf, ok := ret.Get(0).(func() io.OutputWriter); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(io.OutputWriter) + } + } + + return r0 +} + +type TaskExecutionContextReader_SecretManager struct { + *mock.Call +} + +func (_m TaskExecutionContextReader_SecretManager) Return(_a0 core.SecretManager) *TaskExecutionContextReader_SecretManager { + return &TaskExecutionContextReader_SecretManager{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskExecutionContextReader) OnSecretManager() *TaskExecutionContextReader_SecretManager { + c_call := _m.On("SecretManager") + return &TaskExecutionContextReader_SecretManager{Call: c_call} +} + +func (_m *TaskExecutionContextReader) OnSecretManagerMatch(matchers ...interface{}) *TaskExecutionContextReader_SecretManager { + c_call := _m.On("SecretManager", matchers...) 
+ return &TaskExecutionContextReader_SecretManager{Call: c_call} +} + +// SecretManager provides a mock function with given fields: +func (_m *TaskExecutionContextReader) SecretManager() core.SecretManager { + ret := _m.Called() + + var r0 core.SecretManager + if rf, ok := ret.Get(0).(func() core.SecretManager); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(core.SecretManager) + } + } + + return r0 +} + +type TaskExecutionContextReader_TaskExecutionMetadata struct { + *mock.Call +} + +func (_m TaskExecutionContextReader_TaskExecutionMetadata) Return(_a0 core.TaskExecutionMetadata) *TaskExecutionContextReader_TaskExecutionMetadata { + return &TaskExecutionContextReader_TaskExecutionMetadata{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskExecutionContextReader) OnTaskExecutionMetadata() *TaskExecutionContextReader_TaskExecutionMetadata { + c_call := _m.On("TaskExecutionMetadata") + return &TaskExecutionContextReader_TaskExecutionMetadata{Call: c_call} +} + +func (_m *TaskExecutionContextReader) OnTaskExecutionMetadataMatch(matchers ...interface{}) *TaskExecutionContextReader_TaskExecutionMetadata { + c_call := _m.On("TaskExecutionMetadata", matchers...) 
+ return &TaskExecutionContextReader_TaskExecutionMetadata{Call: c_call} +} + +// TaskExecutionMetadata provides a mock function with given fields: +func (_m *TaskExecutionContextReader) TaskExecutionMetadata() core.TaskExecutionMetadata { + ret := _m.Called() + + var r0 core.TaskExecutionMetadata + if rf, ok := ret.Get(0).(func() core.TaskExecutionMetadata); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(core.TaskExecutionMetadata) + } + } + + return r0 +} + +type TaskExecutionContextReader_TaskReader struct { + *mock.Call +} + +func (_m TaskExecutionContextReader_TaskReader) Return(_a0 core.TaskReader) *TaskExecutionContextReader_TaskReader { + return &TaskExecutionContextReader_TaskReader{Call: _m.Call.Return(_a0)} +} + +func (_m *TaskExecutionContextReader) OnTaskReader() *TaskExecutionContextReader_TaskReader { + c_call := _m.On("TaskReader") + return &TaskExecutionContextReader_TaskReader{Call: c_call} +} + +func (_m *TaskExecutionContextReader) OnTaskReaderMatch(matchers ...interface{}) *TaskExecutionContextReader_TaskReader { + c_call := _m.On("TaskReader", matchers...) + return &TaskExecutionContextReader_TaskReader{Call: c_call} +} + +// TaskReader provides a mock function with given fields: +func (_m *TaskExecutionContextReader) TaskReader() core.TaskReader { + ret := _m.Called() + + var r0 core.TaskReader + if rf, ok := ret.Get(0).(func() core.TaskReader); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(core.TaskReader) + } + } + + return r0 +} diff --git a/flyteplugins/go/tasks/pluginmachinery/webapi/plugin.go b/flyteplugins/go/tasks/pluginmachinery/webapi/plugin.go new file mode 100644 index 0000000000..801441c456 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/webapi/plugin.go @@ -0,0 +1,149 @@ +// Defines the interfaces to implement to add a Web API Plugin (AsyncPlugin and SyncPlugin) to the Flyte system. 
A
// WebAPI plugin is a plugin that runs the compute for a task on a separate system through a web call (REST/Grpc...
// etc.). By implementing either the AsyncPlugin or SyncPlugin interfaces, the users of the Flyte system can then
// declare tasks of the handled task type in their workflows and the engine (Propeller) will route these tasks to your
// plugin to interact with the remote system.
//
// A sample skeleton plugin is defined in the example directory.
package webapi

import (
	"context"

	pluginsCore "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core"
	"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/io"
	"github.com/flyteorg/flyte/v2/flytestdlib/promutils"
	"github.com/flyteorg/flyte/v2/flytestdlib/storage"
)

//go:generate mockery -all -case=underscore

// PluginLoader is a lazy-loading function that will load the plugin. Plugins should be initialized in this method.
// It is guaranteed that the plugin loader will be called before any Handle/Abort/Finalize functions are invoked.
type PluginLoader func(ctx context.Context, iCtx PluginSetupContext) (AsyncPlugin, error)

// PluginEntry is a structure that is used to indicate to the system a WebAPI plugin.
type PluginEntry struct {
	// ID/Name of the plugin. This will be used to identify this plugin and has to be unique in the entire system.
	// All functions like enabling and disabling a plugin use this ID.
	ID pluginsCore.TaskType

	// A list of all the task types for which this plugin is applicable.
	SupportedTaskTypes []pluginsCore.TaskType

	// PluginLoader lazily constructs an instance of the plugin; see PluginLoader docs for ordering guarantees.
	PluginLoader PluginLoader

	// Boolean that indicates if this plugin can be used as the default for unknown task types. There can only be
	// one default in the system.
	IsDefault bool

	// A list of all task types for which this plugin should be default handler when multiple registered plugins
	// support the same task type. This must be a subset of RegisteredTaskTypes and at most one default per task type
	// is supported.
	DefaultForTaskTypes []pluginsCore.TaskType
}

// PluginSetupContext is the interface made available to the plugin loader when initializing the plugin.
type PluginSetupContext interface {
	// MetricsScope returns a metrics scope to publish stats under.
	MetricsScope() promutils.Scope
}

// TaskExecutionContextReader is the read-only view of a task execution handed to plugins: task definition, inputs,
// metadata, secrets, and the output writer.
type TaskExecutionContextReader interface {
	// Returns a secret manager that can retrieve configured secrets for this plugin
	SecretManager() pluginsCore.SecretManager

	// Returns a TaskReader, to retrieve the task details
	TaskReader() pluginsCore.TaskReader

	// Returns an input reader to retrieve input data
	InputReader() io.InputReader

	// Returns a handle to the Task's execution metadata.
	TaskExecutionMetadata() pluginsCore.TaskExecutionMetadata

	// Provides an output sync of type io.OutputWriter
	OutputWriter() io.OutputWriter
}

// TaskExecutionContext extends the read-only context with access to the raw datastore for persisting outputs.
type TaskExecutionContext interface {
	TaskExecutionContextReader

	// Provides the raw datastore to enable persisting outputs.
	DataStore() *storage.DataStore
}

// GetContext is the context made available to AsyncPlugin.Get; it exposes the cached ResourceMeta for the item
// being polled.
type GetContext interface {
	ResourceMeta() ResourceMeta
}

// DeleteContext is the context made available to AsyncPlugin.Delete; Reason carries a human-readable cause for
// the termination.
type DeleteContext interface {
	ResourceMeta() ResourceMeta
	Reason() string
}

// StatusContext is the context made available to AsyncPlugin.Status; in addition to the full task execution
// context it exposes the cached ResourceMeta and the latest Resource retrieved by Get.
type StatusContext interface {
	TaskExecutionContext

	ResourceMeta() ResourceMeta
	Resource() Resource
}

// ResourceMeta is metadata about the resource to be synced from the remote service. It is plugin-defined and
// opaque to the framework (cached in memory between calls).
type ResourceMeta = interface{}

// Resource is the plugin-defined, opaque representation of the remote resource's latest state.
type Resource = interface{}

// AsyncPlugin defines the interface for plugins that call Async Web APIs.
type AsyncPlugin interface {
	// GetConfig gets the loaded plugin config. This will be used to control the interactions with the remote service.
	GetConfig() PluginConfig

	// ResourceRequirements analyzes the task to execute and determines the ResourceNamespace to be used when
	// allocating tokens.
	ResourceRequirements(ctx context.Context, tCtx TaskExecutionContextReader) (
		namespace pluginsCore.ResourceNamespace, constraints pluginsCore.ResourceConstraintsSpec, err error)

	// Create a new resource using the TaskExecutionContext provided. Ideally, the remote service uses the name in the
	// TaskExecutionMetadata to launch the resource in an idempotent fashion. This function will be on the critical path
	// of the execution of a workflow and therefore it should not do expensive operations before making the webAPI call.
	// Flyte will call this api at least once. It's important that the callee service is idempotent to ensure no
	// resource leakage or duplicate requests. Flyte has an in-memory cache that does a best effort idempotency
	// guarantee.
	// It's required to return a resourceMeta object (that will be cached in memory). In case the remote API returns the
	// actually created resource, it's advisable to also return that in optionalResource output parameter. Doing so will
	// instruct the system to call Status() immediately after Create() and potentially terminate early if the resource
	// has already been executed/failed.
	// If the remote API failed due to a system error (network failure, timeout... etc.), the plugin should return a
	// non-nil error. The system will automatically retry the operation based on the plugin config.
	Create(ctx context.Context, tCtx TaskExecutionContextReader) (resourceMeta ResourceMeta, optionalResource Resource,
		err error)

	// Get the resource that matches the keys. If the plugin hits any failure, it should stop and return the failure.
	// This API will be called asynchronously and periodically to update the set of tasks currently in progress. It's
	// acceptable if this API is blocking since it'll be called from a background go-routine.
	// Best practices:
	//  1) Instead of returning the entire response object retrieved from the WebAPI, construct a smaller object that
	//     has enough information to construct the status/phase, error and/or output.
	//  2) This object will NOT be serialized/marshaled. It's, therefore, not a requirement to make it so.
	//  3) There is already client-side throttling in place. If the WebAPI returns a throttling error, you should return
	//     it as is so that the appropriate metrics are updated and the system administrator can update throttling
	//     params accordingly.
	Get(ctx context.Context, tCtx GetContext) (latest Resource, err error)

	// Delete the object in the remote service using the resource key. Flyte will call this API at least once. If the
	// resource has already been deleted, the API should not fail.
	Delete(ctx context.Context, tCtx DeleteContext) error

	// Status checks the status of a given resource and translates it to a Flyte-understandable PhaseInfo. This API
	// should avoid making any network calls and should run very efficiently.
	Status(ctx context.Context, tCtx StatusContext) (phase pluginsCore.PhaseInfo, err error)
}

// SyncPlugin defines the interface for plugins that call Web APIs synchronously.
type SyncPlugin interface {
	// GetConfig gets the loaded plugin config. This will be used to control the interactions with the remote service.
	GetConfig() PluginConfig

	// Do performs the action associated with this plugin.
	Do(ctx context.Context, tCtx TaskExecutionContext) (phase pluginsCore.PhaseInfo, err error)
}

// ---- flyteplugins/go/tasks/pluginmachinery/webapi/pluginconfig.go ----

package webapi

import (
	"time"

	"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core"
	"github.com/flyteorg/flyte/v2/flytestdlib/config"
)

//go:generate pflags PluginConfig --default-var=DefaultPluginConfig

// DefaultPluginConfig holds the defaults applied when no overrides are supplied via config files or flags.
var (
	DefaultPluginConfig = PluginConfig{
		Caching: CachingConfig{
			Size:              100000,
			ResyncInterval:    config.Duration{Duration: 30 * time.Second},
			Workers:           10,
			MaxSystemFailures: 5,
		},
		ReadRateLimiter: RateLimiterConfig{
			QPS:   30,
			Burst: 300,
		},
		WriteRateLimiter: RateLimiterConfig{
			QPS:   20,
			Burst: 200,
		},
	}
)

// RateLimiterConfig bounds the rate at which the plugin manager automatically queries the remote API.
type RateLimiterConfig struct {
	// Queries per second from one process to the remote service
	QPS int `json:"qps" pflag:",Defines the max rate of calls per second."`

	// Maximum burst size
	Burst int `json:"burst" pflag:",Defines the maximum burst size."`
}

// CachingConfig controls the in-memory cache of remote resources and the background sync loop.
type CachingConfig struct {
	// Max number of Resource's to be stored in the local cache
	Size int `json:"size" pflag:",Defines the maximum number of items to cache."`

	// How often to query for objects in remote service.
	ResyncInterval config.Duration `json:"resyncInterval" pflag:",Defines the sync interval."`

	// Workers control how many parallel workers should start up to retrieve updates
	// about resources.
	Workers int `json:"workers" pflag:",Defines the number of workers to start up to process items."`

	// MaxSystemFailures defines the number of failures to fetch a task before failing the task.
	MaxSystemFailures int `json:"maxSystemFailures" pflag:",Defines the number of failures to fetch a task before failing the task."`
}

// ResourceQuotas maps a resource namespace to the maximum number of tokens the plugin may hold for it.
type ResourceQuotas map[core.ResourceNamespace]int

// PluginConfig carries properties that help the system optimize itself to handle the specific plugin.
type PluginConfig struct {
	// ResourceQuotas allows the plugin to register resources' quotas to ensure the system comply with restrictions in
	// the remote service.
	ResourceQuotas ResourceQuotas `json:"resourceQuotas" pflag:"-,Defines resource quotas."`
	ReadRateLimiter RateLimiterConfig `json:"readRateLimiter" pflag:",Defines rate limiter properties for read actions (e.g. retrieve status)."`
	WriteRateLimiter RateLimiterConfig `json:"writeRateLimiter" pflag:",Defines rate limiter properties for write actions."`
	Caching CachingConfig `json:"caching" pflag:",Defines caching characteristics."`
	// Gets an empty copy for the custom state that can be used in ResourceMeta when
	// interacting with the remote service.
	ResourceMeta ResourceMeta `json:"resourceMeta" pflag:"-,A copy for the custom state."`
}

// ---- flyteplugins/go/tasks/pluginmachinery/webapi/pluginconfig_flags.go ----

// Code generated by go generate; DO NOT EDIT.
// This file was generated by robots.

package webapi

import (
	"encoding/json"
	"reflect"

	"fmt"

	"github.com/spf13/pflag"
)

// If v is a pointer, it will get its element value or the zero value of the element type.
// If v is not a pointer, it will return it as is.
+func (PluginConfig) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (PluginConfig) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (PluginConfig) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in PluginConfig and its nested types. The format of the +// flags is json-name.json-sub-name... etc. +func (cfg PluginConfig) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("PluginConfig", pflag.ExitOnError) + cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "readRateLimiter.qps"), DefaultPluginConfig.ReadRateLimiter.QPS, "Defines the max rate of calls per second.") + cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "readRateLimiter.burst"), DefaultPluginConfig.ReadRateLimiter.Burst, "Defines the maximum burst size.") + cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "writeRateLimiter.qps"), DefaultPluginConfig.WriteRateLimiter.QPS, "Defines the max rate of calls per second.") + cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "writeRateLimiter.burst"), DefaultPluginConfig.WriteRateLimiter.Burst, "Defines the maximum burst size.") + cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "caching.size"), DefaultPluginConfig.Caching.Size, "Defines the maximum number of items to cache.") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "caching.resyncInterval"), DefaultPluginConfig.Caching.ResyncInterval.String(), "Defines the sync interval.") + cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "caching.workers"), DefaultPluginConfig.Caching.Workers, "Defines the number of workers to start up to 
process items.") + cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "caching.maxSystemFailures"), DefaultPluginConfig.Caching.MaxSystemFailures, "Defines the number of failures to fetch a task before failing the task.") + return cmdFlags +} diff --git a/flyteplugins/go/tasks/pluginmachinery/webapi/pluginconfig_flags_test.go b/flyteplugins/go/tasks/pluginmachinery/webapi/pluginconfig_flags_test.go new file mode 100755 index 0000000000..9f5124f069 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/webapi/pluginconfig_flags_test.go @@ -0,0 +1,214 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package webapi + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsPluginConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. +func canGetElementPluginConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsPluginConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. +func jsonUnmarshalerHookPluginConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementPluginConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. 
Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_PluginConfig(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookPluginConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_PluginConfig(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_PluginConfig(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_PluginConfig(val, result)) +} + +func testDecodeRaw_PluginConfig(t *testing.T, vStringSlice, result interface{}) { + assert.NoError(t, decode_PluginConfig(vStringSlice, result)) +} + +func TestPluginConfig_GetPFlagSet(t *testing.T) { + val := PluginConfig{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) +} + +func TestPluginConfig_SetFlags(t *testing.T) { + actual := PluginConfig{} + cmdFlags := actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_readRateLimiter.qps", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("readRateLimiter.qps", testValue) + if vInt, err := cmdFlags.GetInt("readRateLimiter.qps"); err == nil { + testDecodeJson_PluginConfig(t, fmt.Sprintf("%v", vInt), 
&actual.ReadRateLimiter.QPS) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_readRateLimiter.burst", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("readRateLimiter.burst", testValue) + if vInt, err := cmdFlags.GetInt("readRateLimiter.burst"); err == nil { + testDecodeJson_PluginConfig(t, fmt.Sprintf("%v", vInt), &actual.ReadRateLimiter.Burst) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_writeRateLimiter.qps", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("writeRateLimiter.qps", testValue) + if vInt, err := cmdFlags.GetInt("writeRateLimiter.qps"); err == nil { + testDecodeJson_PluginConfig(t, fmt.Sprintf("%v", vInt), &actual.WriteRateLimiter.QPS) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_writeRateLimiter.burst", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("writeRateLimiter.burst", testValue) + if vInt, err := cmdFlags.GetInt("writeRateLimiter.burst"); err == nil { + testDecodeJson_PluginConfig(t, fmt.Sprintf("%v", vInt), &actual.WriteRateLimiter.Burst) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_caching.size", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("caching.size", testValue) + if vInt, err := cmdFlags.GetInt("caching.size"); err == nil { + testDecodeJson_PluginConfig(t, fmt.Sprintf("%v", vInt), &actual.Caching.Size) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_caching.resyncInterval", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := DefaultPluginConfig.Caching.ResyncInterval.String() + + cmdFlags.Set("caching.resyncInterval", testValue) + if vString, err := cmdFlags.GetString("caching.resyncInterval"); err == nil { + testDecodeJson_PluginConfig(t, fmt.Sprintf("%v", 
vString), &actual.Caching.ResyncInterval) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_caching.workers", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("caching.workers", testValue) + if vInt, err := cmdFlags.GetInt("caching.workers"); err == nil { + testDecodeJson_PluginConfig(t, fmt.Sprintf("%v", vInt), &actual.Caching.Workers) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_caching.maxSystemFailures", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("caching.maxSystemFailures", testValue) + if vInt, err := cmdFlags.GetInt("caching.maxSystemFailures"); err == nil { + testDecodeJson_PluginConfig(t, fmt.Sprintf("%v", vInt), &actual.Caching.MaxSystemFailures) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flyteplugins/go/tasks/pluginmachinery/workqueue/config.go b/flyteplugins/go/tasks/pluginmachinery/workqueue/config.go new file mode 100644 index 0000000000..e05341daa2 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/workqueue/config.go @@ -0,0 +1,8 @@ +package workqueue + +// Config for the queue +type Config struct { + Workers int `json:"workers" pflag:",Number of concurrent workers to start processing the queue."` + MaxRetries int `json:"maxRetries" pflag:",Maximum number of retries per item."` + IndexCacheMaxItems int `json:"maxItems" pflag:",Maximum number of entries to keep in the index."` +} diff --git a/flyteplugins/go/tasks/pluginmachinery/workqueue/mocks/indexed_work_queue.go b/flyteplugins/go/tasks/pluginmachinery/workqueue/mocks/indexed_work_queue.go new file mode 100644 index 0000000000..c160d06a20 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/workqueue/mocks/indexed_work_queue.go @@ -0,0 +1,127 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + workqueue "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/workqueue" + mock "github.com/stretchr/testify/mock" +) + +// IndexedWorkQueue is an autogenerated mock type for the IndexedWorkQueue type +type IndexedWorkQueue struct { + mock.Mock +} + +type IndexedWorkQueue_Get struct { + *mock.Call +} + +func (_m IndexedWorkQueue_Get) Return(info workqueue.WorkItemInfo, found bool, err error) *IndexedWorkQueue_Get { + return &IndexedWorkQueue_Get{Call: _m.Call.Return(info, found, err)} +} + +func (_m *IndexedWorkQueue) OnGet(id string) *IndexedWorkQueue_Get { + c_call := _m.On("Get", id) + return &IndexedWorkQueue_Get{Call: c_call} +} + +func (_m *IndexedWorkQueue) OnGetMatch(matchers ...interface{}) *IndexedWorkQueue_Get { + c_call := _m.On("Get", matchers...) + return &IndexedWorkQueue_Get{Call: c_call} +} + +// Get provides a mock function with given fields: id +func (_m *IndexedWorkQueue) Get(id string) (workqueue.WorkItemInfo, bool, error) { + ret := _m.Called(id) + + var r0 workqueue.WorkItemInfo + if rf, ok := ret.Get(0).(func(string) workqueue.WorkItemInfo); ok { + r0 = rf(id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(workqueue.WorkItemInfo) + } + } + + var r1 bool + if rf, ok := ret.Get(1).(func(string) bool); ok { + r1 = rf(id) + } else { + r1 = ret.Get(1).(bool) + } + + var r2 error + if rf, ok := ret.Get(2).(func(string) error); ok { + r2 = rf(id) + } else { + r2 = ret.Error(2) + } + + return r0, r1, r2 +} + +type IndexedWorkQueue_Queue struct { + *mock.Call +} + +func (_m IndexedWorkQueue_Queue) Return(_a0 error) *IndexedWorkQueue_Queue { + return &IndexedWorkQueue_Queue{Call: _m.Call.Return(_a0)} +} + +func (_m *IndexedWorkQueue) OnQueue(ctx context.Context, id string, once workqueue.WorkItem) *IndexedWorkQueue_Queue { + c_call := _m.On("Queue", ctx, id, once) + return &IndexedWorkQueue_Queue{Call: c_call} +} + +func (_m *IndexedWorkQueue) OnQueueMatch(matchers 
...interface{}) *IndexedWorkQueue_Queue { + c_call := _m.On("Queue", matchers...) + return &IndexedWorkQueue_Queue{Call: c_call} +} + +// Queue provides a mock function with given fields: ctx, id, once +func (_m *IndexedWorkQueue) Queue(ctx context.Context, id string, once workqueue.WorkItem) error { + ret := _m.Called(ctx, id, once) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, workqueue.WorkItem) error); ok { + r0 = rf(ctx, id, once) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +type IndexedWorkQueue_Start struct { + *mock.Call +} + +func (_m IndexedWorkQueue_Start) Return(_a0 error) *IndexedWorkQueue_Start { + return &IndexedWorkQueue_Start{Call: _m.Call.Return(_a0)} +} + +func (_m *IndexedWorkQueue) OnStart(ctx context.Context) *IndexedWorkQueue_Start { + c_call := _m.On("Start", ctx) + return &IndexedWorkQueue_Start{Call: c_call} +} + +func (_m *IndexedWorkQueue) OnStartMatch(matchers ...interface{}) *IndexedWorkQueue_Start { + c_call := _m.On("Start", matchers...) + return &IndexedWorkQueue_Start{Call: c_call} +} + +// Start provides a mock function with given fields: ctx +func (_m *IndexedWorkQueue) Start(ctx context.Context) error { + ret := _m.Called(ctx) + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) + } else { + r0 = ret.Error(0) + } + + return r0 +} diff --git a/flyteplugins/go/tasks/pluginmachinery/workqueue/mocks/processor.go b/flyteplugins/go/tasks/pluginmachinery/workqueue/mocks/processor.go new file mode 100644 index 0000000000..879cf344ac --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/workqueue/mocks/processor.go @@ -0,0 +1,54 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + workqueue "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/workqueue" + mock "github.com/stretchr/testify/mock" +) + +// Processor is an autogenerated mock type for the Processor type +type Processor struct { + mock.Mock +} + +type Processor_Process struct { + *mock.Call +} + +func (_m Processor_Process) Return(_a0 workqueue.WorkStatus, _a1 error) *Processor_Process { + return &Processor_Process{Call: _m.Call.Return(_a0, _a1)} +} + +func (_m *Processor) OnProcess(ctx context.Context, workItem workqueue.WorkItem) *Processor_Process { + c_call := _m.On("Process", ctx, workItem) + return &Processor_Process{Call: c_call} +} + +func (_m *Processor) OnProcessMatch(matchers ...interface{}) *Processor_Process { + c_call := _m.On("Process", matchers...) + return &Processor_Process{Call: c_call} +} + +// Process provides a mock function with given fields: ctx, workItem +func (_m *Processor) Process(ctx context.Context, workItem workqueue.WorkItem) (workqueue.WorkStatus, error) { + ret := _m.Called(ctx, workItem) + + var r0 workqueue.WorkStatus + if rf, ok := ret.Get(0).(func(context.Context, workqueue.WorkItem) workqueue.WorkStatus); ok { + r0 = rf(ctx, workItem) + } else { + r0 = ret.Get(0).(workqueue.WorkStatus) + } + + var r1 error + if rf, ok := ret.Get(1).(func(context.Context, workqueue.WorkItem) error); ok { + r1 = rf(ctx, workItem) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/flyteplugins/go/tasks/pluginmachinery/workqueue/mocks/work_item.go b/flyteplugins/go/tasks/pluginmachinery/workqueue/mocks/work_item.go new file mode 100644 index 0000000000..bf5a6070be --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/workqueue/mocks/work_item.go @@ -0,0 +1,10 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. 
+ +package mocks + +import mock "github.com/stretchr/testify/mock" + +// WorkItem is an autogenerated mock type for the WorkItem type +type WorkItem struct { + mock.Mock +} diff --git a/flyteplugins/go/tasks/pluginmachinery/workqueue/mocks/work_item_info.go b/flyteplugins/go/tasks/pluginmachinery/workqueue/mocks/work_item_info.go new file mode 100644 index 0000000000..d50df39135 --- /dev/null +++ b/flyteplugins/go/tasks/pluginmachinery/workqueue/mocks/work_item_info.go @@ -0,0 +1,143 @@ +// Code generated by mockery v1.0.1. DO NOT EDIT. + +package mocks + +import ( + workqueue "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/workqueue" + mock "github.com/stretchr/testify/mock" +) + +// WorkItemInfo is an autogenerated mock type for the WorkItemInfo type +type WorkItemInfo struct { + mock.Mock +} + +type WorkItemInfo_Error struct { + *mock.Call +} + +func (_m WorkItemInfo_Error) Return(_a0 error) *WorkItemInfo_Error { + return &WorkItemInfo_Error{Call: _m.Call.Return(_a0)} +} + +func (_m *WorkItemInfo) OnError() *WorkItemInfo_Error { + c_call := _m.On("Error") + return &WorkItemInfo_Error{Call: c_call} +} + +func (_m *WorkItemInfo) OnErrorMatch(matchers ...interface{}) *WorkItemInfo_Error { + c_call := _m.On("Error", matchers...) + return &WorkItemInfo_Error{Call: c_call} +} + +// Error provides a mock function with given fields: +func (_m *WorkItemInfo) Error() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +type WorkItemInfo_ID struct { + *mock.Call +} + +func (_m WorkItemInfo_ID) Return(_a0 string) *WorkItemInfo_ID { + return &WorkItemInfo_ID{Call: _m.Call.Return(_a0)} +} + +func (_m *WorkItemInfo) OnID() *WorkItemInfo_ID { + c_call := _m.On("ID") + return &WorkItemInfo_ID{Call: c_call} +} + +func (_m *WorkItemInfo) OnIDMatch(matchers ...interface{}) *WorkItemInfo_ID { + c_call := _m.On("ID", matchers...) 
+ return &WorkItemInfo_ID{Call: c_call} +} + +// ID provides a mock function with given fields: +func (_m *WorkItemInfo) ID() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +type WorkItemInfo_Item struct { + *mock.Call +} + +func (_m WorkItemInfo_Item) Return(_a0 workqueue.WorkItem) *WorkItemInfo_Item { + return &WorkItemInfo_Item{Call: _m.Call.Return(_a0)} +} + +func (_m *WorkItemInfo) OnItem() *WorkItemInfo_Item { + c_call := _m.On("Item") + return &WorkItemInfo_Item{Call: c_call} +} + +func (_m *WorkItemInfo) OnItemMatch(matchers ...interface{}) *WorkItemInfo_Item { + c_call := _m.On("Item", matchers...) + return &WorkItemInfo_Item{Call: c_call} +} + +// Item provides a mock function with given fields: +func (_m *WorkItemInfo) Item() workqueue.WorkItem { + ret := _m.Called() + + var r0 workqueue.WorkItem + if rf, ok := ret.Get(0).(func() workqueue.WorkItem); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(workqueue.WorkItem) + } + } + + return r0 +} + +type WorkItemInfo_Status struct { + *mock.Call +} + +func (_m WorkItemInfo_Status) Return(_a0 workqueue.WorkStatus) *WorkItemInfo_Status { + return &WorkItemInfo_Status{Call: _m.Call.Return(_a0)} +} + +func (_m *WorkItemInfo) OnStatus() *WorkItemInfo_Status { + c_call := _m.On("Status") + return &WorkItemInfo_Status{Call: c_call} +} + +func (_m *WorkItemInfo) OnStatusMatch(matchers ...interface{}) *WorkItemInfo_Status { + c_call := _m.On("Status", matchers...) 
	return &WorkItemInfo_Status{Call: c_call}
}

// Status provides a mock function with given fields:
func (_m *WorkItemInfo) Status() workqueue.WorkStatus {
	ret := _m.Called()

	var r0 workqueue.WorkStatus
	if rf, ok := ret.Get(0).(func() workqueue.WorkStatus); ok {
		r0 = rf()
	} else {
		r0 = ret.Get(0).(workqueue.WorkStatus)
	}

	return r0
}

// ---- flyteplugins/go/tasks/pluginmachinery/workqueue/queue.go ----

package workqueue

import (
	"context"
	"fmt"
	"sync"

	lru "github.com/hashicorp/golang-lru"
	"github.com/prometheus/client_golang/prometheus"
	"k8s.io/client-go/util/workqueue"

	"github.com/flyteorg/flyte/v2/flytestdlib/contextutils"
	"github.com/flyteorg/flyte/v2/flytestdlib/errors"
	"github.com/flyteorg/flyte/v2/flytestdlib/logger"
	"github.com/flyteorg/flyte/v2/flytestdlib/promutils"
)

//go:generate mockery -all -case=underscore
//go:generate enumer --type=WorkStatus

// WorkItemID uniquely identifies a work item within the queue's index.
type WorkItemID = string

// WorkStatus is the processing state of a work item; string form is generated by enumer (see go:generate above).
type WorkStatus uint8

const (
	// WorkStatusNotDone is the zero value: the item is queued or in flight.
	WorkStatusNotDone WorkStatus = iota
	// WorkStatusSucceeded indicates processing finished successfully.
	WorkStatusSucceeded
	// WorkStatusFailed indicates processing finished with a permanent failure.
	WorkStatusFailed
)

const (
	// ErrNotYetStarted is returned when the queue is used before Start was called.
	ErrNotYetStarted errors.ErrorCode = "NOT_STARTED"
)

// IsTerminal returns true when the item requires no further processing (succeeded or failed).
func (w WorkStatus) IsTerminal() bool {
	return w == WorkStatusFailed || w == WorkStatusSucceeded
}

// WorkItem is a generic item that can be stored in the work queue.
type WorkItem interface{}

// Represents the result of the work item processing.
type WorkItemInfo interface {
	Item() WorkItem
	ID() WorkItemID
	Status() WorkStatus
	Error() error
}

// Represents the indexed queue semantics. An indexed work queue is a work queue that additionally keeps track of the
// final processing results of work items.
type IndexedWorkQueue interface {
	// Queues the item to be processed. If the item is already in the cache or has been processed before (and is still
	// in-memory), it'll not be added again.
	Queue(ctx context.Context, id WorkItemID, once WorkItem) error

	// Retrieves an item by id.
	Get(id WorkItemID) (info WorkItemInfo, found bool, err error)

	// Start must be called before queuing items into the queue.
	Start(ctx context.Context) error
}

// Represents the processor logic to operate on work items.
type Processor interface {
	Process(ctx context.Context, workItem WorkItem) (WorkStatus, error)
}

// workItemWrapper is the internal record stored in the index: the payload plus bookkeeping
// (status, retry count, last error, and log fields captured at Queue time).
type workItemWrapper struct {
	id         WorkItemID
	logFields  map[string]interface{}
	payload    WorkItem
	status     WorkStatus
	retryCount uint
	err        error
}

// Item returns the wrapped payload.
func (w workItemWrapper) Item() WorkItem {
	return w.payload
}

// ID returns the item's unique identifier.
func (w workItemWrapper) ID() WorkItemID {
	return w.id
}

// Status returns the item's current processing status.
func (w workItemWrapper) Status() WorkStatus {
	return w.status
}

// Error returns the last processing error, if any.
func (w workItemWrapper) Error() error {
	return w.err
}

// Clone returns a shallow copy of the wrapper (value receiver copies the struct; payload and
// logFields are still shared with the original).
func (w workItemWrapper) Clone() workItemWrapper {
	return w
}

// metrics holds the prometheus counters published by the queue under Scope.
type metrics struct {
	CacheHit        prometheus.Counter
	CacheMiss       prometheus.Counter
	ProcessorErrors prometheus.Counter
	Scope           promutils.Scope
}

// queue is the IndexedWorkQueue implementation: a k8s workqueue for ordering plus an LRU index
// for result lookup. NOTE(review): wlock appears to guard writes and rlock reads of the index —
// confirm against the rest of this file before relying on that split.
type queue struct {
	name       string
	metrics    metrics
	wlock      sync.Mutex
	rlock      sync.RWMutex
	workers    int
	maxRetries int
	started    bool
	queue      workqueue.Interface
	index      workItemCache
	processor  Processor
}

// workItemCache is a typed wrapper over the LRU cache keyed by WorkItemID.
type workItemCache struct {
	*lru.Cache
}

// Get looks up a wrapper by id; found is false when the id is not cached (possibly evicted).
func (c workItemCache) Get(id WorkItemID) (item *workItemWrapper, found bool) {
	o, found := c.Cache.Get(id)
	if !found {
		return nil, found
	}

	return o.(*workItemWrapper), true
}

// Add inserts/updates the wrapper keyed by its id; evicted reports whether an older entry was dropped.
func (c workItemCache) Add(item *workItemWrapper) (evicted bool) {
	return c.Cache.Add(item.id, item)
}

// copyAllowedLogFields captures the context's log fields minus the routine label
// (presumably because the worker goroutine sets its own routine label — confirm).
func copyAllowedLogFields(ctx context.Context) map[string]interface{} {
	logFields := contextutils.GetLogFields(ctx)
	delete(logFields, contextutils.RoutineLabelKey.String())
	return logFields
}

func (q *queue) Queue(ctx context.Context, 
If the item is already in the cache or has been processed before (and is still + // in-memory), it'll not be added again. + Queue(ctx context.Context, id WorkItemID, once WorkItem) error + + // Retrieves an item by id. + Get(id WorkItemID) (info WorkItemInfo, found bool, err error) + + // Start must be called before queuing items into the queue. + Start(ctx context.Context) error +} + +// Represents the processor logic to operate on work items. +type Processor interface { + Process(ctx context.Context, workItem WorkItem) (WorkStatus, error) +} + +type workItemWrapper struct { + id WorkItemID + logFields map[string]interface{} + payload WorkItem + status WorkStatus + retryCount uint + err error +} + +func (w workItemWrapper) Item() WorkItem { + return w.payload +} + +func (w workItemWrapper) ID() WorkItemID { + return w.id +} + +func (w workItemWrapper) Status() WorkStatus { + return w.status +} + +func (w workItemWrapper) Error() error { + return w.err +} + +func (w workItemWrapper) Clone() workItemWrapper { + return w +} + +type metrics struct { + CacheHit prometheus.Counter + CacheMiss prometheus.Counter + ProcessorErrors prometheus.Counter + Scope promutils.Scope +} + +type queue struct { + name string + metrics metrics + wlock sync.Mutex + rlock sync.RWMutex + workers int + maxRetries int + started bool + queue workqueue.Interface + index workItemCache + processor Processor +} + +type workItemCache struct { + *lru.Cache +} + +func (c workItemCache) Get(id WorkItemID) (item *workItemWrapper, found bool) { + o, found := c.Cache.Get(id) + if !found { + return nil, found + } + + return o.(*workItemWrapper), true +} + +func (c workItemCache) Add(item *workItemWrapper) (evicted bool) { + return c.Cache.Add(item.id, item) +} + +func copyAllowedLogFields(ctx context.Context) map[string]interface{} { + logFields := contextutils.GetLogFields(ctx) + delete(logFields, contextutils.RoutineLabelKey.String()) + return logFields +} + +func (q *queue) Queue(ctx context.Context, 
id WorkItemID, once WorkItem) error { + q.wlock.Lock() + defer q.wlock.Unlock() + + if !q.started { + return errors.Errorf(ErrNotYetStarted, "Queue must be started before enqueuing any item.") + } + + if _, found := q.index.Get(id); found { + return nil + } + + wrapper := &workItemWrapper{ + id: id, + logFields: copyAllowedLogFields(ctx), + payload: once, + } + + q.index.Add(wrapper) + q.queue.Add(wrapper) + return nil +} + +func (q *queue) Get(id WorkItemID) (info WorkItemInfo, found bool, err error) { + q.rlock.Lock() + defer q.rlock.Unlock() + + wrapper, found := q.index.Get(id) + if !found { + q.metrics.CacheMiss.Inc() + return nil, found, nil + } + + v := wrapper.Clone() + q.metrics.CacheHit.Inc() + return &v, true, nil +} + +func contextWithValues(ctx context.Context, fields map[string]interface{}) context.Context { + for key, value := range fields { + ctx = context.WithValue(ctx, contextutils.Key(key), value) + } + + return ctx +} + +func (q *queue) Start(ctx context.Context) error { + q.wlock.Lock() + defer q.wlock.Unlock() + + if q.started { + return fmt.Errorf("queue already started") + } + + for i := 0; i < q.workers; i++ { + go func(ctx context.Context) { + for { + select { + case <-ctx.Done(): + logger.Debug(ctx, "Context cancelled. Shutting down.") + return + default: + item, shutdown := q.queue.Get() + if shutdown { + logger.Debug(ctx, "Work queue is shutting down.") + return + } + + wrapperV := item.(*workItemWrapper).Clone() + wrapper := &wrapperV + ws := wrapper.status + var err error + + func() { + defer func() { + if e, ok := recover().(error); ok { + logger.Errorf(ctx, "Worker panic'd while processing item [%v]. 
Error: %v", wrapper.id, e) + err = e + } + }() + + ctxWithFields := contextWithValues(ctx, wrapper.logFields) + ws, err = q.processor.Process(ctxWithFields, wrapper.payload) + }() + + if err != nil { + q.metrics.ProcessorErrors.Inc() + + wrapper.retryCount++ + wrapper.err = err + if wrapper.retryCount >= uint(q.maxRetries) { + logger.Debugf(ctx, "WorkItem [%v] exhausted all retries. Last Error: %v.", + wrapper.ID(), err) + wrapper.status = WorkStatusFailed + ws = WorkStatusFailed + q.index.Add(wrapper) + continue + } + } + + wrapper.status = ws + q.index.Add(wrapper) + if !ws.IsTerminal() { + q.queue.Add(wrapper) + } + } + } + }(contextutils.WithGoroutineLabel(ctx, fmt.Sprintf("%v-worker-%v", q.name, i))) + } + + q.started = true + return nil +} + +func newMetrics(scope promutils.Scope) metrics { + return metrics{ + CacheHit: scope.MustNewCounter("cache_hit", "Counter for cache hits."), + CacheMiss: scope.MustNewCounter("cache_miss", "Counter for cache misses."), + ProcessorErrors: scope.MustNewCounter("proc_errors", "Counter for processor errors."), + Scope: scope, + } +} + +// Instantiates a new Indexed Work queue. 
func NewIndexedWorkQueue(name string, processor Processor, cfg Config, metricsScope promutils.Scope) (IndexedWorkQueue, error) {
	cache, err := lru.New(cfg.IndexCacheMaxItems)
	if err != nil {
		return nil, err
	}

	return &queue{
		name:       name,
		metrics:    newMetrics(metricsScope),
		wlock:      sync.Mutex{},
		rlock:      sync.RWMutex{},
		workers:    cfg.Workers,
		maxRetries: cfg.MaxRetries,
		queue:      workqueue.NewNamed(metricsScope.CurrentScope()),
		index:      workItemCache{Cache: cache},
		processor:  processor,
	}, nil
}

// diff: flyteplugins/go/tasks/pluginmachinery/workqueue/queue_test.go (new file)

package workqueue

import (
	"context"
	"fmt"
	"reflect"
	"testing"
	"time"

	"github.com/go-test/deep"
	lru "github.com/hashicorp/golang-lru"
	"github.com/stretchr/testify/assert"

	"github.com/flyteorg/flyte/v2/flytestdlib/contextutils"
	"github.com/flyteorg/flyte/v2/flytestdlib/promutils"
)

// singleStatusProcessor always returns targetStatus, after validating the
// incoming item's type and that all required context fields are present.
type singleStatusProcessor struct {
	targetStatus          WorkStatus
	expectedType          interface{}
	expectedContextFields []contextutils.Key
}

func (s singleStatusProcessor) Process(ctx context.Context, workItem WorkItem) (WorkStatus, error) {
	comingType := reflect.TypeOf(workItem)
	expectedType := reflect.TypeOf(s.expectedType)
	if comingType != expectedType {
		return WorkStatusFailed, fmt.Errorf("expected Type != incoming Type. %v != %v", expectedType, comingType)
	}

	for _, expectedField := range s.expectedContextFields {
		actualVal := ctx.Value(expectedField)
		if actualVal == nil {
			return WorkStatusFailed, fmt.Errorf("expected field not found. [%v]", expectedField)
		}
	}

	return s.targetStatus, nil
}

func newSingleStatusProcessor(expectedType interface{}, status WorkStatus) singleStatusProcessor {
	return singleStatusProcessor{targetStatus: status, expectedType: expectedType}
}

// alwaysFailingProcessor errors on every item; used to exercise the
// retry-exhaustion / failure path.
type alwaysFailingProcessor struct{}

func (alwaysFailingProcessor) Process(ctx context.Context, workItem WorkItem) (WorkStatus, error) {
	return WorkStatusNotDone, fmt.Errorf("this processor always errors")
}

func TestWorkStatus_IsTerminal(t *testing.T) {
	tests := []struct {
		w    WorkStatus
		want bool
	}{
		{WorkStatusSucceeded, true},
		{WorkStatusNotDone, false},
		{WorkStatusFailed, true},
	}
	for _, tt := range tests {
		// NOTE(review): string(tt.w) converts the uint8 to a rune, not the
		// enum name — harmless for subtest naming, but String() may be intended.
		t.Run(string(tt.w), func(t *testing.T) {
			if got := tt.w.IsTerminal(); got != tt.want {
				t.Errorf("WorkStatus.IsTerminal() = %v, want %v", got, tt.want)
			}
		})
	}
}

func Test_workItemCache_Get(t *testing.T) {
	l, err := lru.New(10)
	assert.NoError(t, err)

	c := workItemCache{Cache: l}
	item := &workItemWrapper{
		id:      "ABC",
		payload: "hello",
	}
	c.Add(item)

	tests := []struct {
		name      string
		c         workItemCache
		args      WorkItemID
		wantItem  *workItemWrapper
		wantFound bool
	}{
		{"Found", c, "ABC", item, true},
		{"NotFound", c, "EFG", nil, false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			i, gotFound := tt.c.Get(tt.args)
			if gotFound != tt.wantFound {
				t.Errorf("workItemCache.Get() gotFound = %v, want %v", gotFound, tt.wantFound)
			}

			if tt.wantItem != nil {
				assert.Equal(t, tt.wantItem.ID(), i.ID())
				assert.Equal(t, tt.wantItem.Item(), i.Item())
				assert.Equal(t, tt.wantItem.Error(), i.Error())
				assert.Equal(t, tt.wantItem.Status(), i.Status())
			}
		})
	}
}

func Test_workItemCache_Add(t *testing.T) {
	// Cache of size 1 so the third distinct key forces an eviction.
	l, err := lru.New(1)
	assert.NoError(t, err)

	c := workItemCache{Cache: l}

	tests := []struct {
		name        string
		c           workItemCache
		args        *workItemWrapper
		wantEvicted bool
	}{
		{"NotEvicted", c, &workItemWrapper{id: "abc"}, false},
		{"NotEvicted2", c, &workItemWrapper{id: "abc"}, false},
		{"Evicted", c, &workItemWrapper{id: "efg"}, true},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if gotEvicted := tt.c.Add(tt.args); gotEvicted != tt.wantEvicted {
				t.Errorf("workItemCache.Add() = %v, want %v", gotEvicted, tt.wantEvicted)
			}
		})
	}
}

func Test_queue_Queue(t *testing.T) {
	t.Run("Err when not started", func(t *testing.T) {
		q, err := NewIndexedWorkQueue("test1", newSingleStatusProcessor("hello", WorkStatusFailed), Config{Workers: 1, MaxRetries: 0, IndexCacheMaxItems: 1}, promutils.NewTestScope())
		assert.NoError(t, err)
		assert.Error(t, q.Queue(context.TODO(), "abc", "abc"))
	})

	t.Run("Started first", func(t *testing.T) {
		q, err := NewIndexedWorkQueue("test1", newSingleStatusProcessor("hello", WorkStatusSucceeded), Config{Workers: 1, MaxRetries: 0, IndexCacheMaxItems: 1}, promutils.NewTestScope())
		assert.NoError(t, err)

		ctx, cancelNow := context.WithCancel(context.Background())
		assert.NoError(t, q.Start(ctx))
		assert.NoError(t, q.Queue(context.TODO(), "abc", "abc"))
		cancelNow()
	})
}

func Test_queue_Get(t *testing.T) {
	q, err := NewIndexedWorkQueue("test1", newSingleStatusProcessor(&workItemWrapper{}, WorkStatusSucceeded), Config{Workers: 1, MaxRetries: 0, IndexCacheMaxItems: 1}, promutils.NewTestScope())
	assert.NoError(t, err)

	ctx, cancelNow := context.WithCancel(context.Background())
	defer cancelNow()
	assert.NoError(t, q.Start(ctx))

	assert.NoError(t, q.Queue(ctx, "abc", &workItemWrapper{
		id:      "abc",
		payload: "something",
	}))

	tests := []struct {
		name      string
		q         IndexedWorkQueue
		id        WorkItemID
		wantInfo  WorkItemInfo
		wantFound bool
		wantErr   bool
	}{
		{"Found", q, "abc", &workItemWrapper{
			status:  WorkStatusSucceeded,
			id:      "abc",
			payload: &workItemWrapper{id: "abc", payload: "something"},
		}, true, false},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			gotInfo, gotFound, err := tt.q.Get(tt.id)
			if (err != nil) != tt.wantErr {
				t.Errorf("queue.Get() error = %v, wantErr %v", err, tt.wantErr)
				return
			}
			if diff := deep.Equal(gotInfo, tt.wantInfo); diff != nil {
				t.Errorf("queue.Get() diff = %v, gotInfo = %v, want %v", diff, gotInfo, tt.wantInfo)
			}
			if gotFound != tt.wantFound {
				t.Errorf("queue.Get() gotFound = %v, want %v", gotFound, tt.wantFound)
			}
		})
	}
}

func Test_queue_contextFields(t *testing.T) {
	q, err := NewIndexedWorkQueue("test1", singleStatusProcessor{
		targetStatus:          WorkStatusSucceeded,
		expectedType:          &workItemWrapper{},
		expectedContextFields: []contextutils.Key{contextutils.RoutineLabelKey, contextutils.NamespaceKey},
	}, Config{Workers: 1, MaxRetries: 0, IndexCacheMaxItems: 1}, promutils.NewTestScope())
	assert.NoError(t, err)

	ctx, cancelNow := context.WithCancel(context.Background())
	defer cancelNow()
	assert.NoError(t, q.Start(ctx))

	ctx = context.WithValue(context.Background(), contextutils.NamespaceKey, "blah")
	assert.NoError(t, q.Queue(ctx, "abc", &workItemWrapper{
		id:      "abc",
		payload: "something",
		logFields: copyAllowedLogFields(ctx),
	}))

	// Busy-poll until the worker reaches a terminal status.
	wi, found, err := q.Get("abc")
	for ; err == nil && (wi.Status() != WorkStatusSucceeded && wi.Status() != WorkStatusFailed); wi, found, err = q.Get("abc") {
		assert.True(t, found)
		assert.NoError(t, err)
	}

	assert.True(t, found)
	assert.NoError(t, err)
	assert.Equal(t, WorkStatusSucceeded.String(), wi.Status().String())
	assert.NoError(t, wi.Error())
}

func Test_queue_Start(t *testing.T) {
	q, err := NewIndexedWorkQueue("test1", newSingleStatusProcessor("", WorkStatusSucceeded), Config{Workers: 1, MaxRetries: 0, IndexCacheMaxItems: 1}, promutils.NewTestScope())
	assert.NoError(t, err)

	ctx, cancelNow := context.WithCancel(context.Background())
	defer cancelNow()
	assert.NoError(t, q.Start(ctx))
	// Second Start must fail.
	assert.Error(t, q.Start(ctx))
}

func Test_Failures(t *testing.T) {
	q, err := NewIndexedWorkQueue("test1", alwaysFailingProcessor{}, Config{Workers: 1, MaxRetries: 0, IndexCacheMaxItems: 1}, promutils.NewTestScope())
	assert.NoError(t, err)

	ctx, cancelNow := context.WithCancel(context.Background())
	defer cancelNow()
	assert.NoError(t, q.Start(ctx))

	assert.NoError(t, q.Queue(ctx, "abc", "hello"))
	time.Sleep(100 * time.Millisecond)
	info, found, err := q.Get("abc")
	assert.NoError(t, err)
	assert.True(t, found)
	assert.Equal(t, WorkStatusFailed.String(), info.Status().String())
}

// diff: flyteplugins/go/tasks/pluginmachinery/workqueue/workstatus_enumer.go (new file)

// Code generated by "enumer --type=WorkStatus"; DO NOT EDIT.

package workqueue

import (
	"fmt"
)

const _WorkStatusName = "WorkStatusNotDoneWorkStatusSucceededWorkStatusFailed"

var _WorkStatusIndex = [...]uint8{0, 17, 36, 52}

func (i WorkStatus) String() string {
	if i >= WorkStatus(len(_WorkStatusIndex)-1) {
		return fmt.Sprintf("WorkStatus(%d)", i)
	}
	return _WorkStatusName[_WorkStatusIndex[i]:_WorkStatusIndex[i+1]]
}

var _WorkStatusValues = []WorkStatus{0, 1, 2}

var _WorkStatusNameToValueMap = map[string]WorkStatus{
	_WorkStatusName[0:17]:  0,
	_WorkStatusName[17:36]: 1,
	_WorkStatusName[36:52]: 2,
}

// WorkStatusString retrieves an enum value from the enum constants string name.
// Throws an error if the param is not part of the enum.
+func WorkStatusString(s string) (WorkStatus, error) { + if val, ok := _WorkStatusNameToValueMap[s]; ok { + return val, nil + } + return 0, fmt.Errorf("%s does not belong to WorkStatus values", s) +} + +// WorkStatusValues returns all values of the enum +func WorkStatusValues() []WorkStatus { + return _WorkStatusValues +} + +// IsAWorkStatus returns "true" if the value is listed in the enum definition. "false" otherwise +func (i WorkStatus) IsAWorkStatus() bool { + for _, v := range _WorkStatusValues { + if i == v { + return true + } + } + return false +} diff --git a/flyteplugins/go/tasks/plugins/awsutils/awsutils.go b/flyteplugins/go/tasks/plugins/awsutils/awsutils.go new file mode 100644 index 0000000000..52bcc785d7 --- /dev/null +++ b/flyteplugins/go/tasks/plugins/awsutils/awsutils.go @@ -0,0 +1,27 @@ +package awsutils + +import ( + core2 "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" +) + +func GetRoleFromSecurityContext(roleKey string, taskExecutionMetadata core2.TaskExecutionMetadata) string { + var role string + securityContext := taskExecutionMetadata.GetSecurityContext() + if securityContext.GetRunAs() != nil { + role = securityContext.GetRunAs().GetIamRole() + } + + // Continue this for backward compatibility + if len(role) == 0 { + role = getRole(roleKey, taskExecutionMetadata.GetAnnotations()) + } + return role +} + +func getRole(roleKey string, keyValueMap map[string]string) string { + if len(roleKey) > 0 { + return keyValueMap[roleKey] + } + + return "" +} diff --git a/flyteplugins/go/tasks/plugins/k8s/dask/config.go b/flyteplugins/go/tasks/plugins/k8s/dask/config.go new file mode 100644 index 0000000000..78393a435e --- /dev/null +++ b/flyteplugins/go/tasks/plugins/k8s/dask/config.go @@ -0,0 +1,29 @@ +package dask + +import ( + pluginsConfig "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/config" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/logs" +) + +//go:generate pflags Config --default-var=defaultConfig + 
var (
	// defaultConfig is the registered baseline; runtime config overlays it.
	defaultConfig = Config{
		Logs: logs.DefaultConfig,
	}

	configSection = pluginsConfig.MustRegisterSubSection("dask", &defaultConfig)
)

// Config is config for 'dask' plugin
type Config struct {
	Logs logs.LogConfig `json:"logs,omitempty"`
}

// GetConfig returns the currently effective dask plugin config.
func GetConfig() *Config {
	return configSection.GetConfig().(*Config)
}

// SetConfig replaces the registered dask plugin config.
func SetConfig(cfg *Config) error {
	return configSection.SetConfig(cfg)
}

// diff: flyteplugins/go/tasks/plugins/k8s/dask/config_flags.go (new file, generated)

// Code generated by go generate; DO NOT EDIT.
// This file was generated by robots.

package dask

import (
	"encoding/json"
	"reflect"

	"fmt"

	"github.com/spf13/pflag"
)

// If v is a pointer, it will get its element value or the zero value of the element type.
// If v is not a pointer, it will return it as is.
// NOTE(review): for an untyped nil v, reflect.TypeOf(v) is nil and t.Kind()
// panics before the `v == nil` branch is reached — generated code, confirm
// against the pflags generator upstream.
func (Config) elemValueOrNil(v interface{}) interface{} {
	if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr {
		if reflect.ValueOf(v).IsNil() {
			return reflect.Zero(t.Elem()).Interface()
		} else {
			return reflect.ValueOf(v).Interface()
		}
	} else if v == nil {
		return reflect.Zero(t).Interface()
	}

	return v
}

// mustJsonMarshal JSON-encodes v, panicking on failure (generated helper).
func (Config) mustJsonMarshal(v interface{}) string {
	raw, err := json.Marshal(v)
	if err != nil {
		panic(err)
	}

	return string(raw)
}

// mustMarshalJSON invokes v's own MarshalJSON, panicking on failure.
func (Config) mustMarshalJSON(v json.Marshaler) string {
	raw, err := v.MarshalJSON()
	if err != nil {
		panic(err)
	}

	return string(raw)
}

// GetPFlagSet will return strongly types pflags for all fields in Config and its nested types. The format of the
// flags is json-name.json-sub-name... etc.
func (cfg Config) GetPFlagSet(prefix string) *pflag.FlagSet {
	cmdFlags := pflag.NewFlagSet("Config", pflag.ExitOnError)
	cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "logs.cloudwatch-enabled"), defaultConfig.Logs.IsCloudwatchEnabled, "Enable Cloudwatch Logging")
	cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.cloudwatch-region"), defaultConfig.Logs.CloudwatchRegion, "AWS region in which Cloudwatch logs are stored.")
	cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.cloudwatch-log-group"), defaultConfig.Logs.CloudwatchLogGroup, "Log group to which streams are associated.")
	cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.cloudwatch-template-uri"), defaultConfig.Logs.CloudwatchTemplateURI, "Template Uri to use when building cloudwatch log links")
	cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "logs.kubernetes-enabled"), defaultConfig.Logs.IsKubernetesEnabled, "Enable Kubernetes Logging")
	cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.kubernetes-url"), defaultConfig.Logs.KubernetesURL, "Console URL for Kubernetes logs")
	cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.kubernetes-template-uri"), defaultConfig.Logs.KubernetesTemplateURI, "Template Uri to use when building kubernetes log links")
	cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "logs.stackdriver-enabled"), defaultConfig.Logs.IsStackDriverEnabled, "Enable Log-links to stackdriver")
	cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.gcp-project"), defaultConfig.Logs.GCPProjectName, "Name of the project in GCP")
	cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.stackdriver-logresourcename"), defaultConfig.Logs.StackdriverLogResourceName, "Name of the logresource in stackdriver")
	cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.stackdriver-template-uri"), defaultConfig.Logs.StackDriverTemplateURI, "Template Uri to use when building stackdriver log links")
	return cmdFlags
}

// diff: flyteplugins/go/tasks/plugins/k8s/dask/config_flags_test.go
// diff: flyteplugins/go/tasks/plugins/k8s/dask/config_flags_test.go (new file, generated)

// Code generated by go generate; DO NOT EDIT.
// This file was generated by robots.

package dask

import (
	"encoding/json"
	"fmt"
	"reflect"
	"strings"
	"testing"

	"github.com/mitchellh/mapstructure"
	"github.com/stretchr/testify/assert"
)

// Kinds whose underlying element type can be obtained via .Elem().
var dereferencableKindsConfig = map[reflect.Kind]struct{}{
	reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {},
}

// Checks if t is a kind that can be dereferenced to get its underlying type.
func canGetElementConfig(t reflect.Kind) bool {
	_, exists := dereferencableKindsConfig[t]
	return exists
}

// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the
// object. Otherwise, it'll just pass on the original data.
func jsonUnmarshalerHookConfig(_, to reflect.Type, data interface{}) (interface{}, error) {
	unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem()
	if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) ||
		(canGetElementConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) {

		raw, err := json.Marshal(data)
		if err != nil {
			fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err)
			return data, nil
		}

		res := reflect.New(to).Interface()
		err = json.Unmarshal(raw, &res)
		if err != nil {
			fmt.Printf("Failed to umarshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err)
			return data, nil
		}

		return res, nil
	}

	return data, nil
}

// decode_Config maps loosely-typed input onto result using json tags.
func decode_Config(input, result interface{}) error {
	config := &mapstructure.DecoderConfig{
		TagName:          "json",
		WeaklyTypedInput: true,
		Result:           result,
		DecodeHook: mapstructure.ComposeDecodeHookFunc(
			mapstructure.StringToTimeDurationHookFunc(),
			mapstructure.StringToSliceHookFunc(","),
			jsonUnmarshalerHookConfig,
		),
	}

	decoder, err := mapstructure.NewDecoder(config)
	if err != nil {
		return err
	}

	return decoder.Decode(input)
}

// join_Config stringifies and joins the elements of a slice/array value.
func join_Config(arr interface{}, sep string) string {
	listValue := reflect.ValueOf(arr)
	strs := make([]string, 0, listValue.Len())
	for i := 0; i < listValue.Len(); i++ {
		strs = append(strs, fmt.Sprintf("%v", listValue.Index(i)))
	}

	return strings.Join(strs, sep)
}

func testDecodeJson_Config(t *testing.T, val, result interface{}) {
	assert.NoError(t, decode_Config(val, result))
}

func testDecodeRaw_Config(t *testing.T, vStringSlice, result interface{}) {
	assert.NoError(t, decode_Config(vStringSlice, result))
}

func TestConfig_GetPFlagSet(t *testing.T) {
	val := Config{}
	cmdFlags := val.GetPFlagSet("")
	assert.True(t, cmdFlags.HasFlags())
}

// TestConfig_SetFlags round-trips an override through every generated flag.
func TestConfig_SetFlags(t *testing.T) {
	actual := Config{}
	cmdFlags := actual.GetPFlagSet("")
	assert.True(t, cmdFlags.HasFlags())

	t.Run("Test_logs.cloudwatch-enabled", func(t *testing.T) {

		t.Run("Override", func(t *testing.T) {
			testValue := "1"

			cmdFlags.Set("logs.cloudwatch-enabled", testValue)
			if vBool, err := cmdFlags.GetBool("logs.cloudwatch-enabled"); err == nil {
				testDecodeJson_Config(t, fmt.Sprintf("%v", vBool), &actual.Logs.IsCloudwatchEnabled)

			} else {
				assert.FailNow(t, err.Error())
			}
		})
	})
	t.Run("Test_logs.cloudwatch-region", func(t *testing.T) {

		t.Run("Override", func(t *testing.T) {
			testValue := "1"

			cmdFlags.Set("logs.cloudwatch-region", testValue)
			if vString, err := cmdFlags.GetString("logs.cloudwatch-region"); err == nil {
				testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.Logs.CloudwatchRegion)

			} else {
				assert.FailNow(t, err.Error())
			}
		})
	})
	t.Run("Test_logs.cloudwatch-log-group", func(t *testing.T) {

		t.Run("Override", func(t *testing.T) {
			testValue := "1"

			cmdFlags.Set("logs.cloudwatch-log-group", testValue)
			if vString, err := cmdFlags.GetString("logs.cloudwatch-log-group"); err == nil {
				testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.Logs.CloudwatchLogGroup)

			} else {
				assert.FailNow(t, err.Error())
			}
		})
	})
	t.Run("Test_logs.cloudwatch-template-uri", func(t *testing.T) {

		t.Run("Override", func(t *testing.T) {
			testValue := "1"

			cmdFlags.Set("logs.cloudwatch-template-uri", testValue)
			if vString, err := cmdFlags.GetString("logs.cloudwatch-template-uri"); err == nil {
				testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.Logs.CloudwatchTemplateURI)

			} else {
				assert.FailNow(t, err.Error())
			}
		})
	})
	t.Run("Test_logs.kubernetes-enabled", func(t *testing.T) {

		t.Run("Override", func(t *testing.T) {
			testValue := "1"

			cmdFlags.Set("logs.kubernetes-enabled", testValue)
			if vBool, err := cmdFlags.GetBool("logs.kubernetes-enabled"); err == nil {
				testDecodeJson_Config(t, fmt.Sprintf("%v", vBool), &actual.Logs.IsKubernetesEnabled)

			} else {
				assert.FailNow(t, err.Error())
			}
		})
	})
	t.Run("Test_logs.kubernetes-url", func(t *testing.T) {

		t.Run("Override", func(t *testing.T) {
			testValue := "1"

			cmdFlags.Set("logs.kubernetes-url", testValue)
			if vString, err := cmdFlags.GetString("logs.kubernetes-url"); err == nil {
				testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.Logs.KubernetesURL)

			} else {
				assert.FailNow(t, err.Error())
			}
		})
	})
	t.Run("Test_logs.kubernetes-template-uri", func(t *testing.T) {

		t.Run("Override", func(t *testing.T) {
			testValue := "1"

			cmdFlags.Set("logs.kubernetes-template-uri", testValue)
			if vString, err := cmdFlags.GetString("logs.kubernetes-template-uri"); err == nil {
				testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.Logs.KubernetesTemplateURI)

			} else {
				assert.FailNow(t, err.Error())
			}
		})
	})
	t.Run("Test_logs.stackdriver-enabled", func(t *testing.T) {

		t.Run("Override", func(t *testing.T) {
			testValue := "1"

			cmdFlags.Set("logs.stackdriver-enabled", testValue)
			if vBool, err := cmdFlags.GetBool("logs.stackdriver-enabled"); err == nil {
				testDecodeJson_Config(t, fmt.Sprintf("%v", vBool), &actual.Logs.IsStackDriverEnabled)

			} else {
				assert.FailNow(t, err.Error())
			}
		})
	})
	t.Run("Test_logs.gcp-project", func(t *testing.T) {

		t.Run("Override", func(t *testing.T) {
			testValue := "1"

			cmdFlags.Set("logs.gcp-project", testValue)
			if vString, err := cmdFlags.GetString("logs.gcp-project"); err == nil {
				testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.Logs.GCPProjectName)

			} else {
				assert.FailNow(t, err.Error())
			}
		})
	})
	t.Run("Test_logs.stackdriver-logresourcename", func(t *testing.T) {

		t.Run("Override", func(t *testing.T) {
			testValue := "1"

			cmdFlags.Set("logs.stackdriver-logresourcename", testValue)
			if vString, err := cmdFlags.GetString("logs.stackdriver-logresourcename"); err == nil {
				testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.Logs.StackdriverLogResourceName)

			} else {
				assert.FailNow(t, err.Error())
			}
		})
	})
	t.Run("Test_logs.stackdriver-template-uri", func(t *testing.T) {

		t.Run("Override", func(t *testing.T) {
			testValue := "1"

			cmdFlags.Set("logs.stackdriver-template-uri", testValue)
			if vString, err := cmdFlags.GetString("logs.stackdriver-template-uri"); err == nil {
				testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.Logs.StackDriverTemplateURI)

			} else {
				assert.FailNow(t, err.Error())
			}
		})
	})
}
// diff: flyteplugins/go/tasks/plugins/k8s/dask/dask.go (new file)

package dask

import (
	"context"
	"fmt"
	"strings"
	"time"

	daskAPI "github.com/dask/dask-kubernetes/v2023/dask_kubernetes/operator/go_client/pkg/apis/kubernetes.dask.org/v1"
	"github.com/samber/lo"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/client-go/kubernetes/scheme"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/errors"
	"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/logs"
	"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery"
	pluginsCore "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core"
	"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/flytek8s"
	"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/k8s"
	"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/tasklog"
	"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/utils"
	"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/plugins/k8s/pod"
	"github.com/flyteorg/flyte/v2/flytestdlib/logger"
	"github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core"
	"github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins"
)

const (
	daskTaskType = "dask"
	KindDaskJob  = "DaskJob"
	defaultDaskJobRunnerPrimaryContainerName = "job-runner"
)

// mergeMapInto copies every key/value from src into dst, overwriting existing
// keys. dst must be non-nil: writing to a nil map panics.
func mergeMapInto(src map[string]string, dst map[string]string) {
	for key, value := range src {
		dst[key] = value
	}
}

// replacePrimaryContainer swaps the container named primaryContainerName in
// spec with container, returning BadTaskSpecification if no such container exists.
func replacePrimaryContainer(spec *v1.PodSpec, primaryContainerName string, container v1.Container) error {
	for i, c := range spec.Containers {
		if c.Name == primaryContainerName {
			spec.Containers[i] = container
			return nil
		}
	}
	return errors.Errorf(errors.BadTaskSpecification, "primary container [%v] not found in pod spec", primaryContainerName)
}

// daskResourceHandler builds and identifies DaskJob custom resources for the
// plugin machinery.
type daskResourceHandler struct {
}

// BuildIdentityResource returns an empty DaskJob carrying only type metadata,
// used by the machinery to identify/watch resources of this kind.
func (daskResourceHandler) BuildIdentityResource(_ context.Context, _ pluginsCore.TaskExecutionMetadata) (
	client.Object, error) {
	return &daskAPI.DaskJob{
		TypeMeta: metav1.TypeMeta{
			Kind:       KindDaskJob,
			APIVersion: daskAPI.SchemeGroupVersion.String(),
		},
	}, nil
}

// BuildResource translates the Flyte task template (custom DaskJob proto) plus
// the task execution context into a fully-populated DaskJob custom resource.
func (p daskResourceHandler) BuildResource(ctx context.Context, taskCtx pluginsCore.TaskExecutionContext) (client.Object, error) {
	taskTemplate, err := taskCtx.TaskReader().Read(ctx)
	if err != nil {
		return nil, errors.Errorf(errors.BadTaskSpecification, "unable to fetch task specification [%v]", err.Error())
	} else if taskTemplate == nil {
		return nil, errors.Errorf(errors.BadTaskSpecification, "nil task specification")
	}

	daskJob := plugins.DaskJob{}
	err = utils.UnmarshalStruct(taskTemplate.GetCustom(), &daskJob)
	if err != nil {
		return nil, errors.Wrapf(errors.BadTaskSpecification, err, "invalid TaskSpecification [%v], failed to unmarshal", taskTemplate.GetCustom())
	}

	podSpec, objectMeta, primaryContainerName, err := flytek8s.ToK8sPodSpec(ctx, taskCtx)
	if err != nil {
		return nil, err
	}
	// Scheduler and job runner are built from a non-interruptible pod spec.
	nonInterruptibleTaskCtx := flytek8s.NewPluginTaskExecutionContext(taskCtx, flytek8s.WithInterruptible(false))
	nonInterruptiblePodSpec, _, _, err := flytek8s.ToK8sPodSpec(ctx, nonInterruptibleTaskCtx)
	if err != nil {
		return nil, err
	}

	// Add labels and annotations to objectMeta as they're not added by ToK8sPodSpec
	mergeMapInto(taskCtx.TaskExecutionMetadata().GetAnnotations(), objectMeta.Annotations)
	mergeMapInto(taskCtx.TaskExecutionMetadata().GetLabels(), objectMeta.Labels)

	// NOTE(review): daskJob.Workers / daskJob.Scheduler are dereferenced
	// without nil checks; confirm the proto always populates them.
	workerSpec, err := createWorkerSpec(*daskJob.Workers, podSpec, primaryContainerName, taskCtx.TaskExecutionMetadata())
	if err != nil {
		return nil, err
	}

	clusterName := taskCtx.TaskExecutionMetadata().GetTaskExecutionID().GetGeneratedName()
	schedulerSpec, err := createSchedulerSpec(*daskJob.Scheduler, clusterName, nonInterruptiblePodSpec, primaryContainerName, taskCtx.TaskExecutionMetadata())
	if err != nil {
		return nil, err
	}

	jobSpec, err := createJobSpec(*workerSpec, *schedulerSpec, nonInterruptiblePodSpec, primaryContainerName, objectMeta)
	if err != nil {
		return nil, err
	}

	job := &daskAPI.DaskJob{
		TypeMeta: metav1.TypeMeta{
			Kind:       KindDaskJob,
			APIVersion: daskAPI.SchemeGroupVersion.String(),
		},
		ObjectMeta: *objectMeta,
		Spec:       *jobSpec,
	}
	return job, nil
}

// createWorkerSpec derives the DaskJob worker group spec from the task pod
// spec, applying worker-specific image/resource/args overrides.
func createWorkerSpec(cluster plugins.DaskWorkerGroup, podSpec *v1.PodSpec, primaryContainerName string,
	teMetadata pluginsCore.TaskExecutionMetadata) (*daskAPI.WorkerSpec, error) {
	workerPodSpec := podSpec.DeepCopy()
	primaryContainer, err := flytek8s.GetContainer(workerPodSpec, primaryContainerName)
	if err != nil {
		return nil, err
	}
	primaryContainer.Name = "dask-worker"

	// Set custom image if present
	if cluster.GetImage() != "" {
		primaryContainer.Image = cluster.GetImage()
	}

	// Set custom resources
	resources := &primaryContainer.Resources
	clusterResources := cluster.GetResources()
	if len(clusterResources.GetRequests()) >= 1 || len(clusterResources.GetLimits()) >= 1 {
		resources, err = flytek8s.ToK8sResourceRequirements(cluster.GetResources())
		if err != nil {
			return nil, err
		}

		*resources = flytek8s.ApplyK8sResourceOverrides(teMetadata, resources)
	}
	// NOTE(review): this nil check can only matter if ToK8sResourceRequirements
	// returned (nil, nil); the dereference above would already have panicked —
	// confirm upstream behavior.
	if resources == nil {
		resources = &v1.ResourceRequirements{}
	}
	primaryContainer.Resources = *resources

	// Set custom args
	workerArgs := []string{
		"dask-worker",
		"--name",
		"$(DASK_WORKER_NAME)",
	}
	// If limits are set, append `--nthreads` and `--memory-limit` as per these docs:
	// https://kubernetes.dask.org/en/latest/kubecluster.html?#best-practices
	if resources.Limits != nil {
		limits := resources.Limits
		if limits.Cpu() != nil {
			cpuCount := fmt.Sprintf("%v", limits.Cpu().Value())
			workerArgs = append(workerArgs, "--nthreads", cpuCount)
		}
		if limits.Memory() != nil {
			memory := limits.Memory().String()
			workerArgs = append(workerArgs, "--memory-limit", memory)
		}
	}
	primaryContainer.Args = workerArgs

	err = replacePrimaryContainer(workerPodSpec, primaryContainerName, *primaryContainer)
	if err != nil {
		return nil, err
	}

	// All workers are created as k8s deployment and must have a restart policy of Always
	workerPodSpec.RestartPolicy = v1.RestartPolicyAlways

	return &daskAPI.WorkerSpec{
		Replicas: int(cluster.GetNumberOfWorkers()),
		Spec:     *workerPodSpec,
	}, nil
}

// createSchedulerSpec derives the DaskJob scheduler spec from the
// non-interruptible pod spec, applying scheduler-specific overrides.
// (Definition continues beyond this chunk.)
func createSchedulerSpec(scheduler plugins.DaskScheduler, clusterName string, podSpec *v1.PodSpec, primaryContainerName string,
	teMetadata pluginsCore.TaskExecutionMetadata) (*daskAPI.SchedulerSpec, error) {
	schedulerPodSpec := podSpec.DeepCopy()
	primaryContainer, err := flytek8s.GetContainer(schedulerPodSpec, primaryContainerName)
	if err != nil {
		return nil, err
	}
	primaryContainer.Name = "scheduler"

	// Override image if applicable
	if scheduler.GetImage() != "" {
		primaryContainer.Image = scheduler.GetImage()
	}

	// Override resources if applicable
	resources := &primaryContainer.Resources
	schedulerResources := scheduler.GetResources()
	if len(schedulerResources.GetRequests()) >= 1 || len(schedulerResources.GetLimits()) >= 1 {
		resources, err = flytek8s.ToK8sResourceRequirements(scheduler.GetResources())
		if err != nil {
			return nil, err
		}

		*resources = flytek8s.ApplyK8sResourceOverrides(teMetadata, resources)
	}
	primaryContainer.Resources = *resources

	// Override args
	primaryContainer.Args = []string{"dask-scheduler"}

	// Add ports
	primaryContainer.Ports = []v1.ContainerPort{
		{
			Name:          "tcp-comm",
			ContainerPort: 8786,
			Protocol:      "TCP",
		},
		{
			Name:          "dashboard",
			ContainerPort: 8787,
			Protocol:      "TCP",
		},
	}

if primaryContainer.ReadinessProbe == nil { + primaryContainer.ReadinessProbe = &v1.Probe{ + ProbeHandler: v1.ProbeHandler{ + HTTPGet: &v1.HTTPGetAction{ + Port: intstr.FromString("dashboard"), + }, + }, + InitialDelaySeconds: 5, + PeriodSeconds: 5, + FailureThreshold: 30, + } + } + + schedulerPodSpec.RestartPolicy = v1.RestartPolicyAlways + + // Set primary container + err = replacePrimaryContainer(schedulerPodSpec, primaryContainerName, *primaryContainer) + if err != nil { + return nil, err + } + + return &daskAPI.SchedulerSpec{ + Spec: *schedulerPodSpec, + Service: v1.ServiceSpec{ + Type: v1.ServiceTypeNodePort, + Selector: map[string]string{ + "dask.org/cluster-name": clusterName, + "dask.org/component": "scheduler", + }, + Ports: []v1.ServicePort{ + { + Name: "tcp-comm", + Protocol: "TCP", + Port: 8786, + TargetPort: intstr.FromString("tcp-comm"), + }, + { + Name: "dashboard", + Protocol: "TCP", + Port: 8787, + TargetPort: intstr.FromString("dashboard"), + }, + }, + }, + }, nil +} + +func createJobSpec(workerSpec daskAPI.WorkerSpec, schedulerSpec daskAPI.SchedulerSpec, podSpec *v1.PodSpec, primaryContainerName string, objectMeta *metav1.ObjectMeta) (*daskAPI.DaskJobSpec, error) { + jobPodSpec := podSpec.DeepCopy() + jobPodSpec.RestartPolicy = v1.RestartPolicyNever + + primaryContainer, err := flytek8s.GetContainer(jobPodSpec, primaryContainerName) + if err != nil { + return nil, err + } + primaryContainer.Name = "job-runner" + + err = replacePrimaryContainer(jobPodSpec, primaryContainerName, *primaryContainer) + if err != nil { + return nil, err + } + + return &daskAPI.DaskJobSpec{ + Job: daskAPI.JobSpec{ + Spec: *jobPodSpec, + }, + Cluster: daskAPI.DaskCluster{ + ObjectMeta: *objectMeta, + Spec: daskAPI.DaskClusterSpec{ + Worker: workerSpec, + Scheduler: schedulerSpec, + }, + }, + }, nil +} + +func (p daskResourceHandler) GetTaskPhase(ctx context.Context, pluginContext k8s.PluginContext, r client.Object) (pluginsCore.PhaseInfo, error) { + logPlugin, err := 
logs.InitializeLogPlugins(&GetConfig().Logs) + if err != nil { + return pluginsCore.PhaseInfoUndefined, err + } + job := r.(*daskAPI.DaskJob) + status := job.Status.JobStatus + occurredAt := time.Now() + + info := pluginsCore.TaskInfo{ + OccurredAt: &occurredAt, + } + + taskExecID := pluginContext.TaskExecutionMetadata().GetTaskExecutionID() + schedulerPodName, err := getDaskSchedulerPodName(ctx, job.Name, pluginContext) + if err != nil { + logger.Debug(ctx, "Failed to get dask scheduler pod name. Error: %v", err) + } + var enableVscode bool + if len(job.Spec.Cluster.Spec.Scheduler.Spec.Containers) > 0 { + enableVscode = flytek8s.IsVscodeEnabled(ctx, job.Spec.Cluster.Spec.Scheduler.Spec.Containers[0].Env) + } + input := tasklog.Input{ + Namespace: job.ObjectMeta.Namespace, + PodName: job.Status.JobRunnerPodName, + TaskExecutionID: taskExecID, + EnableVscode: enableVscode, + } + input.ExtraTemplateVars = append( + input.ExtraTemplateVars, + tasklog.TemplateVar{ + Regex: tasklog.MustCreateRegex("daskSchedulerName"), + Value: schedulerPodName, + }, + ) + + o, err := logPlugin.GetTaskLogs(input) + if err != nil { + return pluginsCore.PhaseInfoUndefined, err + } + info.Logs = o.TaskLogs + info.LogContext = &core.LogContext{ + PrimaryPodName: job.Status.JobRunnerPodName, + Pods: []*core.PodLogContext{ + { + Namespace: job.ObjectMeta.Namespace, + PodName: job.Status.JobRunnerPodName, + PrimaryContainerName: defaultDaskJobRunnerPrimaryContainerName, + Containers: []*core.ContainerContext{ + {ContainerName: defaultDaskJobRunnerPrimaryContainerName}, + }, + }, + }, + } + + phaseInfo, err := flytek8s.DemystifyFailedOrPendingPod(ctx, pluginContext, info, job.ObjectMeta.Namespace, job.Status.JobRunnerPodName, defaultDaskJobRunnerPrimaryContainerName) + if err != nil { + logger.Errorf(ctx, "Failed to demystify pod status for dask job-runner. 
Error: %v", err) + } + if phaseInfo.Phase().IsFailure() { + // If the job-runner pod is in a failure state, we can fail fast without checking the DaskJob status. + return phaseInfo, nil + } + switch status { + case "": + phaseInfo = pluginsCore.PhaseInfoInitializing(occurredAt, pluginsCore.DefaultPhaseVersion, "unknown", &info) + case daskAPI.DaskJobCreated: + phaseInfo = pluginsCore.PhaseInfoInitializing(occurredAt, pluginsCore.DefaultPhaseVersion, "job created", &info) + case daskAPI.DaskJobClusterCreated: + phaseInfo = pluginsCore.PhaseInfoInitializing(occurredAt, pluginsCore.DefaultPhaseVersion, "cluster created", &info) + case daskAPI.DaskJobFailed: + reason := "Dask Job failed" + phaseInfo = pluginsCore.PhaseInfoRetryableFailure(errors.DownstreamSystemError, reason, &info) + case daskAPI.DaskJobSuccessful: + phaseInfo = pluginsCore.PhaseInfoSuccess(&info) + default: + phaseInfo = pluginsCore.PhaseInfoRunning(pluginsCore.DefaultPhaseVersion, &info) + } + + ready, err := isDaskSchedulerReady(ctx, job.Name, pluginContext) + if err != nil { + logger.Warnf(ctx, "Failed to determine Dask dashboard readiness. 
Error: %v", err) + } else { + for _, tl := range info.Logs { + if tl != nil && tl.LinkType == core.TaskLog_DASHBOARD { + tl.Ready = ready + if !ready || phaseInfo.Phase() != pluginsCore.PhaseRunning { + phaseInfo.WithReason("Dask dashboard is not ready") + } else { + phaseInfo.WithReason("Dask dashboard is ready") + } + } else if tl != nil && tl.LinkType == core.TaskLog_IDE { + tl.Ready = ready + if !ready || phaseInfo.Phase() != pluginsCore.PhaseRunning { + phaseInfo.WithReason("Vscode server is not ready") + } else { + phaseInfo.WithReason("Vscode server is ready") + } + } + } + } + + phaseVersionUpdateErr := k8s.MaybeUpdatePhaseVersionFromPluginContext(&phaseInfo, &pluginContext) + if phaseVersionUpdateErr != nil { + return phaseInfo, phaseVersionUpdateErr + } + return phaseInfo, nil +} + +func getDaskSchedulerPodName(ctx context.Context, daskJobName string, pluginContext k8s.PluginContext) (string, error) { + podList := &v1.PodList{} + err := pluginContext.K8sReader().List(ctx, podList) + if err != nil { + return "", fmt.Errorf("failed to list dask execution pods. Error: %w", err) + } + pods := lo.Filter(podList.Items, func(pod v1.Pod, _ int) bool { + return strings.HasPrefix(pod.Name, daskJobName) && strings.Contains(pod.Name, "scheduler") && flytek8s.GetPrimaryContainerName(&pod) == "scheduler" + }) + if len(pods) == 0 { + return "", fmt.Errorf("no dask scheduler pod found for dask job [%v]", daskJobName) + } + return pods[0].Name, nil +} + +func isDaskSchedulerReady(ctx context.Context, daskJobName string, pluginContext k8s.PluginContext) (bool, error) { + podList := &v1.PodList{} + err := pluginContext.K8sReader().List(ctx, podList) + if err != nil { + return false, fmt.Errorf("failed to list dask execution pods. 
Error: %w", err) + } + pods := lo.Filter(podList.Items, func(p v1.Pod, _ int) bool { + return strings.HasPrefix(p.Name, daskJobName) && strings.Contains(p.Name, "scheduler") + }) + if len(pods) == 0 { + return false, nil + } else if len(pods) == 1 { + return pod.IsPodReady(&pods[0]), nil + } + + // More than one dask scheduler pod. Should not happen. + logger.Debug(ctx, "Cannot determine dask scheduler readiness as more than one dask scheduler pod found") + return false, fmt.Errorf("more than one dask scheduler pod found for dask job [%v]", daskJobName) +} + +func (daskResourceHandler) GetProperties() k8s.PluginProperties { + return k8s.PluginProperties{} +} + +func init() { + if err := daskAPI.AddToScheme(scheme.Scheme); err != nil { + panic(err) + } + + pluginmachinery.PluginRegistry().RegisterK8sPlugin( + k8s.PluginEntry{ + ID: daskTaskType, + RegisteredTaskTypes: []pluginsCore.TaskType{daskTaskType}, + ResourceToWatch: &daskAPI.DaskJob{}, + Plugin: daskResourceHandler{}, + IsDefault: false, + }) +} diff --git a/flyteplugins/go/tasks/plugins/k8s/dask/dask_test.go b/flyteplugins/go/tasks/plugins/k8s/dask/dask_test.go new file mode 100644 index 0000000000..4d9549531b --- /dev/null +++ b/flyteplugins/go/tasks/plugins/k8s/dask/dask_test.go @@ -0,0 +1,1102 @@ +package dask + +import ( + "context" + "reflect" + "testing" + "time" + + daskAPI "github.com/dask/dask-kubernetes/v2023/dask_kubernetes/operator/go_client/pkg/apis/kubernetes.dask.org/v1" + "github.com/golang/protobuf/proto" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "google.golang.org/protobuf/types/known/structpb" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + pluginsCore "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + 
"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core/mocks" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/flytek8s" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/flytek8s/config" + pluginIOMocks "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/io/mocks" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/k8s" + k8smocks "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/k8s/mocks" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/utils" + stdlibUtils "github.com/flyteorg/flyte/v2/flytestdlib/utils" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins" +) + +const ( + defaultTestImage = "image://" + testNWorkers = 10 + testTaskID = "some-acceptable-name" + podTemplateName = "dask-dummy-pod-template-name" + defaultServiceAccountName = "default-service-account" + defaultNamespace = "default-namespace" + podTempaltePriorityClassName = "pod-template-priority-class-name" +) + +var ( + testEnvVars = []v1.EnvVar{ + {Name: "Env_Var", Value: "Env_Val"}, + } + testArgs = []string{ + "execute-dask-task", + } + testAnnotations = map[string]string{"annotation-1": "val1"} + testLabels = map[string]string{"label-1": "val1"} + testPlatformResources = v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("4"), + v1.ResourceMemory: resource.MustParse("10G"), + }, + Limits: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("10"), + v1.ResourceMemory: resource.MustParse("24G"), + }, + } + defaultResources = v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("2"), + v1.ResourceMemory: resource.MustParse("8G"), + }, + Limits: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("8"), + v1.ResourceMemory: resource.MustParse("17G"), + }, + } + podTemplate = &v1.PodTemplate{ + ObjectMeta: metav1.ObjectMeta{ + 
Name: podTemplateName, + }, + Template: v1.PodTemplateSpec{ + Spec: v1.PodSpec{ + PriorityClassName: podTempaltePriorityClassName, + }, + }, + } +) + +func dummyDaskJob(status daskAPI.JobStatus) *daskAPI.DaskJob { + return &daskAPI.DaskJob{ + ObjectMeta: metav1.ObjectMeta{ + Name: "dask-job-name", + Namespace: defaultNamespace, + }, + Spec: daskAPI.DaskJobSpec{ + Cluster: daskAPI.DaskCluster{ + Spec: daskAPI.DaskClusterSpec{ + Scheduler: daskAPI.SchedulerSpec{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "scheduler", + Image: defaultTestImage, + Env: testEnvVars, + }, + }, + }, + }, + }, + }, + }, + Status: daskAPI.DaskJobStatus{ + ClusterName: "dask-cluster-name", + EndTime: metav1.Time{Time: time.Now()}, + JobRunnerPodName: "job-runner-pod-name", + JobStatus: status, + StartTime: metav1.Time{Time: time.Now()}, + }, + } +} + +func dummpyDaskCustomObj(customImage string, resources *core.Resources) *plugins.DaskJob { + scheduler := plugins.DaskScheduler{ + Image: customImage, + Resources: resources, + } + + workers := plugins.DaskWorkerGroup{ + NumberOfWorkers: 10, + Image: customImage, + Resources: resources, + } + + daskJob := plugins.DaskJob{ + Scheduler: &scheduler, + Workers: &workers, + } + return &daskJob +} + +func dummyDaskTaskTemplate(customImage string, resources *core.Resources, podTemplateName string) *core.TaskTemplate { + // In a real usecase, resources will always be filled, but might be empty + if resources == nil { + resources = &core.Resources{ + Requests: []*core.Resources_ResourceEntry{}, + Limits: []*core.Resources_ResourceEntry{}, + } + } + + daskJob := dummpyDaskCustomObj(customImage, resources) + daskJobJSON, err := utils.MarshalToString(daskJob) + if err != nil { + panic(err) + } + + structObj := structpb.Struct{} + err = stdlibUtils.UnmarshalStringToPb(daskJobJSON, &structObj) + if err != nil { + panic(err) + } + var envVars []*core.KeyValuePair + for _, envVar := range testEnvVars { + envVars = append(envVars, 
&core.KeyValuePair{Key: envVar.Name, Value: envVar.Value}) + } + metadata := &core.TaskMetadata{ + PodTemplateName: podTemplateName, + } + return &core.TaskTemplate{ + Id: &core.Identifier{Name: "test-build-resource"}, + Type: daskTaskType, + Metadata: metadata, + Target: &core.TaskTemplate_Container{ + Container: &core.Container{ + Image: defaultTestImage, + Args: testArgs, + Env: envVars, + }, + }, + Custom: &structObj, + } +} + +func dummyDaskTaskContext(taskTemplate *core.TaskTemplate, resources *v1.ResourceRequirements, extendedResources *core.ExtendedResources, isInterruptible bool) pluginsCore.TaskExecutionContext { + taskCtx := &mocks.TaskExecutionContext{} + + inputReader := &pluginIOMocks.InputReader{} + inputReader.OnGetInputPrefixPath().Return("/input/prefix") + inputReader.OnGetInputPath().Return("/input") + inputReader.OnGetMatch(mock.Anything).Return(&core.LiteralMap{}, nil) + taskCtx.OnInputReader().Return(inputReader) + + outputReader := &pluginIOMocks.OutputWriter{} + outputReader.OnGetOutputPath().Return("/data/outputs.pb") + outputReader.OnGetOutputPrefixPath().Return("/data/") + outputReader.OnGetRawOutputPrefix().Return("") + outputReader.OnGetCheckpointPrefix().Return("/checkpoint") + outputReader.OnGetPreviousCheckpointsPrefix().Return("/prev") + taskCtx.On("OutputWriter").Return(outputReader) + + taskReader := &mocks.TaskReader{} + taskReader.OnReadMatch(mock.Anything).Return(taskTemplate, nil) + taskCtx.OnTaskReader().Return(taskReader) + + tID := &mocks.TaskExecutionID{} + tID.OnGetID().Return(core.TaskExecutionIdentifier{ + NodeExecutionId: &core.NodeExecutionIdentifier{ + ExecutionId: &core.WorkflowExecutionIdentifier{ + Name: "my_name", + Project: "my_project", + Domain: "my_domain", + }, + }, + }) + tID.On("GetGeneratedName").Return(testTaskID) + tID.On("GetUniqueNodeID").Return("an-unique-id") + + taskExecutionMetadata := &mocks.TaskExecutionMetadata{} + taskExecutionMetadata.OnGetTaskExecutionID().Return(tID) + 
taskExecutionMetadata.OnGetAnnotations().Return(testAnnotations) + taskExecutionMetadata.OnGetLabels().Return(testLabels) + taskExecutionMetadata.OnGetPlatformResources().Return(&testPlatformResources) + taskExecutionMetadata.OnGetMaxAttempts().Return(uint32(1)) + taskExecutionMetadata.OnIsInterruptible().Return(isInterruptible) + taskExecutionMetadata.OnGetEnvironmentVariables().Return(nil) + taskExecutionMetadata.OnGetK8sServiceAccount().Return(defaultServiceAccountName) + taskExecutionMetadata.OnGetNamespace().Return(defaultNamespace) + taskExecutionMetadata.OnGetConsoleURL().Return("") + overrides := &mocks.TaskOverrides{} + overrides.OnGetResources().Return(resources) + overrides.OnGetExtendedResources().Return(extendedResources) + overrides.OnGetPodTemplate().Return(nil) + overrides.OnGetContainerImage().Return("") + taskExecutionMetadata.OnGetOverrides().Return(overrides) + taskCtx.On("TaskExecutionMetadata").Return(taskExecutionMetadata) + pluginStateReaderMock := mocks.PluginStateReader{} + pluginStateReaderMock.On("Get", mock.AnythingOfType(reflect.TypeOf(&k8s.PluginState{}).String())).Return( + func(v interface{}) uint8 { + *(v.(*k8s.PluginState)) = k8s.PluginState{} + return 0 + }, + func(v interface{}) error { + return nil + }) + + taskCtx.OnPluginStateReader().Return(&pluginStateReaderMock) + return taskCtx +} + +func dummyDaskPluginContext(taskTemplate *core.TaskTemplate, resources *v1.ResourceRequirements, pluginState k8s.PluginState) *k8smocks.PluginContext { + return dummyDaskPluginContextWithPods(taskTemplate, resources, pluginState) +} + +func dummyDaskPluginContextWithPods(taskTemplate *core.TaskTemplate, resources *v1.ResourceRequirements, pluginState k8s.PluginState, pods ...runtime.Object) *k8smocks.PluginContext { + pCtx := &k8smocks.PluginContext{} + + inputReader := &pluginIOMocks.InputReader{} + inputReader.OnGetInputPrefixPath().Return("/input/prefix") + inputReader.OnGetInputPath().Return("/input") + 
inputReader.OnGetMatch(mock.Anything).Return(&core.LiteralMap{}, nil) + pCtx.OnInputReader().Return(inputReader) + + outputReader := &pluginIOMocks.OutputWriter{} + outputReader.OnGetOutputPath().Return("/data/outputs.pb") + outputReader.OnGetOutputPrefixPath().Return("/data/") + outputReader.OnGetRawOutputPrefix().Return("") + outputReader.OnGetCheckpointPrefix().Return("/checkpoint") + outputReader.OnGetPreviousCheckpointsPrefix().Return("/prev") + pCtx.On("OutputWriter").Return(outputReader) + + taskReader := &mocks.TaskReader{} + taskReader.OnReadMatch(mock.Anything).Return(taskTemplate, nil) + pCtx.OnTaskReader().Return(taskReader) + + tID := &mocks.TaskExecutionID{} + tID.OnGetID().Return(core.TaskExecutionIdentifier{ + NodeExecutionId: &core.NodeExecutionIdentifier{ + ExecutionId: &core.WorkflowExecutionIdentifier{ + Name: "my_name", + Project: "my_project", + Domain: "my_domain", + }, + }, + }) + tID.On("GetGeneratedName").Return(testTaskID) + tID.On("GetUniqueNodeID").Return("an-unique-id") + + taskExecutionMetadata := &mocks.TaskExecutionMetadata{} + taskExecutionMetadata.OnGetTaskExecutionID().Return(tID) + taskExecutionMetadata.OnGetAnnotations().Return(testAnnotations) + taskExecutionMetadata.OnGetLabels().Return(testLabels) + taskExecutionMetadata.OnGetPlatformResources().Return(&testPlatformResources) + taskExecutionMetadata.OnGetMaxAttempts().Return(uint32(1)) + taskExecutionMetadata.OnIsInterruptible().Return(false) + taskExecutionMetadata.OnGetEnvironmentVariables().Return(nil) + taskExecutionMetadata.OnGetK8sServiceAccount().Return(defaultServiceAccountName) + taskExecutionMetadata.OnGetNamespace().Return(defaultNamespace) + taskExecutionMetadata.OnGetConsoleURL().Return("") + overrides := &mocks.TaskOverrides{} + overrides.OnGetResources().Return(resources) + overrides.OnGetExtendedResources().Return(nil) + overrides.OnGetContainerImage().Return("") + taskExecutionMetadata.OnGetOverrides().Return(overrides) + 
pCtx.On("TaskExecutionMetadata").Return(taskExecutionMetadata) + + pluginStateReaderMock := mocks.PluginStateReader{} + pluginStateReaderMock.On("Get", mock.AnythingOfType(reflect.TypeOf(&pluginState).String())).Return( + func(v interface{}) uint8 { + *(v.(*k8s.PluginState)) = pluginState + return 0 + }, + func(v interface{}) error { + return nil + }) + + // Add K8sReader mock + reader := fake.NewFakeClient(pods...) + pCtx.OnK8sReader().Return(reader) + + pCtx.OnPluginStateReader().Return(&pluginStateReaderMock) + return pCtx +} + +func TestBuildResourceDaskHappyPath(t *testing.T) { + daskResourceHandler := daskResourceHandler{} + + taskTemplate := dummyDaskTaskTemplate("", nil, "") + taskContext := dummyDaskTaskContext(taskTemplate, &defaultResources, nil, false) + r, err := daskResourceHandler.BuildResource(context.TODO(), taskContext) + assert.Nil(t, err) + assert.NotNil(t, r) + daskJob, ok := r.(*daskAPI.DaskJob) + assert.True(t, ok) + + var defaultTolerations []v1.Toleration + defaultNodeSelector := map[string]string{} + defaultAffinity := &v1.Affinity{ + NodeAffinity: nil, + PodAffinity: nil, + PodAntiAffinity: nil, + } + + // Job + jobSpec := daskJob.Spec.Job.Spec + assert.Equal(t, testAnnotations, daskJob.ObjectMeta.GetAnnotations()) + assert.Equal(t, testLabels, daskJob.ObjectMeta.GetLabels()) + assert.Equal(t, v1.RestartPolicyNever, jobSpec.RestartPolicy) + assert.Equal(t, "job-runner", jobSpec.Containers[0].Name) + assert.Equal(t, defaultTestImage, jobSpec.Containers[0].Image) + assert.Equal(t, testArgs, jobSpec.Containers[0].Args) + assert.Equal(t, defaultResources, jobSpec.Containers[0].Resources) + assert.Equal(t, defaultTolerations, jobSpec.Tolerations) + assert.Equal(t, defaultNodeSelector, jobSpec.NodeSelector) + assert.Equal(t, defaultAffinity, jobSpec.Affinity) + + // Flyte adds more environment variables to the runner + assert.Contains(t, jobSpec.Containers[0].Env, testEnvVars[0]) + + // Cluster + assert.Equal(t, testAnnotations, 
daskJob.Spec.Cluster.ObjectMeta.GetAnnotations()) + assert.Equal(t, testLabels, daskJob.Spec.Cluster.ObjectMeta.GetLabels()) + + // Scheduler + schedulerSpec := daskJob.Spec.Cluster.Spec.Scheduler.Spec + expectedPorts := []v1.ContainerPort{ + { + Name: "tcp-comm", + ContainerPort: 8786, + Protocol: "TCP", + }, + { + Name: "dashboard", + ContainerPort: 8787, + Protocol: "TCP", + }, + } + assert.Equal(t, v1.RestartPolicyAlways, schedulerSpec.RestartPolicy) + assert.Equal(t, defaultTestImage, schedulerSpec.Containers[0].Image) + assert.Equal(t, defaultResources, schedulerSpec.Containers[0].Resources) + assert.Equal(t, []string{"dask-scheduler"}, schedulerSpec.Containers[0].Args) + assert.Equal(t, expectedPorts, schedulerSpec.Containers[0].Ports) + // Flyte adds more environment variables to the scheduler + assert.Contains(t, schedulerSpec.Containers[0].Env, testEnvVars[0]) + assert.Equal(t, defaultTolerations, schedulerSpec.Tolerations) + assert.Equal(t, defaultNodeSelector, schedulerSpec.NodeSelector) + assert.Equal(t, defaultAffinity, schedulerSpec.Affinity) + + schedulerServiceSpec := daskJob.Spec.Cluster.Spec.Scheduler.Service + expectedSelector := map[string]string{ + "dask.org/cluster-name": testTaskID, + "dask.org/component": "scheduler", + } + expectedSerivcePorts := []v1.ServicePort{ + { + Name: "tcp-comm", + Protocol: "TCP", + Port: 8786, + TargetPort: intstr.FromString("tcp-comm"), + }, + { + Name: "dashboard", + Protocol: "TCP", + Port: 8787, + TargetPort: intstr.FromString("dashboard"), + }, + } + assert.Equal(t, v1.ServiceTypeNodePort, schedulerServiceSpec.Type) + assert.Equal(t, expectedSelector, schedulerServiceSpec.Selector) + assert.Equal(t, expectedSerivcePorts, schedulerServiceSpec.Ports) + + // Default Workers + workerSpec := daskJob.Spec.Cluster.Spec.Worker.Spec + assert.Equal(t, testNWorkers, daskJob.Spec.Cluster.Spec.Worker.Replicas) + assert.Equal(t, "dask-worker", workerSpec.Containers[0].Name) + assert.Equal(t, defaultTestImage, 
workerSpec.Containers[0].Image) + assert.Equal(t, defaultResources, workerSpec.Containers[0].Resources) + // Flyte adds more environment variables to the worker + assert.Contains(t, workerSpec.Containers[0].Env, testEnvVars[0]) + assert.Equal(t, defaultTolerations, workerSpec.Tolerations) + assert.Equal(t, defaultNodeSelector, workerSpec.NodeSelector) + assert.Equal(t, defaultAffinity, workerSpec.Affinity) + assert.Equal(t, []string{ + "dask-worker", + "--name", + "$(DASK_WORKER_NAME)", + "--nthreads", + "8", + "--memory-limit", + "17G", + }, workerSpec.Containers[0].Args) + assert.Equal(t, workerSpec.RestartPolicy, v1.RestartPolicyAlways) +} + +func TestBuildResourceDaskCustomImages(t *testing.T) { + customImage := "customImage" + + daskResourceHandler := daskResourceHandler{} + taskTemplate := dummyDaskTaskTemplate(customImage, nil, "") + taskContext := dummyDaskTaskContext(taskTemplate, &v1.ResourceRequirements{}, nil, false) + r, err := daskResourceHandler.BuildResource(context.TODO(), taskContext) + assert.Nil(t, err) + assert.NotNil(t, r) + daskJob, ok := r.(*daskAPI.DaskJob) + assert.True(t, ok) + + // Job + jobSpec := daskJob.Spec.Job.Spec + assert.Equal(t, defaultTestImage, jobSpec.Containers[0].Image) + + // Scheduler + schedulerSpec := daskJob.Spec.Cluster.Spec.Scheduler.Spec + assert.Equal(t, customImage, schedulerSpec.Containers[0].Image) + + // Default Workers + workerSpec := daskJob.Spec.Cluster.Spec.Worker.Spec + assert.Equal(t, customImage, workerSpec.Containers[0].Image) +} + +func TestBuildResourceDaskDefaultResoureRequirements(t *testing.T) { + flyteWorkflowResources := v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("1"), + }, + Limits: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("2"), + v1.ResourceMemory: resource.MustParse("2G"), + }, + } + + daskResourceHandler := daskResourceHandler{} + taskTemplate := dummyDaskTaskTemplate("", nil, "") + taskContext := dummyDaskTaskContext(taskTemplate, 
&flyteWorkflowResources, nil, false) + r, err := daskResourceHandler.BuildResource(context.TODO(), taskContext) + assert.Nil(t, err) + assert.NotNil(t, r) + daskJob, ok := r.(*daskAPI.DaskJob) + assert.True(t, ok) + + expectedResources := v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("1"), + v1.ResourceMemory: resource.MustParse("2G"), + }, + Limits: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("2"), + v1.ResourceMemory: resource.MustParse("2G"), + }, + } + + // Job + jobSpec := daskJob.Spec.Job.Spec + assert.Equal(t, expectedResources, jobSpec.Containers[0].Resources) + + // Scheduler + schedulerSpec := daskJob.Spec.Cluster.Spec.Scheduler.Spec + assert.Equal(t, expectedResources, schedulerSpec.Containers[0].Resources) + + // Default Workers + workerSpec := daskJob.Spec.Cluster.Spec.Worker.Spec + assert.Equal(t, expectedResources, workerSpec.Containers[0].Resources) + assert.Contains(t, workerSpec.Containers[0].Args, "--nthreads") + assert.Contains(t, workerSpec.Containers[0].Args, "2") + assert.Contains(t, workerSpec.Containers[0].Args, "--memory-limit") + assert.Contains(t, workerSpec.Containers[0].Args, "2G") +} + +func TestBuildResourceDaskAdjustResoureRequirements(t *testing.T) { + flyteWorkflowResources := v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("1"), + v1.ResourceMemory: resource.MustParse("2G"), + }, + Limits: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("16"), // Higher than platform limits + // Unset memory should be defaulted to from the request + }, + } + + daskResourceHandler := daskResourceHandler{} + taskTemplate := dummyDaskTaskTemplate("", nil, "") + taskContext := dummyDaskTaskContext(taskTemplate, &flyteWorkflowResources, nil, false) + r, err := daskResourceHandler.BuildResource(context.TODO(), taskContext) + assert.Nil(t, err) + assert.NotNil(t, r) + daskJob, ok := r.(*daskAPI.DaskJob) + assert.True(t, ok) + + expectedResources := 
v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("1"), + v1.ResourceMemory: resource.MustParse("2G"), + }, + Limits: v1.ResourceList{ + v1.ResourceCPU: testPlatformResources.Limits[v1.ResourceCPU], + v1.ResourceMemory: flyteWorkflowResources.Requests[v1.ResourceMemory], + }, + } + + // Job + jobSpec := daskJob.Spec.Job.Spec + assert.Equal(t, expectedResources, jobSpec.Containers[0].Resources) + + // Scheduler + schedulerSpec := daskJob.Spec.Cluster.Spec.Scheduler.Spec + assert.Equal(t, expectedResources, schedulerSpec.Containers[0].Resources) + + // Default Workers + workerSpec := daskJob.Spec.Cluster.Spec.Worker.Spec + assert.Equal(t, expectedResources, workerSpec.Containers[0].Resources) + assert.Contains(t, workerSpec.Containers[0].Args, "--nthreads") + assert.Contains(t, workerSpec.Containers[0].Args, "10") // from the adjusted, platform limits + assert.Contains(t, workerSpec.Containers[0].Args, "--memory-limit") + assert.Contains(t, workerSpec.Containers[0].Args, "2G") +} + +func TestBuildResourcesDaskCustomResoureRequirements(t *testing.T) { + protobufResources := core.Resources{ + Requests: []*core.Resources_ResourceEntry{ + { + Name: core.Resources_CPU, + Value: "5", + }, + }, + Limits: []*core.Resources_ResourceEntry{ + { + Name: core.Resources_CPU, + Value: "10", + }, + { + Name: core.Resources_MEMORY, + Value: "15G", + }, + }, + } + expectedPbResources := proto.Clone(&protobufResources).(*core.Resources) + // We expect the unset memory request to come from the set memory limit + expectedPbResources.Requests = append(expectedPbResources.Requests, &core.Resources_ResourceEntry{ + Name: core.Resources_MEMORY, + Value: "15G", + }) + expectedResources, _ := flytek8s.ToK8sResourceRequirements(expectedPbResources) + + flyteWorkflowResources := v1.ResourceRequirements{ + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("1"), + }, + Limits: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("2"), + 
// TestBuildResourceDaskInterruptible verifies that when a dask task is marked
// interruptible, only the default worker pods receive the configured
// interruptible node selector, tolerations and node affinity, while the job
// runner and scheduler pods keep the non-interruptible defaults.
func TestBuildResourceDaskInterruptible(t *testing.T) {
	// Expected values for the non-interruptible pods (zero values).
	defaultNodeSelector := map[string]string{}
	var defaultAffinity v1.Affinity
	var defaultTolerations []v1.Toleration

	interruptibleNodeSelector := map[string]string{
		"x/interruptible": "true",
	}
	interruptibleNodeSelectorRequirement := &v1.NodeSelectorRequirement{
		Key:      "x/interruptible",
		Operator: v1.NodeSelectorOpIn,
		Values:   []string{"true"},
	}
	interruptibleTolerations := []v1.Toleration{
		{
			Key:      "x/flyte",
			Value:    "interruptible",
			Operator: "Equal",
			Effect:   "NoSchedule",
		},
	}

	// Install the interruptible settings into the global k8s plugin config.
	assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{
		InterruptibleNodeSelector:            interruptibleNodeSelector,
		InterruptibleNodeSelectorRequirement: interruptibleNodeSelectorRequirement,
		InterruptibleTolerations:             interruptibleTolerations,
	}))

	daskResourceHandler := daskResourceHandler{}

	// Last argument marks the task context as interruptible.
	taskTemplate := dummyDaskTaskTemplate("", nil, "")
	taskContext := dummyDaskTaskContext(taskTemplate, &defaultResources, nil, true)
	r, err := daskResourceHandler.BuildResource(context.TODO(), taskContext)
	assert.Nil(t, err)
	assert.NotNil(t, r)
	daskJob, ok := r.(*daskAPI.DaskJob)
	assert.True(t, ok)

	// Job pod - should not be interruptible
	jobSpec := daskJob.Spec.Job.Spec
	assert.Equal(t, defaultTolerations, jobSpec.Tolerations)
	assert.Equal(t, defaultNodeSelector, jobSpec.NodeSelector)
	assert.Equal(t, &defaultAffinity, jobSpec.Affinity)

	// Scheduler - should not be interruptible
	schedulerSpec := daskJob.Spec.Cluster.Spec.Scheduler.Spec
	assert.Equal(t, defaultTolerations, schedulerSpec.Tolerations)
	assert.Equal(t, defaultNodeSelector, schedulerSpec.NodeSelector)
	assert.Equal(t, &defaultAffinity, schedulerSpec.Affinity)

	// Default Workers - Should be interruptible
	workerSpec := daskJob.Spec.Cluster.Spec.Worker.Spec
	assert.Equal(t, interruptibleTolerations, workerSpec.Tolerations)
	assert.Equal(t, interruptibleNodeSelector, workerSpec.NodeSelector)
	assert.Equal(
		t,
		workerSpec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0],
		*interruptibleNodeSelectorRequirement,
	)
}
assert.Nil(t, err) + assert.NotNil(t, r) + daskJob, ok := r.(*daskAPI.DaskJob) + assert.True(t, ok) + + assert.Equal(t, podTempaltePriorityClassName, daskJob.Spec.Job.Spec.PriorityClassName) + assert.Equal(t, podTempaltePriorityClassName, daskJob.Spec.Cluster.Spec.Scheduler.Spec.PriorityClassName) + assert.Equal(t, podTempaltePriorityClassName, daskJob.Spec.Cluster.Spec.Worker.Spec.PriorityClassName) + + // Cleanup + flytek8s.DefaultPodTemplateStore.Delete(podTemplate) +} + +func TestBuildResourceDaskExtendedResources(t *testing.T) { + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + GpuDeviceNodeLabel: "gpu-node-label", + GpuPartitionSizeNodeLabel: "gpu-partition-size", + GpuResourceName: flytek8s.ResourceNvidiaGPU, + AddTolerationsForExtendedResources: []string{"nvidia.com/gpu"}, + })) + + fixtures := []struct { + name string + resources *v1.ResourceRequirements + extendedResourcesBase *core.ExtendedResources + extendedResourcesOverride *core.ExtendedResources + expectedNsr []v1.NodeSelectorTerm + expectedTol []v1.Toleration + }{ + { + "without overrides", + &v1.ResourceRequirements{ + Limits: v1.ResourceList{ + flytek8s.ResourceNvidiaGPU: resource.MustParse("1"), + }, + }, + &core.ExtendedResources{ + GpuAccelerator: &core.GPUAccelerator{ + Device: "nvidia-tesla-t4", + }, + }, + nil, + []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{ + v1.NodeSelectorRequirement{ + Key: "gpu-node-label", + Operator: v1.NodeSelectorOpIn, + Values: []string{"nvidia-tesla-t4"}, + }, + }, + }, + }, + []v1.Toleration{ + { + Key: "gpu-node-label", + Value: "nvidia-tesla-t4", + Operator: v1.TolerationOpEqual, + Effect: v1.TaintEffectNoSchedule, + }, + { + Key: "nvidia.com/gpu", + Operator: v1.TolerationOpExists, + Effect: v1.TaintEffectNoSchedule, + }, + }, + }, + { + "with overrides", + &v1.ResourceRequirements{ + Limits: v1.ResourceList{ + flytek8s.ResourceNvidiaGPU: resource.MustParse("1"), + }, + }, + &core.ExtendedResources{ + 
GpuAccelerator: &core.GPUAccelerator{ + Device: "nvidia-tesla-t4", + }, + }, + &core.ExtendedResources{ + GpuAccelerator: &core.GPUAccelerator{ + Device: "nvidia-tesla-a100", + PartitionSizeValue: &core.GPUAccelerator_PartitionSize{ + PartitionSize: "1g.5gb", + }, + }, + }, + []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{ + v1.NodeSelectorRequirement{ + Key: "gpu-node-label", + Operator: v1.NodeSelectorOpIn, + Values: []string{"nvidia-tesla-a100"}, + }, + v1.NodeSelectorRequirement{ + Key: "gpu-partition-size", + Operator: v1.NodeSelectorOpIn, + Values: []string{"1g.5gb"}, + }, + }, + }, + }, + []v1.Toleration{ + { + Key: "gpu-node-label", + Value: "nvidia-tesla-a100", + Operator: v1.TolerationOpEqual, + Effect: v1.TaintEffectNoSchedule, + }, + { + Key: "gpu-partition-size", + Value: "1g.5gb", + Operator: v1.TolerationOpEqual, + Effect: v1.TaintEffectNoSchedule, + }, + { + Key: "nvidia.com/gpu", + Operator: v1.TolerationOpExists, + Effect: v1.TaintEffectNoSchedule, + }, + }, + }, + } + + for _, f := range fixtures { + t.Run(f.name, func(t *testing.T) { + taskTemplate := dummyDaskTaskTemplate("", nil, "") + taskTemplate.ExtendedResources = f.extendedResourcesBase + taskContext := dummyDaskTaskContext(taskTemplate, f.resources, f.extendedResourcesOverride, false) + daskResourceHandler := daskResourceHandler{} + r, err := daskResourceHandler.BuildResource(context.TODO(), taskContext) + assert.Nil(t, err) + assert.NotNil(t, r) + daskJob, ok := r.(*daskAPI.DaskJob) + assert.True(t, ok) + + // Job pod + jobSpec := daskJob.Spec.Job.Spec + assert.EqualValues( + t, + f.expectedNsr, + jobSpec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms, + ) + assert.EqualValues( + t, + f.expectedTol, + jobSpec.Tolerations, + ) + + // Scheduler + schedulerSpec := daskJob.Spec.Cluster.Spec.Scheduler.Spec + assert.EqualValues( + t, + f.expectedNsr, + 
// TestGetPropertiesDask checks that the dask plugin reports default (empty)
// plugin properties.
func TestGetPropertiesDask(t *testing.T) {
	daskResourceHandler := daskResourceHandler{}
	expected := k8s.PluginProperties{}
	assert.Equal(t, expected, daskResourceHandler.GetProperties())
}

// TestBuildIdentityResourceDask checks that the identity resource is an empty
// DaskJob carrying only the expected TypeMeta (kind and API version).
func TestBuildIdentityResourceDask(t *testing.T) {
	daskResourceHandler := daskResourceHandler{}
	expected := &daskAPI.DaskJob{
		TypeMeta: metav1.TypeMeta{
			Kind:       KindDaskJob,
			APIVersion: daskAPI.SchemeGroupVersion.String(),
		},
	}

	taskTemplate := dummyDaskTaskTemplate("", nil, "")
	taskContext := dummyDaskTaskContext(taskTemplate, &v1.ResourceRequirements{}, nil, false)
	identityResources, err := daskResourceHandler.BuildIdentityResource(context.TODO(), taskContext.TaskExecutionMetadata())
	if err != nil {
		panic(err)
	}
	assert.Equal(t, expected, identityResources)
}
+ assert.Equal(t, taskPhase.Phase(), pluginsCore.PhaseInitializing) + assert.NotNil(t, taskPhase.Info()) + assert.NotNil(t, taskPhase.Info().Logs) + assert.NotNil(t, taskPhase.Info().LogContext) + assert.Nil(t, err) + + taskPhase, err = daskResourceHandler.GetTaskPhase(ctx, pluginContext, dummyDaskJob(daskAPI.DaskJobCreated)) + assert.NoError(t, err) + assert.Equal(t, taskPhase.Phase(), pluginsCore.PhaseInitializing) + assert.NotNil(t, taskPhase.Info()) + assert.NotNil(t, taskPhase.Info().Logs) + assert.NotNil(t, taskPhase.Info().LogContext) + assert.Nil(t, err) + + taskPhase, err = daskResourceHandler.GetTaskPhase(ctx, pluginContext, dummyDaskJob(daskAPI.DaskJobClusterCreated)) + assert.NoError(t, err) + assert.Equal(t, taskPhase.Phase(), pluginsCore.PhaseInitializing) + assert.NotNil(t, taskPhase.Info()) + assert.NotNil(t, taskPhase.Info().Logs) + assert.NotNil(t, taskPhase.Info().LogContext) + assert.Nil(t, err) + + taskPhase, err = daskResourceHandler.GetTaskPhase(ctx, pluginContext, dummyDaskJob(daskAPI.DaskJobRunning)) + assert.NoError(t, err) + assert.Equal(t, taskPhase.Phase(), pluginsCore.PhaseRunning) + assert.NotNil(t, taskPhase.Info()) + assert.NotNil(t, taskPhase.Info().Logs) + assert.Equal(t, expectedLogCtx.PrimaryPodName, taskPhase.Info().LogContext.PrimaryPodName) + assert.Nil(t, err) + + taskPhase, err = daskResourceHandler.GetTaskPhase(ctx, pluginContext, dummyDaskJob(daskAPI.DaskJobSuccessful)) + assert.NoError(t, err) + assert.Equal(t, taskPhase.Phase(), pluginsCore.PhaseSuccess) + assert.NotNil(t, taskPhase.Info()) + assert.NotNil(t, taskPhase.Info().Logs) + assert.Equal(t, expectedLogCtx.PrimaryPodName, taskPhase.Info().LogContext.PrimaryPodName) + assert.Nil(t, err) + + taskPhase, err = daskResourceHandler.GetTaskPhase(ctx, pluginContext, dummyDaskJob(daskAPI.DaskJobFailed)) + assert.NoError(t, err) + assert.Equal(t, taskPhase.Phase(), pluginsCore.PhaseRetryableFailure) + assert.NotNil(t, taskPhase.Info()) + assert.NotNil(t, 
// TestGetTaskPhaseIncreasePhaseVersion verifies that when the plugin observes
// the same phase again (Initializing at the default version), the phase
// version is bumped so downstream consumers register an update.
func TestGetTaskPhaseIncreasePhaseVersion(t *testing.T) {
	daskResourceHandler := daskResourceHandler{}
	ctx := context.TODO()

	// Prior state already at Initializing / DefaultPhaseVersion.
	pluginState := k8s.PluginState{
		Phase:        pluginsCore.PhaseInitializing,
		PhaseVersion: pluginsCore.DefaultPhaseVersion,
		Reason:       "task submitted to K8s",
	}
	taskTemplate := dummyDaskTaskTemplate("", nil, "")

	pluginContext := dummyDaskPluginContext(taskTemplate, &v1.ResourceRequirements{}, pluginState)
	taskPhase, err := daskResourceHandler.GetTaskPhase(ctx, pluginContext, dummyDaskJob(daskAPI.DaskJobCreated))

	assert.NoError(t, err)
	assert.Equal(t, taskPhase.Version(), pluginsCore.DefaultPhaseVersion+1)
}

// TestGetTaskPhaseWithNamespaceInLogContext verifies the pod log context for a
// running job carries the namespace, pod name and primary container name.
func TestGetTaskPhaseWithNamespaceInLogContext(t *testing.T) {
	daskResourceHandler := daskResourceHandler{}
	ctx := context.TODO()

	taskTemplate := dummyDaskTaskTemplate("", nil, "")
	pluginContext := dummyDaskPluginContext(taskTemplate, &v1.ResourceRequirements{}, k8s.PluginState{})

	taskPhase, err := daskResourceHandler.GetTaskPhase(ctx, pluginContext, dummyDaskJob(daskAPI.DaskJobRunning))
	assert.NoError(t, err)
	assert.NotNil(t, taskPhase.Info())
	assert.NotNil(t, taskPhase.Info().LogContext)
	assert.Equal(t, 1, len(taskPhase.Info().LogContext.Pods))

	// Verify namespace is set in the pod log context
	podLogContext := taskPhase.Info().LogContext.Pods[0]
	assert.Equal(t, defaultNamespace, podLogContext.Namespace)
	assert.Equal(t, "job-runner-pod-name", podLogContext.PodName)
	assert.Equal(t, defaultDaskJobRunnerPrimaryContainerName, podLogContext.PrimaryContainerName)
}
}, + Status: v1.PodStatus{ + Phase: v1.PodFailed, + ContainerStatuses: []v1.ContainerStatus{ + { + Name: defaultDaskJobRunnerPrimaryContainerName, + State: v1.ContainerState{ + Terminated: &v1.ContainerStateTerminated{ + ExitCode: 1, + Reason: "Error", + Message: "Container failed", + }, + }, + }, + }, + }, + } + + taskTemplate := dummyDaskTaskTemplate("", nil, "") + pluginContext := dummyDaskPluginContextWithPods(taskTemplate, &v1.ResourceRequirements{}, k8s.PluginState{}, pod) + + // Even though DaskJob status is running, should return failure due to pod status + taskPhase, err := daskResourceHandler.GetTaskPhase(ctx, pluginContext, dummyDaskJob(daskAPI.DaskJobRunning)) + assert.NoError(t, err) + assert.True(t, taskPhase.Phase().IsFailure()) +} + +func TestGetTaskPhaseWithPendingPodInvalidImage(t *testing.T) { + daskResourceHandler := daskResourceHandler{} + ctx := context.TODO() + + // Create a pending pod with InvalidImageName - this should fail immediately + pod := &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "job-runner-pod-name", + Namespace: defaultNamespace, + }, + Status: v1.PodStatus{ + Phase: v1.PodPending, + Conditions: []v1.PodCondition{ + { + Type: v1.PodReady, + Status: v1.ConditionFalse, + LastTransitionTime: metav1.Time{Time: time.Now()}, + Reason: "ContainersNotReady", + Message: "containers with unready status: [job-runner]", + }, + }, + ContainerStatuses: []v1.ContainerStatus{ + { + Name: defaultDaskJobRunnerPrimaryContainerName, + Ready: false, + State: v1.ContainerState{ + Waiting: &v1.ContainerStateWaiting{ + Reason: "InvalidImageName", + Message: "Invalid image name", + }, + }, + }, + }, + }, + } + + taskTemplate := dummyDaskTaskTemplate("", nil, "") + pluginContext := dummyDaskPluginContextWithPods(taskTemplate, &v1.ResourceRequirements{}, k8s.PluginState{}, pod) + + taskPhase, err := daskResourceHandler.GetTaskPhase(ctx, pluginContext, dummyDaskJob(daskAPI.DaskJobClusterCreated)) + assert.NoError(t, err) + // Should detect the 
// TestGetTaskPhaseContainerNameConstant verifies that the shared primary
// container name constant is used for both the primary container field and
// the container list in the pod log context.
func TestGetTaskPhaseContainerNameConstant(t *testing.T) {
	daskResourceHandler := daskResourceHandler{}
	ctx := context.TODO()

	taskTemplate := dummyDaskTaskTemplate("", nil, "")
	pluginContext := dummyDaskPluginContext(taskTemplate, &v1.ResourceRequirements{}, k8s.PluginState{})

	taskPhase, err := daskResourceHandler.GetTaskPhase(ctx, pluginContext, dummyDaskJob(daskAPI.DaskJobSuccessful))
	assert.NoError(t, err)
	assert.NotNil(t, taskPhase.Info())
	assert.NotNil(t, taskPhase.Info().LogContext)

	// Verify the constant is used for container names
	podLogContext := taskPhase.Info().LogContext.Pods[0]
	assert.Equal(t, defaultDaskJobRunnerPrimaryContainerName, podLogContext.PrimaryContainerName)
	assert.Equal(t, 1, len(podLogContext.Containers))
	assert.Equal(t, defaultDaskJobRunnerPrimaryContainerName, podLogContext.Containers[0].ContainerName)
}
"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/tasklog" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/utils" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" + pluginsIdl "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins" + kfplugins "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins/kubeflow" +) + +const ( + TensorflowTaskType = "tensorflow" + MPITaskType = "mpi" + PytorchTaskType = "pytorch" +) + +// ExtractCurrentCondition will return the first job condition for tensorflow/pytorch +func ExtractCurrentCondition(jobConditions []kubeflowv1.JobCondition) (kubeflowv1.JobCondition, error) { + if jobConditions != nil { + sort.Slice(jobConditions, func(i, j int) bool { + return jobConditions[i].LastTransitionTime.Time.After(jobConditions[j].LastTransitionTime.Time) + }) + + for _, jc := range jobConditions { + if jc.Status == v1.ConditionTrue { + return jc, nil + } + } + return kubeflowv1.JobCondition{}, fmt.Errorf("found no current condition. 
Conditions: %+v", jobConditions) + } + return kubeflowv1.JobCondition{}, nil +} + +// GetPhaseInfo will return the phase of kubeflow job +func GetPhaseInfo(currentCondition kubeflowv1.JobCondition, occurredAt time.Time, + taskPhaseInfo pluginsCore.TaskInfo) (pluginsCore.PhaseInfo, error) { + if len(currentCondition.Type) == 0 { + return pluginsCore.PhaseInfoQueuedWithTaskInfo(occurredAt, pluginsCore.DefaultPhaseVersion, "JobCreated", &taskPhaseInfo), nil + } + switch currentCondition.Type { + case kubeflowv1.JobCreated: + return pluginsCore.PhaseInfoQueuedWithTaskInfo(occurredAt, pluginsCore.DefaultPhaseVersion, "JobCreated", &taskPhaseInfo), nil + case kubeflowv1.JobRunning: + return pluginsCore.PhaseInfoRunning(pluginsCore.DefaultPhaseVersion, &taskPhaseInfo), nil + case kubeflowv1.JobSucceeded: + return pluginsCore.PhaseInfoSuccess(&taskPhaseInfo), nil + case kubeflowv1.JobFailed: + details := fmt.Sprintf("Job failed:\n\t%v - %v", currentCondition.Reason, currentCondition.Message) + return pluginsCore.PhaseInfoRetryableFailure(flyteerr.DownstreamSystemError, details, &taskPhaseInfo), nil + case kubeflowv1.JobRestarting: + return pluginsCore.PhaseInfoRunning(pluginsCore.DefaultPhaseVersion, &taskPhaseInfo), nil + } + + return pluginsCore.PhaseInfoUndefined, nil +} + +// GetMPIPhaseInfo will return the phase of MPI job +func GetMPIPhaseInfo(currentCondition kubeflowv1.JobCondition, occurredAt time.Time, + taskPhaseInfo pluginsCore.TaskInfo) (pluginsCore.PhaseInfo, error) { + switch currentCondition.Type { + case kubeflowv1.JobCreated: + return pluginsCore.PhaseInfoQueuedWithTaskInfo(occurredAt, pluginsCore.DefaultPhaseVersion, "New job name submitted to MPI operator", &taskPhaseInfo), nil + case kubeflowv1.JobRunning: + return pluginsCore.PhaseInfoRunning(pluginsCore.DefaultPhaseVersion, &taskPhaseInfo), nil + case kubeflowv1.JobSucceeded: + return pluginsCore.PhaseInfoSuccess(&taskPhaseInfo), nil + case kubeflowv1.JobFailed: + details := fmt.Sprintf("Job 
failed:\n\t%v - %v", currentCondition.Reason, currentCondition.Message) + return pluginsCore.PhaseInfoRetryableFailure(flyteerr.DownstreamSystemError, details, &taskPhaseInfo), nil + case kubeflowv1.JobRestarting: + return pluginsCore.PhaseInfoRunning(pluginsCore.DefaultPhaseVersion, &taskPhaseInfo), nil + } + + return pluginsCore.PhaseInfoUndefined, nil +} + +// GetLogs will return the logs for kubeflow job +func GetLogs(pluginContext k8s.PluginContext, taskType string, objectMeta meta_v1.ObjectMeta, taskTemplate *core.TaskTemplate, hasMaster bool, + workersCount int32, psReplicasCount int32, chiefReplicasCount int32, evaluatorReplicasCount int32, primaryContainerName string) ([]*core.TaskLog, error) { + name := objectMeta.Name + namespace := objectMeta.Namespace + + taskLogs := make([]*core.TaskLog, 0, 10) + taskExecID := pluginContext.TaskExecutionMetadata().GetTaskExecutionID() + + logPlugin, err := logs.InitializeLogPlugins(logs.GetLogConfig()) + + if err != nil { + return nil, err + } + + if logPlugin == nil { + return nil, nil + } + + // We use the creation timestamp of the Kubeflow Job as a proxy for the start time of the pods + startTime := objectMeta.CreationTimestamp.Time.Unix() + // Don't have a good mechanism for this yet, but approximating with time.Now for now + finishTime := time.Now().Unix() + RFC3999StartTime := time.Unix(startTime, 0).Format(time.RFC3339) + RFC3999FinishTime := time.Unix(finishTime, 0).Format(time.RFC3339) + + if taskType == PytorchTaskType && hasMaster { + masterTaskLog, masterErr := logPlugin.GetTaskLogs( + tasklog.Input{ + PodName: name + "-master-0", + Namespace: namespace, + LogName: "master", + PodRFC3339StartTime: RFC3999StartTime, + PodRFC3339FinishTime: RFC3999FinishTime, + PodUnixStartTime: startTime, + PodUnixFinishTime: finishTime, + TaskExecutionID: taskExecID, + TaskTemplate: taskTemplate, + ContainerName: primaryContainerName, + }, + ) + if masterErr != nil { + return nil, masterErr + } + taskLogs = 
append(taskLogs, masterTaskLog.TaskLogs...) + } + + // get all workers log + for workerIndex := int32(0); workerIndex < workersCount; workerIndex++ { + workerLog, err := logPlugin.GetTaskLogs(tasklog.Input{ + PodName: name + fmt.Sprintf("-worker-%d", workerIndex), + Namespace: namespace, + PodRFC3339StartTime: RFC3999StartTime, + PodRFC3339FinishTime: RFC3999FinishTime, + PodUnixStartTime: startTime, + PodUnixFinishTime: finishTime, + TaskExecutionID: taskExecID, + TaskTemplate: taskTemplate, + ContainerName: primaryContainerName, + }) + if err != nil { + return nil, err + } + taskLogs = append(taskLogs, workerLog.TaskLogs...) + } + + if taskType == MPITaskType || taskType == PytorchTaskType { + return taskLogs, nil + } + + // get all parameter servers logs + for psReplicaIndex := int32(0); psReplicaIndex < psReplicasCount; psReplicaIndex++ { + psReplicaLog, err := logPlugin.GetTaskLogs(tasklog.Input{ + PodName: name + fmt.Sprintf("-psReplica-%d", psReplicaIndex), + Namespace: namespace, + TaskExecutionID: taskExecID, + TaskTemplate: taskTemplate, + }) + if err != nil { + return nil, err + } + taskLogs = append(taskLogs, psReplicaLog.TaskLogs...) + } + // get chief worker log, and the max number of chief worker is 1 + if chiefReplicasCount != 0 { + chiefReplicaLog, err := logPlugin.GetTaskLogs(tasklog.Input{ + PodName: name + fmt.Sprintf("-chiefReplica-%d", 0), + Namespace: namespace, + TaskExecutionID: taskExecID, + TaskTemplate: taskTemplate, + }) + if err != nil { + return nil, err + } + taskLogs = append(taskLogs, chiefReplicaLog.TaskLogs...) + } + // get evaluator log, and the max number of evaluator is 1 + if evaluatorReplicasCount != 0 { + evaluatorReplicasCount, err := logPlugin.GetTaskLogs(tasklog.Input{ + PodName: name + fmt.Sprintf("-evaluatorReplica-%d", 0), + Namespace: namespace, + TaskExecutionID: taskExecID, + TaskTemplate: taskTemplate, + }) + if err != nil { + return nil, err + } + taskLogs = append(taskLogs, evaluatorReplicasCount.TaskLogs...) 
// OverridePrimaryContainerName renames the container currently named
// primaryContainerName to defaultContainerName, in place. Only the first
// matching container is renamed; a missing match is a silent no-op.
func OverridePrimaryContainerName(podSpec *v1.PodSpec, primaryContainerName string, defaultContainerName string) {
	// Pytorch operator forces pod to have container named 'pytorch'
	// https://github.com/kubeflow/pytorch-operator/blob/037cd1b18eb77f657f2a4bc8a8334f2a06324b57/pkg/apis/pytorch/validation/validation.go#L54-L62
	// Tensorflow operator forces pod to have container named 'tensorflow'
	// https://github.com/kubeflow/tf-operator/blob/984adc287e6fe82841e4ca282dc9a2cbb71e2d4a/pkg/apis/tensorflow/validation/validation.go#L55-L63
	// hence we have to override the name set here
	// https://github.com/flyteorg/flyteplugins/blob/209c52d002b4e6a39be5d175bc1046b7e631c153/go/tasks/pluginmachinery/flytek8s/container_helper.go#L116
	for idx, c := range podSpec.Containers {
		if c.Name == primaryContainerName {
			podSpec.Containers[idx].Name = defaultContainerName
			return
		}
	}
}

// ParseRunPolicy converts a kubeflow plugin RunPolicy object to a k8s RunPolicy object.
// Zero-valued numeric fields are treated as "unset" and left nil; the clean
// pod policy is always populated (a zero enum maps via ParseCleanPodPolicy).
func ParseRunPolicy(flyteRunPolicy kfplugins.RunPolicy) kubeflowv1.RunPolicy {
	runPolicy := kubeflowv1.RunPolicy{}
	if flyteRunPolicy.GetBackoffLimit() != 0 {
		var backoffLimit = flyteRunPolicy.GetBackoffLimit()
		runPolicy.BackoffLimit = &backoffLimit
	}
	var cleanPodPolicy = ParseCleanPodPolicy(flyteRunPolicy.GetCleanPodPolicy())
	runPolicy.CleanPodPolicy = &cleanPodPolicy
	if flyteRunPolicy.GetActiveDeadlineSeconds() != 0 {
		var ddlSeconds = int64(flyteRunPolicy.GetActiveDeadlineSeconds())
		runPolicy.ActiveDeadlineSeconds = &ddlSeconds
	}
	if flyteRunPolicy.GetTtlSecondsAfterFinished() != 0 {
		var ttl = flyteRunPolicy.GetTtlSecondsAfterFinished()
		runPolicy.TTLSecondsAfterFinished = &ttl
	}

	return runPolicy
}
+func ParseCleanPodPolicy(flyteCleanPodPolicy kfplugins.CleanPodPolicy) kubeflowv1.CleanPodPolicy { + cleanPodPolicyMap := map[kfplugins.CleanPodPolicy]kubeflowv1.CleanPodPolicy{ + kfplugins.CleanPodPolicy_CLEANPOD_POLICY_NONE: kubeflowv1.CleanPodPolicyNone, + kfplugins.CleanPodPolicy_CLEANPOD_POLICY_ALL: kubeflowv1.CleanPodPolicyAll, + kfplugins.CleanPodPolicy_CLEANPOD_POLICY_RUNNING: kubeflowv1.CleanPodPolicyRunning, + } + return cleanPodPolicyMap[flyteCleanPodPolicy] +} + +// Get k8s restart policy from flyte kubeflow plugins restart policy. +func ParseRestartPolicy(flyteRestartPolicy pluginsIdl.RestartPolicy) kubeflowv1.RestartPolicy { + restartPolicyMap := map[pluginsIdl.RestartPolicy]kubeflowv1.RestartPolicy{ + pluginsIdl.RestartPolicy_RESTART_POLICY_NEVER: kubeflowv1.RestartPolicyNever, + pluginsIdl.RestartPolicy_RESTART_POLICY_ON_FAILURE: kubeflowv1.RestartPolicyOnFailure, + pluginsIdl.RestartPolicy_RESTART_POLICY_ALWAYS: kubeflowv1.RestartPolicyAlways, + } + return restartPolicyMap[flyteRestartPolicy] +} + +// OverrideContainerSpec overrides the specified container's properties in the given podSpec. The function +// updates the image and command arguments of the container that matches the given containerName. 
// OverrideContainerSpec overrides the specified container's properties in the given podSpec. The function
// updates the image and command arguments of the container that matches the given containerName.
// An empty image or empty args leave the existing values untouched. Always
// returns nil; the error return is kept for caller-interface stability.
func OverrideContainerSpec(podSpec *v1.PodSpec, containerName string, image string, args []string) error {
	for idx, c := range podSpec.Containers {
		if c.Name == containerName {
			if image != "" {
				podSpec.Containers[idx].Image = image
			}
			if len(args) != 0 {
				podSpec.Containers[idx].Args = args
			}
		}
	}
	return nil
}

// ToReplicaSpec builds a kubeflow ReplicaSpec from the task execution
// context's pod spec. The result defaults to zero replicas and
// RestartPolicyNever; callers adjust both afterwards.
func ToReplicaSpec(ctx context.Context, taskCtx pluginsCore.TaskExecutionContext, primaryContainerName string) (*kubeflowv1.ReplicaSpec, error) {
	podSpec, objectMeta, oldPrimaryContainerName, err := flytek8s.ToK8sPodSpec(ctx, taskCtx)
	if err != nil {
		return nil, flyteerr.Errorf(flyteerr.BadTaskSpecification, "Unable to create pod spec: [%v]", err.Error())
	}

	// Training operators require fixed container names (e.g. "pytorch"),
	// so rename the primary container accordingly.
	OverridePrimaryContainerName(podSpec, oldPrimaryContainerName, primaryContainerName)

	// Merge default annotations/labels from plugin config with the task's own.
	cfg := config.GetK8sPluginConfig()
	objectMeta.Annotations = utils.UnionMaps(cfg.DefaultAnnotations, objectMeta.Annotations, utils.CopyMap(taskCtx.TaskExecutionMetadata().GetAnnotations()))
	objectMeta.Labels = utils.UnionMaps(cfg.DefaultLabels, objectMeta.Labels, utils.CopyMap(taskCtx.TaskExecutionMetadata().GetLabels()))

	replicas := int32(0)
	return &kubeflowv1.ReplicaSpec{
		Replicas: &replicas,
		Template: v1.PodTemplateSpec{
			ObjectMeta: *objectMeta,
			Spec:       *podSpec,
		},
		RestartPolicy: kubeflowv1.RestartPolicyNever,
	}, nil
}

// kfDistributedReplicaSpec is the subset of accessors shared by the kubeflow
// plugin replica-spec messages (worker / ps / chief / launcher, etc.).
type kfDistributedReplicaSpec interface {
	GetReplicas() int32
	GetImage() string
	GetResources() *core.Resources
	GetRestartPolicy() pluginsIdl.RestartPolicy
	GetCommon() *pluginsIdl.CommonReplicaSpec
}

// allowsCommandOverride is implemented by replica specs that can override the
// container command.
type allowsCommandOverride interface {
	GetCommand() []string
}
// ToReplicaSpecWithOverrides builds a kubeflow ReplicaSpec for one replica
// group, applying the plugin-level overrides for replica count, image,
// resources, restart policy and (when supported) command. Fields are read
// from the CommonReplicaSpec when present; the flat accessors are deprecated
// fallbacks. isMaster forces a single replica regardless of the configured
// count.
func ToReplicaSpecWithOverrides(ctx context.Context, taskCtx pluginsCore.TaskExecutionContext, rs kfDistributedReplicaSpec, primaryContainerName string, isMaster bool) (*kubeflowv1.ReplicaSpec, error) {
	var replicas int32
	var image string
	var resources *core.Resources
	var restartPolicy pluginsIdl.RestartPolicy

	// replicas, image, resources, restartPolicy are deprecated since the common replica spec is introduced.
	// Therefore, if the common replica spec is set, use that to get the common fields
	common := rs.GetCommon()
	if common != nil {
		replicas = common.GetReplicas()
		image = common.GetImage()
		resources = common.GetResources()
		restartPolicy = common.GetRestartPolicy()
	} else {
		replicas = rs.GetReplicas()
		image = rs.GetImage()
		resources = rs.GetResources()
		restartPolicy = rs.GetRestartPolicy()
	}

	taskCtxOptions := []flytek8s.PluginTaskExecutionContextOption{}
	if resources != nil {
		// NOTE: this inner `resources` (k8s requirements) shadows the outer
		// idl `resources`; the shadowing is intentional and preexisting.
		resources, err := flytek8s.ToK8sResourceRequirements(resources)
		if err != nil {
			return nil, flyteerr.Errorf(flyteerr.BadTaskSpecification, "invalid TaskSpecification on Resources [%v], Err: [%v]", resources, err.Error())
		}

		// Get extended resources for GPU accelerator info
		taskTemplate, err := taskCtx.TaskReader().Read(ctx)
		if err != nil {
			return nil, err
		}
		extendedResources := flytek8s.ApplyExtendedResourcesOverrides(
			taskTemplate.GetExtendedResources(),
			taskCtx.TaskExecutionMetadata().GetOverrides().GetExtendedResources(),
		)

		// Normalize GPU resource names BEFORE applying overrides (e.g., "gpu" → "nvidia.com/gpu")
		// This ensures ApplyK8sResourceOverrides can find GPU resources under the correct name and
		// apply platform limits, and later toleration lookups succeed.
		flytek8s.SanitizeGPUResourceRequirements(resources, extendedResources.GetGpuAccelerator())

		*resources = flytek8s.ApplyK8sResourceOverrides(taskCtx.TaskExecutionMetadata(), resources)
		taskCtxOptions = append(taskCtxOptions, flytek8s.WithResources(resources))
	}
	newTaskCtx := flytek8s.NewPluginTaskExecutionContext(taskCtx, taskCtxOptions...)
	replicaSpec, err := ToReplicaSpec(ctx, newTaskCtx, primaryContainerName)
	if err != nil {
		return nil, err
	}

	// Master should have a single replica
	if isMaster {
		replicas := int32(1)
		replicaSpec.Replicas = &replicas
	}

	var command []string
	if v, ok := rs.(allowsCommandOverride); ok {
		command = v.GetCommand()
	}
	if err := OverrideContainerSpec(
		&replicaSpec.Template.Spec,
		primaryContainerName,
		image,
		command,
	); err != nil {
		return nil, err
	}

	replicaSpec.RestartPolicy = ParseRestartPolicy(restartPolicy)

	// Non-master groups take the configured replica count.
	if !isMaster {
		replicaSpec.Replicas = &replicas
	}

	return replicaSpec, nil
}
+ replicaSpec, err := ToReplicaSpec(ctx, newTaskCtx, primaryContainerName) + if err != nil { + return nil, err + } + + // Master should have a single replica + if isMaster { + replicas := int32(1) + replicaSpec.Replicas = &replicas + } + + var command []string + if v, ok := rs.(allowsCommandOverride); ok { + command = v.GetCommand() + } + if err := OverrideContainerSpec( + &replicaSpec.Template.Spec, + primaryContainerName, + image, + command, + ); err != nil { + return nil, err + } + + replicaSpec.RestartPolicy = ParseRestartPolicy(restartPolicy) + + if !isMaster { + replicaSpec.Replicas = &replicas + } + + return replicaSpec, nil +} + +func GetReplicaCount(specs map[kubeflowv1.ReplicaType]*kubeflowv1.ReplicaSpec, replicaType kubeflowv1.ReplicaType) *int32 { + if spec, ok := specs[replicaType]; ok && spec.Replicas != nil { + return spec.Replicas + } + + return new(int32) // return 0 as default value +} diff --git a/flyteplugins/go/tasks/plugins/k8s/kfoperators/common/common_operator_test.go b/flyteplugins/go/tasks/plugins/k8s/kfoperators/common/common_operator_test.go new file mode 100644 index 0000000000..c23c3cfb3d --- /dev/null +++ b/flyteplugins/go/tasks/plugins/k8s/kfoperators/common/common_operator_test.go @@ -0,0 +1,385 @@ +package common + +import ( + "fmt" + "os" + "testing" + "time" + + kubeflowv1 "github.com/kubeflow/training-operator/pkg/apis/kubeflow.org/v1" + "github.com/stretchr/testify/assert" + "google.golang.org/protobuf/types/known/structpb" + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/logs" + pluginsCore "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core/mocks" + k8smocks "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/k8s/mocks" + 
// TestMain pins the process-local timezone to UTC so timestamp formatting in
// these tests is deterministic across environments.
func TestMain(m *testing.M) {
	// All tests should run assuming UTC timezone.
	time.Local = time.UTC
	code := m.Run()
	os.Exit(code)
}
Conditions: %+v", jobConditions)) +} + +func TestGetPhaseInfo(t *testing.T) { + jobCreating := kubeflowv1.JobCondition{} + taskPhase, err := GetPhaseInfo(jobCreating, time.Now(), pluginsCore.TaskInfo{}) + assert.NoError(t, err) + assert.Equal(t, pluginsCore.PhaseQueued, taskPhase.Phase()) + assert.NotNil(t, taskPhase.Info()) + assert.Nil(t, err) + + jobCreated := kubeflowv1.JobCondition{ + Type: kubeflowv1.JobCreated, + } + taskPhase, err = GetPhaseInfo(jobCreated, time.Now(), pluginsCore.TaskInfo{}) + assert.NoError(t, err) + assert.Equal(t, pluginsCore.PhaseQueued, taskPhase.Phase()) + assert.NotNil(t, taskPhase.Info()) + assert.Nil(t, err) + + jobSucceeded := kubeflowv1.JobCondition{ + Type: kubeflowv1.JobSucceeded, + } + taskPhase, err = GetPhaseInfo(jobSucceeded, time.Now(), pluginsCore.TaskInfo{}) + assert.NoError(t, err) + assert.Equal(t, pluginsCore.PhaseSuccess, taskPhase.Phase()) + assert.NotNil(t, taskPhase.Info()) + assert.Nil(t, err) + + jobFailed := kubeflowv1.JobCondition{ + Type: kubeflowv1.JobFailed, + } + taskPhase, err = GetPhaseInfo(jobFailed, time.Now(), pluginsCore.TaskInfo{}) + assert.NoError(t, err) + assert.Equal(t, pluginsCore.PhaseRetryableFailure, taskPhase.Phase()) + assert.NotNil(t, taskPhase.Info()) + assert.Nil(t, err) + + jobRestarting := kubeflowv1.JobCondition{ + Type: kubeflowv1.JobRestarting, + } + taskPhase, err = GetPhaseInfo(jobRestarting, time.Now(), pluginsCore.TaskInfo{}) + assert.NoError(t, err) + assert.Equal(t, pluginsCore.PhaseRunning, taskPhase.Phase()) + assert.NotNil(t, taskPhase.Info()) + assert.Nil(t, err) + + jobRestarting = kubeflowv1.JobCondition{ + Type: kubeflowv1.JobRunning, + } + taskPhase, err = GetPhaseInfo(jobRestarting, time.Now(), pluginsCore.TaskInfo{}) + assert.NoError(t, err) + assert.Equal(t, pluginsCore.PhaseRunning, taskPhase.Phase()) + assert.NotNil(t, taskPhase.Info()) + assert.Nil(t, err) +} + +func TestGetMPIPhaseInfo(t *testing.T) { + jobCreated := kubeflowv1.JobCondition{ + Type: 
kubeflowv1.JobCreated, + } + taskPhase, err := GetMPIPhaseInfo(jobCreated, time.Now(), pluginsCore.TaskInfo{}) + assert.NoError(t, err) + assert.Equal(t, pluginsCore.PhaseQueued, taskPhase.Phase()) + assert.NotNil(t, taskPhase.Info()) + assert.Nil(t, err) + + jobSucceeded := kubeflowv1.JobCondition{ + Type: kubeflowv1.JobSucceeded, + } + taskPhase, err = GetMPIPhaseInfo(jobSucceeded, time.Now(), pluginsCore.TaskInfo{}) + assert.NoError(t, err) + assert.Equal(t, pluginsCore.PhaseSuccess, taskPhase.Phase()) + assert.NotNil(t, taskPhase.Info()) + assert.Nil(t, err) + + jobFailed := kubeflowv1.JobCondition{ + Type: kubeflowv1.JobFailed, + } + taskPhase, err = GetMPIPhaseInfo(jobFailed, time.Now(), pluginsCore.TaskInfo{}) + assert.NoError(t, err) + assert.Equal(t, pluginsCore.PhaseRetryableFailure, taskPhase.Phase()) + assert.NotNil(t, taskPhase.Info()) + assert.Nil(t, err) + + jobRestarting := kubeflowv1.JobCondition{ + Type: kubeflowv1.JobRestarting, + } + taskPhase, err = GetMPIPhaseInfo(jobRestarting, time.Now(), pluginsCore.TaskInfo{}) + assert.NoError(t, err) + assert.Equal(t, pluginsCore.PhaseRunning, taskPhase.Phase()) + assert.NotNil(t, taskPhase.Info()) + assert.Nil(t, err) + + jobRestarting = kubeflowv1.JobCondition{ + Type: kubeflowv1.JobRunning, + } + taskPhase, err = GetMPIPhaseInfo(jobRestarting, time.Now(), pluginsCore.TaskInfo{}) + assert.NoError(t, err) + assert.Equal(t, pluginsCore.PhaseRunning, taskPhase.Phase()) + assert.NotNil(t, taskPhase.Info()) + assert.Nil(t, err) +} + +func TestGetLogs(t *testing.T) { + assert.NoError(t, logs.SetLogConfig(&logs.LogConfig{ + IsKubernetesEnabled: true, + KubernetesURL: "k8s.com", + })) + + workers := int32(1) + launcher := int32(1) + + taskTemplate := dummyTaskTemplate() + taskCtx := dummyTaskContext() + mpiJobObjectMeta := meta_v1.ObjectMeta{ + Name: "test", + Namespace: "mpi-namespace", + } + jobLogs, err := GetLogs(taskCtx, MPITaskType, mpiJobObjectMeta, taskTemplate, false, workers, launcher, 0, 0, 
kubeflowv1.MPIJobDefaultContainerName) + assert.NoError(t, err) + assert.Equal(t, 1, len(jobLogs)) + assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-0/pod?namespace=mpi-namespace", "mpi-namespace", "test"), jobLogs[0].Uri) + + pytorchJobObjectMeta := meta_v1.ObjectMeta{ + Name: "test", + Namespace: "pytorch-namespace", + } + jobLogs, err = GetLogs(taskCtx, PytorchTaskType, pytorchJobObjectMeta, taskTemplate, true, workers, launcher, 0, 0, kubeflowv1.PyTorchJobDefaultContainerName) + assert.NoError(t, err) + assert.Equal(t, 2, len(jobLogs)) + assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-master-0/pod?namespace=pytorch-namespace", "pytorch-namespace", "test"), jobLogs[0].Uri) + assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-0/pod?namespace=pytorch-namespace", "pytorch-namespace", "test"), jobLogs[1].Uri) + + tensorflowJobObjectMeta := meta_v1.ObjectMeta{ + Name: "test", + Namespace: "tensorflow-namespace", + } + jobLogs, err = GetLogs(taskCtx, TensorflowTaskType, tensorflowJobObjectMeta, taskTemplate, false, workers, launcher, 1, 0, kubeflowv1.TFJobDefaultContainerName) + assert.NoError(t, err) + assert.Equal(t, 3, len(jobLogs)) + assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-0/pod?namespace=tensorflow-namespace", "tensorflow-namespace", "test"), jobLogs[0].Uri) + assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-psReplica-0/pod?namespace=tensorflow-namespace", "tensorflow-namespace", "test"), jobLogs[1].Uri) + assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-chiefReplica-0/pod?namespace=tensorflow-namespace", "tensorflow-namespace", "test"), jobLogs[2].Uri) + +} + +func TestGetLogsTemplateUri(t *testing.T) { + assert.NoError(t, logs.SetLogConfig(&logs.LogConfig{ + IsStackDriverEnabled: true, + StackDriverTemplateURI: "https://console.cloud.google.com/logs/query;query=resource.labels.pod_name={{.podName}}&timestamp>{{.podRFC3339StartTime}}", + })) + + taskTemplate := dummyTaskTemplate() + taskCtx := dummyTaskContext() + 
pytorchJobObjectMeta := meta_v1.ObjectMeta{ + Name: "test", + Namespace: "pytorch-" + + "namespace", + CreationTimestamp: meta_v1.Time{ + Time: time.Date(2022, time.January, 1, 12, 0, 0, 0, time.UTC), + }, + } + jobLogs, err := GetLogs(taskCtx, PytorchTaskType, pytorchJobObjectMeta, taskTemplate, true, 1, 0, 0, 0, kubeflowv1.PyTorchJobDefaultContainerName) + assert.NoError(t, err) + assert.Equal(t, 2, len(jobLogs)) + assert.Equal(t, fmt.Sprintf("https://console.cloud.google.com/logs/query;query=resource.labels.pod_name=%s-master-0&timestamp>%s", "test", "2022-01-01T12:00:00Z"), jobLogs[0].Uri) + assert.Equal(t, fmt.Sprintf("https://console.cloud.google.com/logs/query;query=resource.labels.pod_name=%s-worker-0&timestamp>%s", "test", "2022-01-01T12:00:00Z"), jobLogs[1].Uri) +} + +func TestGetLogsDynamic(t *testing.T) { + dynamicLinks := map[string]tasklog.TemplateLogPlugin{ + "test-dynamic-link": { + TemplateURIs: []string{"https://some-service.com/{{.taskConfig.dynamicParam}}"}, + }, + } + + assert.NoError(t, logs.SetLogConfig(&logs.LogConfig{ + DynamicLogLinks: dynamicLinks, + })) + + taskTemplate := dummyTaskTemplate() + taskTemplate.Config = map[string]string{ + "link_type": "test-dynamic-link", + "dynamicParam": "dynamic-value", + } + taskCtx := dummyTaskContext() + pytorchJobObjectMeta := meta_v1.ObjectMeta{ + Name: "test", + Namespace: "pytorch-" + + "namespace", + CreationTimestamp: meta_v1.Time{ + Time: time.Date(2022, time.January, 1, 12, 0, 0, 0, time.UTC), + }, + } + jobLogs, err := GetLogs(taskCtx, PytorchTaskType, pytorchJobObjectMeta, taskTemplate, true, 1, 0, 0, 0, kubeflowv1.PyTorchJobDefaultContainerName) + assert.NoError(t, err) + assert.Equal(t, 2, len(jobLogs)) + assert.Equal(t, "https://some-service.com/dynamic-value", jobLogs[0].GetUri()) +} + +func dummyPodSpec() v1.PodSpec { + return v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "primary container", + Args: []string{"pyflyte-execute", "--task-module", 
"tests.flytekit.unit.sdk.tasks.test_sidecar_tasks", "--task-name", "simple_sidecar_task", "--inputs", "{{.input}}", "--output-prefix", "{{.outputPrefix}}"}, + Image: "dummy-image", + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + "cpu": resource.MustParse("2"), + "memory": resource.MustParse("200Mi"), + "gpu": resource.MustParse("1"), + }, + Requests: v1.ResourceList{ + "cpu": resource.MustParse("1"), + "memory": resource.MustParse("100Mi"), + "gpu": resource.MustParse("1"), + }, + }, + VolumeMounts: []v1.VolumeMount{ + { + Name: "volume mount", + }, + }, + }, + { + Name: "secondary container", + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + "gpu": resource.MustParse("2"), + }, + Requests: v1.ResourceList{ + "gpu": resource.MustParse("2"), + }, + }, + }, + }, + Volumes: []v1.Volume{ + { + Name: "dshm", + }, + }, + Tolerations: []v1.Toleration{ + { + Key: "my toleration key", + Value: "my toleration value", + }, + }, + } +} + +func TestOverrideContainerSpec(t *testing.T) { + podSpec := dummyPodSpec() + err := OverrideContainerSpec( + &podSpec, "primary container", "testing-image", + []string{"python", "-m", "run.py"}, + ) + assert.NoError(t, err) + assert.Equal(t, 2, len(podSpec.Containers)) + assert.Equal(t, "testing-image", podSpec.Containers[0].Image) + assert.Equal(t, []string{"python", "-m", "run.py"}, podSpec.Containers[0].Args) +} + +func TestOverrideContainerSpecEmptyFields(t *testing.T) { + podSpec := dummyPodSpec() + err := OverrideContainerSpec(&podSpec, "primary container", "", []string{}) + assert.NoError(t, err) + assert.Equal(t, 2, len(podSpec.Containers)) + assert.Equal(t, "dummy-image", podSpec.Containers[0].Image) + assert.Equal(t, []string{"pyflyte-execute", "--task-module", "tests.flytekit.unit.sdk.tasks.test_sidecar_tasks", "--task-name", "simple_sidecar_task", "--inputs", "{{.input}}", "--output-prefix", "{{.outputPrefix}}"}, podSpec.Containers[0].Args) +} + +func dummyTaskTemplate() *core.TaskTemplate { + 
id := "dummy-id" + + testImage := "dummy-image" + + structObj := structpb.Struct{} + + return &core.TaskTemplate{ + Id: &core.Identifier{Name: id}, + Type: "container", + Target: &core.TaskTemplate_Container{ + Container: &core.Container{ + Image: testImage, + }, + }, + Custom: &structObj, + } +} + +func dummyTaskContext() *k8smocks.PluginContext { + pCtx := &k8smocks.PluginContext{} + + tID := &mocks.TaskExecutionID{} + tID.OnGetID().Return(core.TaskExecutionIdentifier{ + TaskId: &core.Identifier{ + ResourceType: core.ResourceType_TASK, + Name: "my-task-name", + Project: "my-task-project", + Domain: "my-task-domain", + Version: "1", + }, + NodeExecutionId: &core.NodeExecutionIdentifier{ + ExecutionId: &core.WorkflowExecutionIdentifier{ + Name: "my-execution-name", + Project: "my-execution-project", + Domain: "my-execution-domain", + }, + }, + RetryAttempt: 0, + }) + tID.OnGetGeneratedName().Return("some-acceptable-name") + tID.On("GetUniqueNodeID").Return("an-unique-id") + + taskExecutionMetadata := &mocks.TaskExecutionMetadata{} + taskExecutionMetadata.OnGetTaskExecutionID().Return(tID) + pCtx.OnTaskExecutionMetadata().Return(taskExecutionMetadata) + return pCtx +} diff --git a/flyteplugins/go/tasks/plugins/k8s/kfoperators/common/config.go b/flyteplugins/go/tasks/plugins/k8s/kfoperators/common/config.go new file mode 100644 index 0000000000..22ea48ad08 --- /dev/null +++ b/flyteplugins/go/tasks/plugins/k8s/kfoperators/common/config.go @@ -0,0 +1,32 @@ +package common + +import ( + "time" + + pluginsConfig "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/config" + "github.com/flyteorg/flyte/v2/flytestdlib/config" +) + +//go:generate pflags Config --default-var=defaultConfig + +var ( + defaultConfig = Config{ + Timeout: config.Duration{Duration: 1 * time.Minute}, + } + + configSection = pluginsConfig.MustRegisterSubSection("kf-operator", &defaultConfig) +) + +// Config is config for 'pytorch' plugin +type Config struct { + // If kubeflow operator doesn't update 
the status of the task after this timeout, the task will be considered failed. + Timeout config.Duration `json:"timeout,omitempty"` +} + +func GetConfig() *Config { + return configSection.GetConfig().(*Config) +} + +func SetConfig(cfg *Config) error { + return configSection.SetConfig(cfg) +} diff --git a/flyteplugins/go/tasks/plugins/k8s/kfoperators/common/config_flags.go b/flyteplugins/go/tasks/plugins/k8s/kfoperators/common/config_flags.go new file mode 100755 index 0000000000..9fb5c02976 --- /dev/null +++ b/flyteplugins/go/tasks/plugins/k8s/kfoperators/common/config_flags.go @@ -0,0 +1,55 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package common + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. +func (Config) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (Config) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (Config) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in Config and its nested types. The format of the +// flags is json-name.json-sub-name... etc. 
+func (cfg Config) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("Config", pflag.ExitOnError) + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "timeout"), defaultConfig.Timeout.String(), "") + return cmdFlags +} diff --git a/flyteplugins/go/tasks/plugins/k8s/kfoperators/common/config_flags_test.go b/flyteplugins/go/tasks/plugins/k8s/kfoperators/common/config_flags_test.go new file mode 100755 index 0000000000..0afdf456cd --- /dev/null +++ b/flyteplugins/go/tasks/plugins/k8s/kfoperators/common/config_flags_test.go @@ -0,0 +1,116 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package common + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. +func canGetElementConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. +func jsonUnmarshalerHookConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. 
Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_Config(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_Config(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_Config(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_Config(val, result)) +} + +func testDecodeRaw_Config(t *testing.T, vStringSlice, result interface{}) { + assert.NoError(t, decode_Config(vStringSlice, result)) +} + +func TestConfig_GetPFlagSet(t *testing.T) { + val := Config{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) +} + +func TestConfig_SetFlags(t *testing.T) { + actual := Config{} + cmdFlags := actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_timeout", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := defaultConfig.Timeout.String() + + cmdFlags.Set("timeout", testValue) + if vString, err := cmdFlags.GetString("timeout"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.Timeout) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flyteplugins/go/tasks/plugins/k8s/kfoperators/mpi/mpi.go b/flyteplugins/go/tasks/plugins/k8s/kfoperators/mpi/mpi.go new file mode 100644 index 0000000000..68925e04df --- /dev/null +++ 
b/flyteplugins/go/tasks/plugins/k8s/kfoperators/mpi/mpi.go @@ -0,0 +1,217 @@ +package mpi + +import ( + "context" + "fmt" + "strings" + "time" + + kubeflowv1 "github.com/kubeflow/training-operator/pkg/apis/kubeflow.org/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client" + + flyteerr "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/errors" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery" + pluginsCore "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/k8s" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/utils" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/plugins/k8s/kfoperators/common" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins" + kfplugins "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins/kubeflow" +) + +const workerSpecCommandKey = "worker_spec_command" + +type mpiOperatorResourceHandler struct { +} + +// Sanity test that the plugin implements method of k8s.Plugin +var _ k8s.Plugin = mpiOperatorResourceHandler{} + +func (mpiOperatorResourceHandler) GetProperties() k8s.PluginProperties { + return k8s.PluginProperties{} +} + +// Defines a func to create a query object (typically just object and type meta portions) that's used to query k8s +// resources. +func (mpiOperatorResourceHandler) BuildIdentityResource(ctx context.Context, taskCtx pluginsCore.TaskExecutionMetadata) (client.Object, error) { + return &kubeflowv1.MPIJob{ + TypeMeta: metav1.TypeMeta{ + Kind: kubeflowv1.MPIJobKind, + APIVersion: kubeflowv1.SchemeGroupVersion.String(), + }, + }, nil +} + +// Defines a func to create the full resource object that will be posted to k8s. 
+func (mpiOperatorResourceHandler) BuildResource(ctx context.Context, taskCtx pluginsCore.TaskExecutionContext) (client.Object, error) { + taskTemplate, err := taskCtx.TaskReader().Read(ctx) + + if err != nil { + return nil, flyteerr.Errorf(flyteerr.BadTaskSpecification, "unable to fetch task specification [%v]", err.Error()) + } else if taskTemplate == nil { + return nil, flyteerr.Errorf(flyteerr.BadTaskSpecification, "nil task specification") + } + + slots := int32(1) + runPolicy := kubeflowv1.RunPolicy{} + + var launcherReplicaSpec, workerReplicaSpec *kubeflowv1.ReplicaSpec + + if taskTemplate.TaskTypeVersion == 0 { + mpiTaskExtraArgs := plugins.DistributedMPITrainingTask{} + err = utils.UnmarshalStruct(taskTemplate.GetCustom(), &mpiTaskExtraArgs) + if err != nil { + return nil, flyteerr.Errorf(flyteerr.BadTaskSpecification, "invalid TaskSpecification [%v], Err: [%v]", taskTemplate.GetCustom(), err.Error()) + } + + replicaSpec, err := common.ToReplicaSpec(ctx, taskCtx, kubeflowv1.MPIJobDefaultContainerName) + if err != nil { + return nil, flyteerr.Errorf(flyteerr.BadTaskSpecification, "Unable to create replica spec: [%v]", err.Error()) + } + launcherReplicaSpec = replicaSpec.DeepCopy() + // TODO (jeev): Is this even a valid configuration. Can there be more than 1 + // launcher? TaskTypeVersion 1 does not support overriding this value. 
+ launcherReplicas := mpiTaskExtraArgs.GetNumLauncherReplicas() + if launcherReplicas < 1 { + launcherReplicas = 1 + } + launcherReplicaSpec.Replicas = &launcherReplicas + workerReplicaSpec = replicaSpec.DeepCopy() + workerReplicas := mpiTaskExtraArgs.GetNumWorkers() + workerReplicaSpec.Replicas = &workerReplicas + slots = mpiTaskExtraArgs.GetSlots() + + // V1 requires passing worker command as template config parameter + taskTemplateConfig := taskTemplate.GetConfig() + workerSpecCommand := []string{} + if val, ok := taskTemplateConfig[workerSpecCommandKey]; ok { + workerSpecCommand = strings.Split(val, " ") + } + + for k := range workerReplicaSpec.Template.Spec.Containers { + if workerReplicaSpec.Template.Spec.Containers[k].Name == kubeflowv1.MPIJobDefaultContainerName { + workerReplicaSpec.Template.Spec.Containers[k].Args = workerSpecCommand + workerReplicaSpec.Template.Spec.Containers[k].Command = []string{} + } + } + + } else if taskTemplate.TaskTypeVersion == 1 { + kfMPITaskExtraArgs := kfplugins.DistributedMPITrainingTask{} + + err = utils.UnmarshalStruct(taskTemplate.GetCustom(), &kfMPITaskExtraArgs) + if err != nil { + return nil, flyteerr.Errorf(flyteerr.BadTaskSpecification, "invalid TaskSpecification [%v], Err: [%v]", taskTemplate.GetCustom(), err.Error()) + } + + launcherReplicaSpec, err = common.ToReplicaSpecWithOverrides(ctx, taskCtx, kfMPITaskExtraArgs.GetLauncherReplicas(), kubeflowv1.MPIJobDefaultContainerName, true) + if err != nil { + return nil, flyteerr.Errorf(flyteerr.BadTaskSpecification, "Unable to create launcher replica spec: [%v]", err.Error()) + } + + workerReplicaSpec, err = common.ToReplicaSpecWithOverrides(ctx, taskCtx, kfMPITaskExtraArgs.GetWorkerReplicas(), kubeflowv1.MPIJobDefaultContainerName, false) + if err != nil { + return nil, flyteerr.Errorf(flyteerr.BadTaskSpecification, "Unable to create worker replica spec: [%v]", err.Error()) + } + + if kfMPITaskExtraArgs.GetRunPolicy() != nil { + runPolicy = 
common.ParseRunPolicy(*kfMPITaskExtraArgs.GetRunPolicy()) + } + + } else { + return nil, flyteerr.Errorf(flyteerr.BadTaskSpecification, + "Invalid TaskSpecification, unsupported task template version [%v] key", taskTemplate.TaskTypeVersion) + } + + if *workerReplicaSpec.Replicas <= 0 { + return nil, fmt.Errorf("number of workers must be greater than 0") + } + if *launcherReplicaSpec.Replicas <= 0 { + return nil, fmt.Errorf("number of launchers must be greater than 0") + } + + jobSpec := kubeflowv1.MPIJobSpec{ + SlotsPerWorker: &slots, + RunPolicy: runPolicy, + MPIReplicaSpecs: map[kubeflowv1.ReplicaType]*kubeflowv1.ReplicaSpec{ + kubeflowv1.MPIJobReplicaTypeLauncher: launcherReplicaSpec, + kubeflowv1.MPIJobReplicaTypeWorker: workerReplicaSpec, + }, + } + + job := &kubeflowv1.MPIJob{ + TypeMeta: metav1.TypeMeta{ + Kind: kubeflowv1.MPIJobKind, + APIVersion: kubeflowv1.SchemeGroupVersion.String(), + }, + Spec: jobSpec, + } + + return job, nil +} + +// Analyzes the k8s resource and reports the status as TaskPhase. This call is expected to be relatively fast, +// any operations that might take a long time (limits are configured system-wide) should be offloaded to the +// background. 
+func (mpiOperatorResourceHandler) GetTaskPhase(ctx context.Context, pluginContext k8s.PluginContext, resource client.Object) (pluginsCore.PhaseInfo, error) { + var numWorkers, numLauncherReplicas *int32 + app, ok := resource.(*kubeflowv1.MPIJob) + if !ok { + return pluginsCore.PhaseInfoUndefined, fmt.Errorf("failed to convert resource data type") + } + + taskTemplate, err := pluginContext.TaskReader().Read(ctx) + if err != nil { + return pluginsCore.PhaseInfoUndefined, err + } + + numWorkers = common.GetReplicaCount(app.Spec.MPIReplicaSpecs, kubeflowv1.MPIJobReplicaTypeWorker) + numLauncherReplicas = common.GetReplicaCount(app.Spec.MPIReplicaSpecs, kubeflowv1.MPIJobReplicaTypeLauncher) + + taskLogs, err := common.GetLogs(pluginContext, common.MPITaskType, app.ObjectMeta, taskTemplate, false, + *numWorkers, *numLauncherReplicas, 0, 0, kubeflowv1.MPIJobDefaultContainerName) + if err != nil { + return pluginsCore.PhaseInfoUndefined, err + } + if app.Status.StartTime == nil && app.CreationTimestamp.Add(common.GetConfig().Timeout.Duration).Before(time.Now()) { + return pluginsCore.PhaseInfoUndefined, fmt.Errorf("kubeflow operator hasn't updated the mpi custom resource since creation time %v", app.CreationTimestamp) + } + currentCondition, err := common.ExtractCurrentCondition(app.Status.Conditions) + if err != nil { + return pluginsCore.PhaseInfoUndefined, err + } + + occurredAt := time.Now() + statusDetails, _ := utils.MarshalObjToStruct(app.Status) + taskPhaseInfo := pluginsCore.TaskInfo{ + Logs: taskLogs, + LogContext: nil, // TODO populate log context + OccurredAt: &occurredAt, + CustomInfo: statusDetails, + } + + phaseInfo, err := common.GetPhaseInfo(currentCondition, occurredAt, taskPhaseInfo) + + phaseVersionUpdateErr := k8s.MaybeUpdatePhaseVersionFromPluginContext(&phaseInfo, &pluginContext) + if phaseVersionUpdateErr != nil { + return phaseInfo, phaseVersionUpdateErr + } + + return phaseInfo, err +} + +func init() { + if err := 
kubeflowv1.AddToScheme(scheme.Scheme); err != nil { + panic(err) + } + + pluginmachinery.PluginRegistry().RegisterK8sPlugin( + k8s.PluginEntry{ + ID: common.MPITaskType, + RegisteredTaskTypes: []pluginsCore.TaskType{common.MPITaskType}, + ResourceToWatch: &kubeflowv1.MPIJob{}, + Plugin: mpiOperatorResourceHandler{}, + IsDefault: false, + }) +} diff --git a/flyteplugins/go/tasks/plugins/k8s/kfoperators/mpi/mpi_test.go b/flyteplugins/go/tasks/plugins/k8s/kfoperators/mpi/mpi_test.go new file mode 100644 index 0000000000..22741d6641 --- /dev/null +++ b/flyteplugins/go/tasks/plugins/k8s/kfoperators/mpi/mpi_test.go @@ -0,0 +1,1007 @@ +package mpi + +import ( + "context" + "fmt" + "reflect" + "testing" + "time" + + structpb "github.com/golang/protobuf/ptypes/struct" + kubeflowv1 "github.com/kubeflow/training-operator/pkg/apis/kubeflow.org/v1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/logs" + pluginsCore "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core/mocks" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/flytek8s" + flytek8sConfig "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/flytek8s/config" + pluginIOMocks "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/io/mocks" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/k8s" + k8smocks "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/k8s/mocks" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/utils" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/plugins/k8s/kfoperators/common" + stdlibUtils "github.com/flyteorg/flyte/v2/flytestdlib/utils" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" + 
"github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins" + kfplugins "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins/kubeflow" +) + +const testImage = "image://" +const serviceAccount = "mpi_sa" +const mpiID = "the job 1" +const mpiID2 = "the job 2" + +var ( + dummyEnvVars = []*core.KeyValuePair{ + {Key: "Env_Var", Value: "Env_Val"}, + } + + testArgs = []string{ + "test-args", + } + + dummyAnnotations = map[string]string{ + "annotation-key": "annotation-value", + } + dummyLabels = map[string]string{ + "label-key": "label-value", + } + + resourceRequirements = &corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1000m"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("512Mi"), + }, + } + + jobName = "the-job" + jobNamespace = "mpi-namespace" +) + +func dummyMPICustomObj(workers int32, launcher int32, slots int32) *plugins.DistributedMPITrainingTask { + return &plugins.DistributedMPITrainingTask{ + NumWorkers: workers, + NumLauncherReplicas: launcher, + Slots: slots, + } +} + +func dummyMPITaskTemplate(id string, args ...interface{}) *core.TaskTemplate { + + var mpiObjJSON string + var err error + + for _, arg := range args { + switch t := arg.(type) { + case *kfplugins.DistributedMPITrainingTask: + var mpiCustomObj = t + mpiObjJSON, err = utils.MarshalToString(mpiCustomObj) + case *plugins.DistributedMPITrainingTask: + var mpiCustomObj = t + mpiObjJSON, err = utils.MarshalToString(mpiCustomObj) + default: + err = fmt.Errorf("Unknown input type %T", t) + } + } + + if err != nil { + panic(err) + } + + structObj := structpb.Struct{} + + err = stdlibUtils.UnmarshalStringToPb(mpiObjJSON, &structObj) + if err != nil { + panic(err) + } + + return &core.TaskTemplate{ + Id: &core.Identifier{Name: id}, + Type: "container", + Target: &core.TaskTemplate_Container{ + Container: 
&core.Container{ + Image: testImage, + Args: testArgs, + Env: dummyEnvVars, + }, + }, + Custom: &structObj, + } +} + +func dummyMPITaskContext(taskTemplate *core.TaskTemplate, resources *corev1.ResourceRequirements, extendedResources *core.ExtendedResources) pluginsCore.TaskExecutionContext { + taskCtx := &mocks.TaskExecutionContext{} + inputReader := &pluginIOMocks.InputReader{} + inputReader.OnGetInputPrefixPath().Return("/input/prefix") + inputReader.OnGetInputPath().Return("/input") + inputReader.OnGetMatch(mock.Anything).Return(&core.LiteralMap{}, nil) + taskCtx.OnInputReader().Return(inputReader) + + outputReader := &pluginIOMocks.OutputWriter{} + outputReader.OnGetOutputPath().Return("/data/outputs.pb") + outputReader.OnGetOutputPrefixPath().Return("/data/") + outputReader.OnGetRawOutputPrefix().Return("") + outputReader.OnGetCheckpointPrefix().Return("/checkpoint") + outputReader.OnGetPreviousCheckpointsPrefix().Return("/prev") + taskCtx.OnOutputWriter().Return(outputReader) + + taskReader := &mocks.TaskReader{} + taskReader.OnReadMatch(mock.Anything).Return(taskTemplate, nil) + taskCtx.OnTaskReader().Return(taskReader) + + tID := &mocks.TaskExecutionID{} + tID.OnGetID().Return(core.TaskExecutionIdentifier{ + NodeExecutionId: &core.NodeExecutionIdentifier{ + ExecutionId: &core.WorkflowExecutionIdentifier{ + Name: "my_name", + Project: "my_project", + Domain: "my_domain", + }, + }, + }) + tID.OnGetGeneratedName().Return("some-acceptable-name") + tID.On("GetUniqueNodeID").Return("an-unique-id") + + overrides := &mocks.TaskOverrides{} + overrides.OnGetResources().Return(resources) + overrides.OnGetExtendedResources().Return(extendedResources) + overrides.OnGetContainerImage().Return("") + overrides.OnGetPodTemplate().Return(nil) + + taskExecutionMetadata := &mocks.TaskExecutionMetadata{} + taskExecutionMetadata.OnGetTaskExecutionID().Return(tID) + taskExecutionMetadata.OnGetNamespace().Return("test-namespace") + 
taskExecutionMetadata.OnGetAnnotations().Return(dummyAnnotations) + taskExecutionMetadata.OnGetLabels().Return(dummyLabels) + taskExecutionMetadata.OnGetOwnerReference().Return(v1.OwnerReference{ + Kind: "node", + Name: "blah", + }) + taskExecutionMetadata.OnIsInterruptible().Return(true) + taskExecutionMetadata.OnGetOverrides().Return(overrides) + taskExecutionMetadata.OnGetK8sServiceAccount().Return(serviceAccount) + taskExecutionMetadata.OnGetPlatformResources().Return(&corev1.ResourceRequirements{}) + taskExecutionMetadata.OnGetEnvironmentVariables().Return(nil) + taskExecutionMetadata.OnGetConsoleURL().Return("") + taskCtx.OnTaskExecutionMetadata().Return(taskExecutionMetadata) + + pluginStateReaderMock := mocks.PluginStateReader{} + pluginStateReaderMock.On("Get", mock.AnythingOfType(reflect.TypeOf(&k8s.PluginState{}).String())).Return( + func(v interface{}) uint8 { + *(v.(*k8s.PluginState)) = k8s.PluginState{} + return 0 + }, + func(v interface{}) error { + return nil + }) + + taskCtx.OnPluginStateReader().Return(&pluginStateReaderMock) + return taskCtx +} + +func dummyMPIPluginContext(taskTemplate *core.TaskTemplate, resources *corev1.ResourceRequirements, extendedResources *core.ExtendedResources, pluginState k8s.PluginState) *k8smocks.PluginContext { + pCtx := &k8smocks.PluginContext{} + inputReader := &pluginIOMocks.InputReader{} + inputReader.OnGetInputPrefixPath().Return("/input/prefix") + inputReader.OnGetInputPath().Return("/input") + inputReader.OnGetMatch(mock.Anything).Return(&core.LiteralMap{}, nil) + pCtx.OnInputReader().Return(inputReader) + + outputReader := &pluginIOMocks.OutputWriter{} + outputReader.OnGetOutputPath().Return("/data/outputs.pb") + outputReader.OnGetOutputPrefixPath().Return("/data/") + outputReader.OnGetRawOutputPrefix().Return("") + outputReader.OnGetCheckpointPrefix().Return("/checkpoint") + outputReader.OnGetPreviousCheckpointsPrefix().Return("/prev") + pCtx.OnOutputWriter().Return(outputReader) + + taskReader := 
&mocks.TaskReader{}
	// The task reader hands back the provided template for any Read call.
	taskReader.OnReadMatch(mock.Anything).Return(taskTemplate, nil)
	pCtx.OnTaskReader().Return(taskReader)

	// Fixed execution identity used by all tests built on this plugin context.
	tID := &mocks.TaskExecutionID{}
	tID.OnGetID().Return(core.TaskExecutionIdentifier{
		NodeExecutionId: &core.NodeExecutionIdentifier{
			ExecutionId: &core.WorkflowExecutionIdentifier{
				Name:    "my_name",
				Project: "my_project",
				Domain:  "my_domain",
			},
		},
	})
	tID.OnGetGeneratedName().Return("some-acceptable-name")
	tID.On("GetUniqueNodeID").Return("an-unique-id")

	// Resource/extended-resource overrides come from the caller's arguments.
	overrides := &mocks.TaskOverrides{}
	overrides.OnGetResources().Return(resources)
	overrides.OnGetExtendedResources().Return(extendedResources)
	overrides.OnGetContainerImage().Return("")

	taskExecutionMetadata := &mocks.TaskExecutionMetadata{}
	taskExecutionMetadata.OnGetTaskExecutionID().Return(tID)
	taskExecutionMetadata.OnGetNamespace().Return("test-namespace")
	taskExecutionMetadata.OnGetAnnotations().Return(dummyAnnotations)
	taskExecutionMetadata.OnGetLabels().Return(dummyLabels)
	taskExecutionMetadata.OnGetOwnerReference().Return(v1.OwnerReference{
		Kind: "node",
		Name: "blah",
	})
	taskExecutionMetadata.OnIsInterruptible().Return(true)
	taskExecutionMetadata.OnGetOverrides().Return(overrides)
	taskExecutionMetadata.OnGetK8sServiceAccount().Return(serviceAccount)
	taskExecutionMetadata.OnGetPlatformResources().Return(&corev1.ResourceRequirements{})
	taskExecutionMetadata.OnGetEnvironmentVariables().Return(nil)
	taskExecutionMetadata.OnGetConsoleURL().Return("")
	pCtx.OnTaskExecutionMetadata().Return(taskExecutionMetadata)

	// Plugin-state reader writes the caller-supplied pluginState into the
	// destination pointer, so GetTaskPhase sees a prior recorded phase.
	pluginStateReaderMock := mocks.PluginStateReader{}
	pluginStateReaderMock.On("Get", mock.AnythingOfType(reflect.TypeOf(&pluginState).String())).Return(
		func(v interface{}) uint8 {
			*(v.(*k8s.PluginState)) = pluginState
			return 0
		},
		func(v interface{}) error {
			return nil
		})

	pCtx.OnPluginStateReader().Return(&pluginStateReaderMock)
	return pCtx
}

func dummyMPIJobResource(mpiResourceHandler
mpiOperatorResourceHandler,
	workers int32, launcher int32, slots int32, conditionType kubeflowv1.JobConditionType) *kubeflowv1.MPIJob {
	// Builds a dummy MPIJob with a condition history that ends in the requested
	// conditionType; the Spec is produced by the real BuildResource so tests
	// exercise the plugin's own resource construction.
	var jobConditions []kubeflowv1.JobCondition

	now := time.Now()

	jobCreated := kubeflowv1.JobCondition{
		Type:    kubeflowv1.JobCreated,
		Status:  corev1.ConditionTrue,
		Reason:  "MPICreated",
		Message: "MPIJob the-job is created.",
		LastUpdateTime: v1.Time{
			Time: now,
		},
		LastTransitionTime: v1.Time{
			Time: now,
		},
	}
	jobRunningActive := kubeflowv1.JobCondition{
		Type:    kubeflowv1.JobRunning,
		Status:  corev1.ConditionTrue,
		Reason:  "MPIJobRunning",
		Message: "MPIJob the-job is running.",
		LastUpdateTime: v1.Time{
			Time: now.Add(time.Minute),
		},
		LastTransitionTime: v1.Time{
			Time: now.Add(time.Minute),
		},
	}
	// Terminal histories carry an inactive (ConditionFalse) copy of Running.
	jobRunningInactive := *jobRunningActive.DeepCopy()
	jobRunningInactive.Status = corev1.ConditionFalse
	jobSucceeded := kubeflowv1.JobCondition{
		Type:    kubeflowv1.JobSucceeded,
		Status:  corev1.ConditionTrue,
		Reason:  "MPIJobSucceeded",
		Message: "MPIJob the-job is successfully completed.",
		LastUpdateTime: v1.Time{
			Time: now.Add(2 * time.Minute),
		},
		LastTransitionTime: v1.Time{
			Time: now.Add(2 * time.Minute),
		},
	}
	jobFailed := kubeflowv1.JobCondition{
		Type:    kubeflowv1.JobFailed,
		Status:  corev1.ConditionTrue,
		Reason:  "MPIJobFailed",
		Message: "MPIJob the-job is failed.",
		LastUpdateTime: v1.Time{
			Time: now.Add(2 * time.Minute),
		},
		LastTransitionTime: v1.Time{
			Time: now.Add(2 * time.Minute),
		},
	}
	jobRestarting := kubeflowv1.JobCondition{
		Type:    kubeflowv1.JobRestarting,
		Status:  corev1.ConditionTrue,
		Reason:  "MPIJobRestarting",
		Message: "MPIJob the-job is restarting because some replica(s) failed.",
		LastUpdateTime: v1.Time{
			Time: now.Add(3 * time.Minute),
		},
		LastTransitionTime: v1.Time{
			Time: now.Add(3 * time.Minute),
		},
	}

	// Each target condition implies the full history leading up to it.
	switch conditionType {
	case kubeflowv1.JobCreated:
		jobConditions = []kubeflowv1.JobCondition{
			jobCreated,
		}
	case kubeflowv1.JobRunning:
		jobConditions = []kubeflowv1.JobCondition{
			jobCreated,
			jobRunningActive,
		}
	case kubeflowv1.JobSucceeded:
		jobConditions = []kubeflowv1.JobCondition{
			jobCreated,
			jobRunningInactive,
			jobSucceeded,
		}
	case kubeflowv1.JobFailed:
		jobConditions = []kubeflowv1.JobCondition{
			jobCreated,
			jobRunningInactive,
			jobFailed,
		}
	case kubeflowv1.JobRestarting:
		jobConditions = []kubeflowv1.JobCondition{
			jobCreated,
			jobRunningInactive,
			jobFailed,
			jobRestarting,
		}
	}

	mpiObj := dummyMPICustomObj(workers, launcher, slots)
	taskTemplate := dummyMPITaskTemplate(mpiID, mpiObj)
	resource, err := mpiResourceHandler.BuildResource(context.TODO(), dummyMPITaskContext(taskTemplate, resourceRequirements, nil))
	if err != nil {
		// Test helper: construction failure is a programming error in the test.
		panic(err)
	}

	return &kubeflowv1.MPIJob{
		ObjectMeta: v1.ObjectMeta{
			Name:      jobName,
			Namespace: jobNamespace,
		},
		Spec: resource.(*kubeflowv1.MPIJob).Spec,
		Status: kubeflowv1.JobStatus{
			Conditions:        jobConditions,
			ReplicaStatuses:   nil,
			StartTime:         &v1.Time{Time: time.Now()},
			CompletionTime:    nil,
			LastReconcileTime: nil,
		},
	}
}

// TestBuildResourceMPI verifies replica counts, slots, metadata propagation
// and container resources on a v0 MPI task built by BuildResource.
func TestBuildResourceMPI(t *testing.T) {
	mpiResourceHandler := mpiOperatorResourceHandler{}

	mpiObj := dummyMPICustomObj(100, 50, 1)
	taskTemplate := dummyMPITaskTemplate(mpiID2, mpiObj)

	resource, err := mpiResourceHandler.BuildResource(context.TODO(), dummyMPITaskContext(taskTemplate, resourceRequirements, nil))
	assert.NoError(t, err)
	assert.NotNil(t, resource)

	mpiJob, ok := resource.(*kubeflowv1.MPIJob)
	assert.True(t, ok)
	assert.Equal(t, int32(50), *mpiJob.Spec.MPIReplicaSpecs[kubeflowv1.MPIJobReplicaTypeLauncher].Replicas)
	assert.Equal(t, int32(100), *mpiJob.Spec.MPIReplicaSpecs[kubeflowv1.MPIJobReplicaTypeWorker].Replicas)
	assert.Equal(t, int32(1), *mpiJob.Spec.SlotsPerWorker)

	// verify TaskExecutionMetadata labels and annotations are copied to the MPIJob
	for k, v := range dummyAnnotations
{
		// Annotations from TaskExecutionMetadata must appear on every replica pod template.
		for _, replicaSpec := range mpiJob.Spec.MPIReplicaSpecs {
			assert.Equal(t, v, replicaSpec.Template.ObjectMeta.Annotations[k])
		}
	}
	// Labels likewise propagate to every replica pod template.
	for k, v := range dummyLabels {
		for _, replicaSpec := range mpiJob.Spec.MPIReplicaSpecs {
			assert.Equal(t, v, replicaSpec.Template.ObjectMeta.Labels[k])
		}
	}

	// Container resources must match the override resource requirements.
	for _, replicaSpec := range mpiJob.Spec.MPIReplicaSpecs {
		for _, container := range replicaSpec.Template.Spec.Containers {
			assert.Equal(t, resourceRequirements.Requests, container.Resources.Requests)
			assert.Equal(t, resourceRequirements.Limits, container.Resources.Limits)
		}
	}
}

// TestBuildResourceMPIForWrongInput: zero workers+launchers must error; a
// minimal valid config yields empty command/args on the worker container.
func TestBuildResourceMPIForWrongInput(t *testing.T) {
	mpiResourceHandler := mpiOperatorResourceHandler{}

	mpiObj := dummyMPICustomObj(0, 0, 1)
	taskTemplate := dummyMPITaskTemplate(mpiID, mpiObj)

	_, err := mpiResourceHandler.BuildResource(context.TODO(), dummyMPITaskContext(taskTemplate, resourceRequirements, nil))
	assert.Error(t, err)

	mpiObj = dummyMPICustomObj(1, 1, 1)
	taskTemplate = dummyMPITaskTemplate(mpiID2, mpiObj)

	resource, err := mpiResourceHandler.BuildResource(context.TODO(), dummyMPITaskContext(taskTemplate, resourceRequirements, nil))
	app, ok := resource.(*kubeflowv1.MPIJob)
	assert.Nil(t, err)
	assert.Equal(t, true, ok)
	assert.Equal(t, []string{}, app.Spec.MPIReplicaSpecs[kubeflowv1.MPIJobReplicaTypeWorker].Template.Spec.Containers[0].Command)
	assert.Equal(t, []string{}, app.Spec.MPIReplicaSpecs[kubeflowv1.MPIJobReplicaTypeWorker].Template.Spec.Containers[0].Args)
}

// TestBuildResourceMPIExtendedResources checks GPU accelerator node selectors
// and tolerations, with and without task-level extended-resource overrides.
func TestBuildResourceMPIExtendedResources(t *testing.T) {
	// Global k8s plugin config drives the GPU label/toleration behavior below.
	assert.NoError(t, flytek8sConfig.SetK8sPluginConfig(&flytek8sConfig.K8sPluginConfig{
		GpuDeviceNodeLabel:                 "gpu-node-label",
		GpuPartitionSizeNodeLabel:          "gpu-partition-size",
		GpuResourceName:                    flytek8s.ResourceNvidiaGPU,
		AddTolerationsForExtendedResources: []string{"nvidia.com/gpu"},
	}))

	fixtures := []struct {
		name                      string
		resources                 *corev1.ResourceRequirements
		extendedResourcesBase
*core.ExtendedResources + extendedResourcesOverride *core.ExtendedResources + expectedNsr []corev1.NodeSelectorTerm + expectedTol []corev1.Toleration + }{ + { + "without overrides", + &corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + flytek8s.ResourceNvidiaGPU: resource.MustParse("1"), + }, + }, + &core.ExtendedResources{ + GpuAccelerator: &core.GPUAccelerator{ + Device: "nvidia-tesla-t4", + }, + }, + nil, + []corev1.NodeSelectorTerm{ + { + MatchExpressions: []corev1.NodeSelectorRequirement{ + corev1.NodeSelectorRequirement{ + Key: "gpu-node-label", + Operator: corev1.NodeSelectorOpIn, + Values: []string{"nvidia-tesla-t4"}, + }, + }, + }, + }, + []corev1.Toleration{ + { + Key: "gpu-node-label", + Value: "nvidia-tesla-t4", + Operator: corev1.TolerationOpEqual, + Effect: corev1.TaintEffectNoSchedule, + }, + { + Key: "nvidia.com/gpu", + Operator: corev1.TolerationOpExists, + Effect: corev1.TaintEffectNoSchedule, + }, + }, + }, + { + "with overrides", + &corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + flytek8s.ResourceNvidiaGPU: resource.MustParse("1"), + }, + }, + &core.ExtendedResources{ + GpuAccelerator: &core.GPUAccelerator{ + Device: "nvidia-tesla-t4", + }, + }, + &core.ExtendedResources{ + GpuAccelerator: &core.GPUAccelerator{ + Device: "nvidia-tesla-a100", + PartitionSizeValue: &core.GPUAccelerator_PartitionSize{ + PartitionSize: "1g.5gb", + }, + }, + }, + []corev1.NodeSelectorTerm{ + { + MatchExpressions: []corev1.NodeSelectorRequirement{ + corev1.NodeSelectorRequirement{ + Key: "gpu-node-label", + Operator: corev1.NodeSelectorOpIn, + Values: []string{"nvidia-tesla-a100"}, + }, + corev1.NodeSelectorRequirement{ + Key: "gpu-partition-size", + Operator: corev1.NodeSelectorOpIn, + Values: []string{"1g.5gb"}, + }, + }, + }, + }, + []corev1.Toleration{ + { + Key: "gpu-node-label", + Value: "nvidia-tesla-a100", + Operator: corev1.TolerationOpEqual, + Effect: corev1.TaintEffectNoSchedule, + }, + { + Key: "gpu-partition-size", + Value: 
"1g.5gb", + Operator: corev1.TolerationOpEqual, + Effect: corev1.TaintEffectNoSchedule, + }, + { + Key: "nvidia.com/gpu", + Operator: corev1.TolerationOpExists, + Effect: corev1.TaintEffectNoSchedule, + }, + }, + }, + } + + for _, f := range fixtures { + t.Run(f.name, func(t *testing.T) { + mpiObj := dummyMPICustomObj(100, 50, 1) + taskTemplate := dummyMPITaskTemplate(mpiID2, mpiObj) + taskTemplate.ExtendedResources = f.extendedResourcesBase + taskContext := dummyMPITaskContext(taskTemplate, f.resources, f.extendedResourcesOverride) + mpiResourceHandler := mpiOperatorResourceHandler{} + r, err := mpiResourceHandler.BuildResource(context.TODO(), taskContext) + assert.Nil(t, err) + assert.NotNil(t, r) + mpiJob, ok := r.(*kubeflowv1.MPIJob) + assert.True(t, ok) + + for _, replicaSpec := range mpiJob.Spec.MPIReplicaSpecs { + assert.EqualValues( + t, + f.expectedNsr, + replicaSpec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms, + ) + assert.EqualValues( + t, + f.expectedTol, + replicaSpec.Template.Spec.Tolerations, + ) + } + }) + } +} + +func TestGetTaskPhase(t *testing.T) { + mpiResourceHandler := mpiOperatorResourceHandler{} + ctx := context.TODO() + + dummyMPIJobResourceCreator := func(conditionType kubeflowv1.JobConditionType) *kubeflowv1.MPIJob { + return dummyMPIJobResource(mpiResourceHandler, 2, 1, 1, conditionType) + } + + pluginContext := dummyMPIPluginContext(dummyMPITaskTemplate("", dummyMPICustomObj(2, 1, 1)), resourceRequirements, nil, k8s.PluginState{}) + taskPhase, err := mpiResourceHandler.GetTaskPhase(ctx, pluginContext, dummyMPIJobResourceCreator(kubeflowv1.JobCreated)) + assert.NoError(t, err) + assert.Equal(t, pluginsCore.PhaseQueued, taskPhase.Phase()) + assert.NotNil(t, taskPhase.Info()) + assert.Nil(t, err) + + taskPhase, err = mpiResourceHandler.GetTaskPhase(ctx, pluginContext, dummyMPIJobResourceCreator(kubeflowv1.JobRunning)) + assert.NoError(t, err) + assert.Equal(t, 
pluginsCore.PhaseRunning, taskPhase.Phase())
	assert.NotNil(t, taskPhase.Info())
	assert.Nil(t, err)

	// JobSucceeded -> PhaseSuccess.
	taskPhase, err = mpiResourceHandler.GetTaskPhase(ctx, pluginContext, dummyMPIJobResourceCreator(kubeflowv1.JobSucceeded))
	assert.NoError(t, err)
	assert.Equal(t, pluginsCore.PhaseSuccess, taskPhase.Phase())
	assert.NotNil(t, taskPhase.Info())
	assert.Nil(t, err)

	// JobFailed -> PhaseRetryableFailure.
	taskPhase, err = mpiResourceHandler.GetTaskPhase(ctx, pluginContext, dummyMPIJobResourceCreator(kubeflowv1.JobFailed))
	assert.NoError(t, err)
	assert.Equal(t, pluginsCore.PhaseRetryableFailure, taskPhase.Phase())
	assert.NotNil(t, taskPhase.Info())
	assert.Nil(t, err)

	// JobRestarting is still reported as PhaseRunning.
	taskPhase, err = mpiResourceHandler.GetTaskPhase(ctx, pluginContext, dummyMPIJobResourceCreator(kubeflowv1.JobRestarting))
	assert.NoError(t, err)
	assert.Equal(t, pluginsCore.PhaseRunning, taskPhase.Phase())
	assert.NotNil(t, taskPhase.Info())
	assert.Nil(t, err)
}

// TestGetTaskPhaseIncreasePhaseVersion: when the stored plugin state already
// records PhaseQueued, a repeated Queued observation bumps the phase version.
func TestGetTaskPhaseIncreasePhaseVersion(t *testing.T) {
	mpiResourceHandler := mpiOperatorResourceHandler{}
	ctx := context.TODO()

	pluginState := k8s.PluginState{
		Phase:        pluginsCore.PhaseQueued,
		PhaseVersion: pluginsCore.DefaultPhaseVersion,
		Reason:       "task submitted to K8s",
	}
	pluginContext := dummyMPIPluginContext(dummyMPITaskTemplate("", dummyMPICustomObj(2, 1, 1)), resourceRequirements, nil, pluginState)

	taskPhase, err := mpiResourceHandler.GetTaskPhase(ctx, pluginContext, dummyMPIJobResource(mpiResourceHandler, 2, 1, 1, kubeflowv1.JobCreated))

	assert.NoError(t, err)
	assert.Equal(t, taskPhase.Version(), pluginsCore.DefaultPhaseVersion+1)
}

// TestGetLogs verifies per-worker Kubernetes dashboard log links for a running MPIJob.
func TestGetLogs(t *testing.T) {
	assert.NoError(t, logs.SetLogConfig(&logs.LogConfig{
		IsKubernetesEnabled: true,
		KubernetesURL:       "k8s.com",
	}))

	workers := int32(2)
	launcher := int32(1)
	slots := int32(1)

	mpiResourceHandler := mpiOperatorResourceHandler{}
	mpiJob := dummyMPIJobResource(mpiResourceHandler, workers, launcher, slots, kubeflowv1.JobRunning)
taskTemplate := dummyMPITaskTemplate("", dummyMPICustomObj(workers, launcher, slots))
	pluginContext := dummyMPIPluginContext(taskTemplate, resourceRequirements, nil, k8s.PluginState{})
	jobLogs, err := common.GetLogs(pluginContext, common.MPITaskType, mpiJob.ObjectMeta, taskTemplate, false, workers, launcher, 0, 0, kubeflowv1.MPIJobDefaultContainerName)

	// One log link per worker replica, pointing at the worker pod's log view.
	assert.NoError(t, err)
	assert.Equal(t, 2, len(jobLogs))
	assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-0/pod?namespace=mpi-namespace", jobNamespace, jobName), jobLogs[0].Uri)
	assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-1/pod?namespace=mpi-namespace", jobNamespace, jobName), jobLogs[1].Uri)
}

// TestGetProperties: the MPI handler advertises default (empty) plugin properties.
func TestGetProperties(t *testing.T) {
	mpiResourceHandler := mpiOperatorResourceHandler{}
	expected := k8s.PluginProperties{}
	assert.Equal(t, expected, mpiResourceHandler.GetProperties())
}

// TestReplicaCounts: table-driven check of which replica specs BuildResource
// emits (or whether it errors) for various launcher/worker counts.
func TestReplicaCounts(t *testing.T) {
	for _, test := range []struct {
		name                 string
		launcherReplicaCount int32
		workerReplicaCount   int32
		expectError          bool
		contains             []kubeflowv1.ReplicaType
		notContains          []kubeflowv1.ReplicaType
	}{
		{"NoWorkers", 1, 0, true, nil, nil},
		{"Minimum One Launcher", 0, 1, false, []kubeflowv1.ReplicaType{kubeflowv1.MPIJobReplicaTypeLauncher, kubeflowv1.MPIJobReplicaTypeWorker}, []kubeflowv1.ReplicaType{}},
		{"Works", 1, 1, false, []kubeflowv1.ReplicaType{kubeflowv1.MPIJobReplicaTypeLauncher, kubeflowv1.MPIJobReplicaTypeWorker}, []kubeflowv1.ReplicaType{}},
	} {
		t.Run(test.name, func(t *testing.T) {
			mpiResourceHandler := mpiOperatorResourceHandler{}

			// NOTE: dummyMPICustomObj takes (workers, launcher, slots).
			mpiObj := dummyMPICustomObj(test.workerReplicaCount, test.launcherReplicaCount, 1)
			taskTemplate := dummyMPITaskTemplate(mpiID2, mpiObj)

			resource, err := mpiResourceHandler.BuildResource(context.TODO(), dummyMPITaskContext(taskTemplate, resourceRequirements, nil))
			if test.expectError {
				assert.Error(t, err)
				assert.Nil(t, resource)
				return
			}

			assert.NoError(t, err)
			assert.NotNil(t,
resource) + + job, ok := resource.(*kubeflowv1.MPIJob) + assert.True(t, ok) + + assert.Len(t, job.Spec.MPIReplicaSpecs, len(test.contains)) + for _, replicaType := range test.contains { + assert.Contains(t, job.Spec.MPIReplicaSpecs, replicaType) + } + for _, replicaType := range test.notContains { + assert.NotContains(t, job.Spec.MPIReplicaSpecs, replicaType) + } + }) + } +} + +func TestBuildResourceMPIV1(t *testing.T) { + launcherCommand := []string{"python", "launcher.py"} + workerCommand := []string{"/usr/sbin/sshd", "/.sshd_config"} + taskConfigs := []*kfplugins.DistributedMPITrainingTask{ + { + LauncherReplicas: &kfplugins.DistributedMPITrainingReplicaSpec{ + Image: testImage, + Resources: &core.Resources{ + Requests: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "250m"}, + {Name: core.Resources_MEMORY, Value: "250Mi"}, + }, + Limits: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "500m"}, + {Name: core.Resources_MEMORY, Value: "500Mi"}, + }, + }, + Command: launcherCommand, + }, + WorkerReplicas: &kfplugins.DistributedMPITrainingReplicaSpec{ + Replicas: 100, + Resources: &core.Resources{ + Requests: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "1024m"}, + {Name: core.Resources_MEMORY, Value: "1Gi"}, + }, + Limits: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "2048m"}, + {Name: core.Resources_MEMORY, Value: "2Gi"}, + }, + }, + Command: workerCommand, + }, + Slots: int32(1), + }, + { + LauncherReplicas: &kfplugins.DistributedMPITrainingReplicaSpec{ + Common: &plugins.CommonReplicaSpec{ + Image: testImage, + Resources: &core.Resources{ + Requests: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "250m"}, + {Name: core.Resources_MEMORY, Value: "250Mi"}, + }, + Limits: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "500m"}, + {Name: core.Resources_MEMORY, Value: "500Mi"}, + }, + }, + }, + Command: launcherCommand, + }, + 
WorkerReplicas: &kfplugins.DistributedMPITrainingReplicaSpec{ + Common: &plugins.CommonReplicaSpec{ + Replicas: 100, + Resources: &core.Resources{ + Requests: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "1024m"}, + {Name: core.Resources_MEMORY, Value: "1Gi"}, + }, + Limits: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "2048m"}, + {Name: core.Resources_MEMORY, Value: "2Gi"}, + }, + }, + }, + Command: workerCommand, + }, + Slots: int32(1), + }, + } + + for _, taskConfig := range taskConfigs { + launcherResourceRequirements := &corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("250m"), + corev1.ResourceMemory: resource.MustParse("250Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("500m"), + corev1.ResourceMemory: resource.MustParse("500Mi"), + }, + } + + workerResourceRequirements := &corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1024m"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("2048m"), + corev1.ResourceMemory: resource.MustParse("2Gi"), + }, + } + + mpiResourceHandler := mpiOperatorResourceHandler{} + + taskTemplate := dummyMPITaskTemplate(mpiID2, taskConfig) + taskTemplate.TaskTypeVersion = 1 + + resource, err := mpiResourceHandler.BuildResource(context.TODO(), dummyMPITaskContext(taskTemplate, resourceRequirements, nil)) + assert.NoError(t, err) + assert.NotNil(t, resource) + + mpiJob, ok := resource.(*kubeflowv1.MPIJob) + assert.True(t, ok) + assert.Equal(t, int32(1), *mpiJob.Spec.MPIReplicaSpecs[kubeflowv1.MPIJobReplicaTypeLauncher].Replicas) + assert.Equal(t, int32(100), *mpiJob.Spec.MPIReplicaSpecs[kubeflowv1.MPIJobReplicaTypeWorker].Replicas) + assert.Equal(t, int32(1), *mpiJob.Spec.SlotsPerWorker) + assert.Equal(t, *launcherResourceRequirements, 
mpiJob.Spec.MPIReplicaSpecs[kubeflowv1.MPIJobReplicaTypeLauncher].Template.Spec.Containers[0].Resources) + assert.Equal(t, *workerResourceRequirements, mpiJob.Spec.MPIReplicaSpecs[kubeflowv1.MPIJobReplicaTypeWorker].Template.Spec.Containers[0].Resources) + assert.Equal(t, launcherCommand, mpiJob.Spec.MPIReplicaSpecs[kubeflowv1.MPIJobReplicaTypeLauncher].Template.Spec.Containers[0].Args) + assert.Equal(t, workerCommand, mpiJob.Spec.MPIReplicaSpecs[kubeflowv1.MPIJobReplicaTypeWorker].Template.Spec.Containers[0].Args) + } +} + +func TestBuildResourceMPIV1WithOnlyWorkerReplica(t *testing.T) { + workerCommand := []string{"/usr/sbin/sshd", "/.sshd_config"} + + taskConfigs := []*kfplugins.DistributedMPITrainingTask{ + { + WorkerReplicas: &kfplugins.DistributedMPITrainingReplicaSpec{ + Replicas: 100, + Resources: &core.Resources{ + Requests: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "1024m"}, + {Name: core.Resources_MEMORY, Value: "1Gi"}, + }, + Limits: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "2048m"}, + {Name: core.Resources_MEMORY, Value: "2Gi"}, + }, + }, + Command: []string{"/usr/sbin/sshd", "/.sshd_config"}, + }, + Slots: int32(1), + }, + { + WorkerReplicas: &kfplugins.DistributedMPITrainingReplicaSpec{ + Common: &plugins.CommonReplicaSpec{ + Replicas: 100, + Resources: &core.Resources{ + Requests: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "1024m"}, + {Name: core.Resources_MEMORY, Value: "1Gi"}, + }, + Limits: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "2048m"}, + {Name: core.Resources_MEMORY, Value: "2Gi"}, + }, + }, + }, + Command: []string{"/usr/sbin/sshd", "/.sshd_config"}, + }, + Slots: int32(1), + }, + } + + for _, taskConfig := range taskConfigs { + workerResourceRequirements := &corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1024m"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + 
Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("2048m"), + corev1.ResourceMemory: resource.MustParse("2Gi"), + }, + } + + mpiResourceHandler := mpiOperatorResourceHandler{} + + taskTemplate := dummyMPITaskTemplate(mpiID2, taskConfig) + taskTemplate.TaskTypeVersion = 1 + + resource, err := mpiResourceHandler.BuildResource(context.TODO(), dummyMPITaskContext(taskTemplate, resourceRequirements, nil)) + assert.NoError(t, err) + assert.NotNil(t, resource) + + mpiJob, ok := resource.(*kubeflowv1.MPIJob) + assert.True(t, ok) + assert.Equal(t, int32(1), *mpiJob.Spec.MPIReplicaSpecs[kubeflowv1.MPIJobReplicaTypeLauncher].Replicas) + assert.Equal(t, int32(100), *mpiJob.Spec.MPIReplicaSpecs[kubeflowv1.MPIJobReplicaTypeWorker].Replicas) + assert.Equal(t, int32(1), *mpiJob.Spec.SlotsPerWorker) + assert.Equal(t, *workerResourceRequirements, mpiJob.Spec.MPIReplicaSpecs[kubeflowv1.MPIJobReplicaTypeWorker].Template.Spec.Containers[0].Resources) + assert.Equal(t, testArgs, mpiJob.Spec.MPIReplicaSpecs[kubeflowv1.MPIJobReplicaTypeLauncher].Template.Spec.Containers[0].Args) + assert.Equal(t, workerCommand, mpiJob.Spec.MPIReplicaSpecs[kubeflowv1.MPIJobReplicaTypeWorker].Template.Spec.Containers[0].Args) + } +} + +func TestBuildResourceMPIV1ResourceTolerations(t *testing.T) { + gpuToleration := corev1.Toleration{ + Key: "nvidia.com/gpu", + Value: "present", + Operator: corev1.TolerationOpEqual, + Effect: corev1.TaintEffectNoSchedule, + } + assert.NoError(t, flytek8sConfig.SetK8sPluginConfig(&flytek8sConfig.K8sPluginConfig{ + GpuResourceName: flytek8s.ResourceNvidiaGPU, + ResourceTolerations: map[corev1.ResourceName][]corev1.Toleration{ + flytek8s.ResourceNvidiaGPU: {gpuToleration}, + }, + })) + + taskConfigs := []*kfplugins.DistributedMPITrainingTask{ + { + LauncherReplicas: &kfplugins.DistributedMPITrainingReplicaSpec{ + Resources: &core.Resources{ + Requests: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "250m"}, + {Name: 
core.Resources_MEMORY, Value: "250Mi"}, + }, + Limits: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "500m"}, + {Name: core.Resources_MEMORY, Value: "500Mi"}, + }, + }, + }, + WorkerReplicas: &kfplugins.DistributedMPITrainingReplicaSpec{ + Replicas: 100, + Resources: &core.Resources{ + Requests: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "1024m"}, + {Name: core.Resources_MEMORY, Value: "1Gi"}, + {Name: core.Resources_GPU, Value: "1"}, + }, + Limits: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "2048m"}, + {Name: core.Resources_MEMORY, Value: "2Gi"}, + {Name: core.Resources_GPU, Value: "1"}, + }, + }, + }, + }, + { + LauncherReplicas: &kfplugins.DistributedMPITrainingReplicaSpec{ + Common: &plugins.CommonReplicaSpec{ + Resources: &core.Resources{ + Requests: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "250m"}, + {Name: core.Resources_MEMORY, Value: "250Mi"}, + }, + Limits: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "500m"}, + {Name: core.Resources_MEMORY, Value: "500Mi"}, + }, + }, + }, + }, + WorkerReplicas: &kfplugins.DistributedMPITrainingReplicaSpec{ + Common: &plugins.CommonReplicaSpec{ + Replicas: 100, + Resources: &core.Resources{ + Requests: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "1024m"}, + {Name: core.Resources_MEMORY, Value: "1Gi"}, + {Name: core.Resources_GPU, Value: "1"}, + }, + Limits: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "2048m"}, + {Name: core.Resources_MEMORY, Value: "2Gi"}, + {Name: core.Resources_GPU, Value: "1"}, + }, + }, + }, + }, + }, + } + + for _, taskConfig := range taskConfigs { + mpiResourceHandler := mpiOperatorResourceHandler{} + + taskTemplate := dummyMPITaskTemplate(mpiID2, taskConfig) + taskTemplate.TaskTypeVersion = 1 + + resource, err := mpiResourceHandler.BuildResource(context.TODO(), dummyMPITaskContext(taskTemplate, resourceRequirements, nil)) + 
assert.NoError(t, err)
		assert.NotNil(t, resource)

		mpiJob, ok := resource.(*kubeflowv1.MPIJob)
		assert.True(t, ok)

		// GPU toleration is applied only to the GPU-requesting worker replica.
		assert.NotContains(t, mpiJob.Spec.MPIReplicaSpecs[kubeflowv1.MPIJobReplicaTypeLauncher].Template.Spec.Tolerations, gpuToleration)
		assert.Contains(t, mpiJob.Spec.MPIReplicaSpecs[kubeflowv1.MPIJobReplicaTypeWorker].Template.Spec.Tolerations, gpuToleration)
	}
}

// TestGetReplicaCount: GetReplicaCount returns non-nil counts for both the
// worker and launcher replica types of a built MPIJob.
func TestGetReplicaCount(t *testing.T) {
	mpiResourceHandler := mpiOperatorResourceHandler{}
	tfObj := dummyMPICustomObj(1, 1, 0)
	taskTemplate := dummyMPITaskTemplate("the job", tfObj)
	resource, err := mpiResourceHandler.BuildResource(context.TODO(), dummyMPITaskContext(taskTemplate, resourceRequirements, nil))
	assert.NoError(t, err)
	assert.NotNil(t, resource)
	MPIJob, ok := resource.(*kubeflowv1.MPIJob)
	assert.True(t, ok)

	assert.NotNil(t, common.GetReplicaCount(MPIJob.Spec.MPIReplicaSpecs, kubeflowv1.MPIJobReplicaTypeWorker))
	assert.NotNil(t, common.GetReplicaCount(MPIJob.Spec.MPIReplicaSpecs, kubeflowv1.MPIJobReplicaTypeLauncher))
}
diff --git a/flyteplugins/go/tasks/plugins/k8s/kfoperators/pytorch/pytorch.go b/flyteplugins/go/tasks/plugins/k8s/kfoperators/pytorch/pytorch.go
new file mode 100644
index 0000000000..3fa9ad1546
--- /dev/null
+++ b/flyteplugins/go/tasks/plugins/k8s/kfoperators/pytorch/pytorch.go
@@ -0,0 +1,278 @@
// Package pytorch implements the Flyte k8s plugin for Kubeflow PyTorchJob resources.
package pytorch

import (
	"context"
	"fmt"
	"strings"
	"time"

	kubeflowv1 "github.com/kubeflow/training-operator/pkg/apis/kubeflow.org/v1"
	"github.com/samber/lo"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/scheme"
	"sigs.k8s.io/controller-runtime/pkg/client"

	flyteerr "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/errors"
	"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery"
	pluginsCore "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core"
	"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/flytek8s"
	"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/k8s"
	"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/utils"
	"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/plugins/k8s/kfoperators/common"
	"github.com/flyteorg/flyte/v2/flytestdlib/logger"
	"github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core"
	"github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins"
	kfplugins "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins/kubeflow"
)

// pytorchOperatorResourceHandler translates Flyte PyTorch task templates into
// Kubeflow PyTorchJob custom resources and interprets their status.
type pytorchOperatorResourceHandler struct {
}

// Sanity test that the plugin implements method of k8s.Plugin
var _ k8s.Plugin = pytorchOperatorResourceHandler{}

// GetProperties reports the plugin's capabilities; defaults apply.
func (pytorchOperatorResourceHandler) GetProperties() k8s.PluginProperties {
	return k8s.PluginProperties{}
}

// Defines a func to create a query object (typically just object and type meta portions) that's used to query k8s
// resources.
func (pytorchOperatorResourceHandler) BuildIdentityResource(ctx context.Context, taskCtx pluginsCore.TaskExecutionMetadata) (client.Object, error) {
	// Only Kind/APIVersion are populated; name/namespace are filled in by the caller.
	return &kubeflowv1.PyTorchJob{
		TypeMeta: metav1.TypeMeta{
			Kind:       kubeflowv1.PyTorchJobKind,
			APIVersion: kubeflowv1.SchemeGroupVersion.String(),
		},
	}, nil
}

// Defines a func to create the full resource object that will be posted to k8s.
// BuildResource constructs the PyTorchJob custom resource from the task
// template. It supports task template version 0 (legacy plugins IDL) and
// version 1 (kubeflow plugins IDL); any other version is rejected.
func (pytorchOperatorResourceHandler) BuildResource(ctx context.Context, taskCtx pluginsCore.TaskExecutionContext) (client.Object, error) {
	taskTemplate, err := taskCtx.TaskReader().Read(ctx)

	if err != nil {
		return nil, flyteerr.Errorf(flyteerr.BadTaskSpecification, "unable to fetch task specification [%v]", err.Error())
	} else if taskTemplate == nil {
		return nil, flyteerr.Errorf(flyteerr.BadTaskSpecification, "nil task specification")
	}

	runPolicy := kubeflowv1.RunPolicy{}
	var elasticPolicy *kubeflowv1.ElasticPolicy

	var masterReplicaSpec, workerReplicaSpec *kubeflowv1.ReplicaSpec

	if taskTemplate.TaskTypeVersion == 0 {
		// v0: single replica spec cloned for master (always 1 replica) and workers.
		pytorchTaskExtraArgs := plugins.DistributedPyTorchTrainingTask{}

		err = utils.UnmarshalStruct(taskTemplate.GetCustom(), &pytorchTaskExtraArgs)
		if err != nil {
			return nil, flyteerr.Errorf(flyteerr.BadTaskSpecification, "invalid TaskSpecification [%v], Err: [%v]", taskTemplate.GetCustom(), err.Error())
		}

		replicaSpec, err := common.ToReplicaSpec(ctx, taskCtx, kubeflowv1.PyTorchJobDefaultContainerName)
		if err != nil {
			return nil, flyteerr.Errorf(flyteerr.BadTaskSpecification, "Unable to create replica spec: [%v]", err.Error())
		}
		masterReplicaSpec = replicaSpec.DeepCopy()
		masterReplicas := int32(1)
		masterReplicaSpec.Replicas = &masterReplicas
		workerReplicaSpec = replicaSpec.DeepCopy()
		workerReplicas := pytorchTaskExtraArgs.GetWorkers()
		workerReplicaSpec.Replicas = &workerReplicas

		// Set elastic config
		elasticConfig := pytorchTaskExtraArgs.GetElasticConfig()
		if elasticConfig != nil {
			elasticPolicy = ParseElasticConfig(elasticConfig)
		}
	} else if taskTemplate.TaskTypeVersion == 1 {
		// v1: master and worker replica specs carry independent overrides,
		// plus an optional run policy.
		kfPytorchTaskExtraArgs := kfplugins.DistributedPyTorchTrainingTask{}

		err = utils.UnmarshalStruct(taskTemplate.GetCustom(), &kfPytorchTaskExtraArgs)
		if err != nil {
			return nil, flyteerr.Errorf(flyteerr.BadTaskSpecification, "invalid TaskSpecification [%v], Err: [%v]", taskTemplate.GetCustom(), err.Error())
		}

		masterReplicaSpec, err = common.ToReplicaSpecWithOverrides(ctx, taskCtx, kfPytorchTaskExtraArgs.GetMasterReplicas(), kubeflowv1.PyTorchJobDefaultContainerName, true)
		if err != nil {
			return nil, flyteerr.Errorf(flyteerr.BadTaskSpecification, "Unable to create master replica spec: [%v]", err.Error())
		}

		workerReplicaSpec, err = common.ToReplicaSpecWithOverrides(ctx, taskCtx, kfPytorchTaskExtraArgs.GetWorkerReplicas(), kubeflowv1.PyTorchJobDefaultContainerName, false)
		if err != nil {
			return nil, flyteerr.Errorf(flyteerr.BadTaskSpecification, "Unable to create worker replica spec: [%v]", err.Error())
		}

		if kfPytorchTaskExtraArgs.GetRunPolicy() != nil {
			runPolicy = common.ParseRunPolicy(*kfPytorchTaskExtraArgs.GetRunPolicy())
		}
		// Set elastic config
		elasticConfig := kfPytorchTaskExtraArgs.GetElasticConfig()
		if elasticConfig != nil {
			elasticPolicy = ParseElasticConfig(elasticConfig)
		}
	} else {
		return nil, flyteerr.Errorf(flyteerr.BadTaskSpecification,
			"Invalid TaskSpecification, unsupported task template version [%v] key", taskTemplate.TaskTypeVersion)
	}

	if *workerReplicaSpec.Replicas <= 0 {
		return nil, fmt.Errorf("number of workers must be greater than 0")
	}

	jobSpec := kubeflowv1.PyTorchJobSpec{
		PyTorchReplicaSpecs: map[kubeflowv1.ReplicaType]*kubeflowv1.ReplicaSpec{
			kubeflowv1.PyTorchJobReplicaTypeMaster: masterReplicaSpec,
			kubeflowv1.PyTorchJobReplicaTypeWorker: workerReplicaSpec,
		},
		RunPolicy: runPolicy,
	}

	if elasticPolicy != nil {
		jobSpec.ElasticPolicy = elasticPolicy
		// Remove master replica spec if elastic policy is set
		delete(jobSpec.PyTorchReplicaSpecs, kubeflowv1.PyTorchJobReplicaTypeMaster)
	}

	job := &kubeflowv1.PyTorchJob{
		TypeMeta: metav1.TypeMeta{
			Kind:       kubeflowv1.PyTorchJobKind,
			APIVersion: kubeflowv1.SchemeGroupVersion.String(),
		},
		Spec: jobSpec,
	}

	return job, nil
}

// Interface for unified elastic config handling across plugin version v0 and v1. This interface should
// always be aligned with the ElasticConfig defined in flyteidl.
type ElasticConfig interface {
	GetMinReplicas() int32
	GetMaxReplicas() int32
	GetNprocPerNode() int32
	GetMaxRestarts() int32
	GetRdzvBackend() string
}

// To support parsing elastic config from both v0 and v1 of kubeflow pytorch idl
func ParseElasticConfig(elasticConfig ElasticConfig) *kubeflowv1.ElasticPolicy {
	// Values are copied to locals so the returned policy holds its own pointers.
	minReplicas := elasticConfig.GetMinReplicas()
	maxReplicas := elasticConfig.GetMaxReplicas()
	nProcPerNode := elasticConfig.GetNprocPerNode()
	maxRestarts := elasticConfig.GetMaxRestarts()
	rdzvBackend := kubeflowv1.RDZVBackend(elasticConfig.GetRdzvBackend())
	return &kubeflowv1.ElasticPolicy{
		MinReplicas:  &minReplicas,
		MaxReplicas:  &maxReplicas,
		RDZVBackend:  &rdzvBackend,
		NProcPerNode: &nProcPerNode,
		MaxRestarts:  &maxRestarts,
	}
}

// Analyses the k8s resource and reports the status as TaskPhase. This call is expected to be relatively fast,
// any operations that might take a long time (limits are configured system-wide) should be offloaded to the
// background.
+func (pytorchOperatorResourceHandler) GetTaskPhase(ctx context.Context, pluginContext k8s.PluginContext, resource client.Object) (pluginsCore.PhaseInfo, error) { + app, ok := resource.(*kubeflowv1.PyTorchJob) + if !ok { + return pluginsCore.PhaseInfoUndefined, fmt.Errorf("failed to convert resource data type") + } + + // Elastic PytorchJobs don't use master replicas + hasMaster := false + if _, ok := app.Spec.PyTorchReplicaSpecs[kubeflowv1.PyTorchJobReplicaTypeMaster]; ok { + hasMaster = true + } + + workersCount := common.GetReplicaCount(app.Spec.PyTorchReplicaSpecs, kubeflowv1.PyTorchJobReplicaTypeWorker) + + taskTemplate, err := pluginContext.TaskReader().Read(ctx) + if err != nil { + return pluginsCore.PhaseInfoUndefined, err + } + + taskLogs, err := common.GetLogs(pluginContext, common.PytorchTaskType, app.ObjectMeta, taskTemplate, hasMaster, *workersCount, 0, 0, 0, kubeflowv1.PyTorchJobDefaultContainerName) + if err != nil { + return pluginsCore.PhaseInfoUndefined, err + } + + if app.Status.StartTime == nil && app.CreationTimestamp.Add(common.GetConfig().Timeout.Duration).Before(time.Now()) { + return pluginsCore.PhaseInfoUndefined, fmt.Errorf("kubeflow operator hasn't updated the pytorch custom resource since creation time %v", app.CreationTimestamp) + } + currentCondition, err := common.ExtractCurrentCondition(app.Status.Conditions) + if err != nil { + return pluginsCore.PhaseInfoUndefined, err + } + + occurredAt := time.Now() + statusDetails, _ := utils.MarshalObjToStruct(app.Status) + podList := &v1.PodList{} + err = pluginContext.K8sReader().List(ctx, podList) + if err != nil { + return pluginsCore.PhaseInfoUndefined, fmt.Errorf("failed to list pytorch execution pods. 
Error: %w", err) + } + logger.Debugf(ctx, "podlist: %+v", podList) + taskPhaseInfo := pluginsCore.TaskInfo{ + Logs: taskLogs, + LogContext: logContextForPods(app.Name, podList.Items), + OccurredAt: &occurredAt, + CustomInfo: statusDetails, + } + var phaseInfo pluginsCore.PhaseInfo + podName := fmt.Sprintf("%s-%s", app.Name, "worker-0") + phaseInfo, err = flytek8s.DemystifyFailedOrPendingPod(ctx, pluginContext, taskPhaseInfo, app.Namespace, podName, "pytorch") + if err != nil { + logger.Errorf(ctx, "Failed to demystify pod status for pytorch worker-0/master. Error: %v", err) + } + if phaseInfo.Phase().IsFailure() { + // If the master node or worker-0 is in a failure state, we can fail fast without checking the PytorchJob status. + return phaseInfo, nil + } + logger.Debugf(ctx, "logcontext: %+v", taskPhaseInfo.LogContext) + logger.Debugf(ctx, "PyTorchJob phase is %s", phaseInfo.Phase()) + logger.Debugf(ctx, "PytorchJob currentCondition: %v", currentCondition) + phaseInfo, err = common.GetPhaseInfo(currentCondition, occurredAt, taskPhaseInfo) + + phaseVersionUpdateErr := k8s.MaybeUpdatePhaseVersionFromPluginContext(&phaseInfo, &pluginContext) + if phaseVersionUpdateErr != nil { + return phaseInfo, phaseVersionUpdateErr + } + + return phaseInfo, err +} + +func logContextForPods(pytorchJobName string, pods []v1.Pod) *core.LogContext { + pods = lo.Filter(pods, func(item v1.Pod, _ int) bool { + // Running, Succeeded or Failed is OK + return item.Status.Phase != v1.PodPending + }) + logCtx := &core.LogContext{ + Pods: make([]*core.PodLogContext, len(pods)), + } + for i, pod := range pods { + p := pod + if strings.HasPrefix(p.Name, pytorchJobName) && strings.Contains(p.Name, "worker-0") { + logCtx.PrimaryPodName = p.Name + } + logCtx.Pods[i] = flytek8s.BuildPodLogContext(&p) + } + return logCtx +} + +func init() { + if err := kubeflowv1.AddToScheme(scheme.Scheme); err != nil { + panic(err) + } + + pluginmachinery.PluginRegistry().RegisterK8sPlugin( + k8s.PluginEntry{ + ID: 
common.PytorchTaskType, + RegisteredTaskTypes: []pluginsCore.TaskType{common.PytorchTaskType}, + ResourceToWatch: &kubeflowv1.PyTorchJob{}, + Plugin: pytorchOperatorResourceHandler{}, + IsDefault: false, + }) +} diff --git a/flyteplugins/go/tasks/plugins/k8s/kfoperators/pytorch/pytorch_test.go b/flyteplugins/go/tasks/plugins/k8s/kfoperators/pytorch/pytorch_test.go new file mode 100644 index 0000000000..5c3703b5eb --- /dev/null +++ b/flyteplugins/go/tasks/plugins/k8s/kfoperators/pytorch/pytorch_test.go @@ -0,0 +1,1429 @@ +package pytorch + +import ( + "context" + "fmt" + "reflect" + "testing" + "time" + + structpb "github.com/golang/protobuf/ptypes/struct" + kubeflowv1 "github.com/kubeflow/training-operator/pkg/apis/kubeflow.org/v1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/logs" + pluginsCore "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core/mocks" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/flytek8s" + flytek8sConfig "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/flytek8s/config" + pluginIOMocks "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/io/mocks" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/k8s" + k8smocks "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/k8s/mocks" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/utils" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/plugins/k8s/kfoperators/common" + stdlibUtils "github.com/flyteorg/flyte/v2/flytestdlib/utils" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" + 
	"github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins"
	kfplugins "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins/kubeflow"
)

const testImage = "image://"
const testImageMaster = "image://master"
const serviceAccount = "pytorch_sa"

var (
	// Shared fixtures used to build task templates and execution contexts below.
	dummyEnvVars = []*core.KeyValuePair{
		{Key: "Env_Var", Value: "Env_Val"},
	}

	testArgs = []string{
		"test-args",
	}

	dummyAnnotations = map[string]string{
		"annotation-key": "annotation-value",
	}
	dummyLabels = map[string]string{
		"label-key": "label-value",
	}

	resourceRequirements = &corev1.ResourceRequirements{
		Limits: corev1.ResourceList{
			corev1.ResourceCPU:         resource.MustParse("1000m"),
			corev1.ResourceMemory:      resource.MustParse("1Gi"),
			flytek8s.ResourceNvidiaGPU: resource.MustParse("1"),
		},
		Requests: corev1.ResourceList{
			corev1.ResourceCPU:         resource.MustParse("100m"),
			corev1.ResourceMemory:      resource.MustParse("512Mi"),
			flytek8s.ResourceNvidiaGPU: resource.MustParse("1"),
		},
	}

	jobName      = "the-job"
	jobNamespace = "pytorch-namespace"
)

// dummyPytorchCustomObj builds a minimal v0-IDL pytorch task payload with the
// given worker count.
func dummyPytorchCustomObj(workers int32) *plugins.DistributedPyTorchTrainingTask {
	return &plugins.DistributedPyTorchTrainingTask{
		Workers: workers,
	}
}

// dummyElasticPytorchCustomObj builds a v0-IDL pytorch task payload with an
// elastic config attached.
func dummyElasticPytorchCustomObj(workers int32, elasticConfig plugins.ElasticConfig) *plugins.DistributedPyTorchTrainingTask {
	return &plugins.DistributedPyTorchTrainingTask{
		Workers:       workers,
		ElasticConfig: &elasticConfig,
	}
}

// dummyPytorchTaskTemplate wraps a v0 (plugins) or v1 (kfplugins) pytorch
// custom object into a container task template; panics on marshalling errors.
func dummyPytorchTaskTemplate(id string, args ...interface{}) *core.TaskTemplate {

	var ptObjJSON string
	var err error

	for _, arg := range args {
		switch t := arg.(type) {
		case *kfplugins.DistributedPyTorchTrainingTask:
			var pytorchCustomObj = t
			ptObjJSON, err = utils.MarshalToString(pytorchCustomObj)
		case *plugins.DistributedPyTorchTrainingTask:
			var pytorchCustomObj = t
			ptObjJSON, err = utils.MarshalToString(pytorchCustomObj)
		default:
			err = fmt.Errorf("Unknown input type %T", t)
		}
	}

	if err != nil {
		panic(err)
	}

	structObj := structpb.Struct{}

	err = stdlibUtils.UnmarshalStringToPb(ptObjJSON, &structObj)
	if err != nil {
		panic(err)
	}

	return &core.TaskTemplate{
		Id:   &core.Identifier{Name: id},
		Type: "container",
		Target: &core.TaskTemplate_Container{
			Container: &core.Container{
				Image: testImage,
				Args:  testArgs,
				Env:   dummyEnvVars,
			},
		},
		Custom: &structObj,
	}
}

// dummyPytorchTaskContext wires up a fully mocked TaskExecutionContext
// (reader/writer, identifiers, overrides, metadata, plugin state) for
// BuildResource tests.
func dummyPytorchTaskContext(taskTemplate *core.TaskTemplate, resources *corev1.ResourceRequirements, extendedResources *core.ExtendedResources, containerImage string) pluginsCore.TaskExecutionContext {
	taskCtx := &mocks.TaskExecutionContext{}
	inputReader := &pluginIOMocks.InputReader{}
	inputReader.OnGetInputPrefixPath().Return("/input/prefix")
	inputReader.OnGetInputPath().Return("/input")
	inputReader.OnGetMatch(mock.Anything).Return(&core.LiteralMap{}, nil)
	taskCtx.OnInputReader().Return(inputReader)

	outputReader := &pluginIOMocks.OutputWriter{}
	outputReader.OnGetOutputPath().Return("/data/outputs.pb")
	outputReader.OnGetOutputPrefixPath().Return("/data/")
	outputReader.OnGetRawOutputPrefix().Return("")
	outputReader.OnGetCheckpointPrefix().Return("/checkpoint")
	outputReader.OnGetPreviousCheckpointsPrefix().Return("/prev")
	taskCtx.OnOutputWriter().Return(outputReader)

	taskReader := &mocks.TaskReader{}
	taskReader.OnReadMatch(mock.Anything).Return(taskTemplate, nil)
	taskCtx.OnTaskReader().Return(taskReader)

	tID := &mocks.TaskExecutionID{}
	tID.OnGetID().Return(core.TaskExecutionIdentifier{
		NodeExecutionId: &core.NodeExecutionIdentifier{
			ExecutionId: &core.WorkflowExecutionIdentifier{
				Name:    "my_name",
				Project: "my_project",
				Domain:  "my_domain",
			},
		},
	})
	tID.OnGetGeneratedName().Return("some-acceptable-name")
	tID.On("GetUniqueNodeID").Return("an-unique-id")

	overrides := &mocks.TaskOverrides{}
	overrides.OnGetResources().Return(resources)
	overrides.OnGetExtendedResources().Return(extendedResources)
	overrides.OnGetContainerImage().Return(containerImage)
	overrides.OnGetPodTemplate().Return(nil)

	taskExecutionMetadata := &mocks.TaskExecutionMetadata{}
	taskExecutionMetadata.OnGetTaskExecutionID().Return(tID)
	taskExecutionMetadata.OnGetNamespace().Return("test-namespace")
	taskExecutionMetadata.OnGetAnnotations().Return(dummyAnnotations)
	taskExecutionMetadata.OnGetLabels().Return(dummyLabels)
	taskExecutionMetadata.OnGetOwnerReference().Return(v1.OwnerReference{
		Kind: "node",
		Name: "blah",
	})
	taskExecutionMetadata.OnIsInterruptible().Return(true)
	taskExecutionMetadata.OnGetOverrides().Return(overrides)
	taskExecutionMetadata.OnGetK8sServiceAccount().Return(serviceAccount)
	taskExecutionMetadata.OnGetPlatformResources().Return(&corev1.ResourceRequirements{})
	taskExecutionMetadata.OnGetEnvironmentVariables().Return(nil)
	taskExecutionMetadata.OnGetConsoleURL().Return("")
	taskCtx.OnTaskExecutionMetadata().Return(taskExecutionMetadata)
	pluginStateReaderMock := mocks.PluginStateReader{}
	pluginStateReaderMock.On("Get", mock.AnythingOfType(reflect.TypeOf(&k8s.PluginState{}).String())).Return(
		func(v interface{}) uint8 {
			// Writes an empty plugin state into the caller-supplied pointer.
			*(v.(*k8s.PluginState)) = k8s.PluginState{}
			return 0
		},
		func(v interface{}) error {
			return nil
		})

	taskCtx.OnPluginStateReader().Return(&pluginStateReaderMock)
	return taskCtx
}

// dummyPytorchPluginContext mirrors dummyPytorchTaskContext but produces a
// k8s PluginContext (used by GetTaskPhase tests) and injects the given plugin
// state instead of an empty one.
func dummyPytorchPluginContext(taskTemplate *core.TaskTemplate, resources *corev1.ResourceRequirements, pluginState k8s.PluginState) *k8smocks.PluginContext {
	pCtx := &k8smocks.PluginContext{}
	inputReader := &pluginIOMocks.InputReader{}
	inputReader.OnGetInputPrefixPath().Return("/input/prefix")
	inputReader.OnGetInputPath().Return("/input")
	inputReader.OnGetMatch(mock.Anything).Return(&core.LiteralMap{}, nil)
	pCtx.OnInputReader().Return(inputReader)

	outputReader := &pluginIOMocks.OutputWriter{}
	outputReader.OnGetOutputPath().Return("/data/outputs.pb")
	outputReader.OnGetOutputPrefixPath().Return("/data/")
	outputReader.OnGetRawOutputPrefix().Return("")
	outputReader.OnGetCheckpointPrefix().Return("/checkpoint")
	outputReader.OnGetPreviousCheckpointsPrefix().Return("/prev")
	pCtx.OnOutputWriter().Return(outputReader)

	taskReader := &mocks.TaskReader{}
	taskReader.OnReadMatch(mock.Anything).Return(taskTemplate, nil)
	pCtx.OnTaskReader().Return(taskReader)

	tID := &mocks.TaskExecutionID{}
	tID.OnGetID().Return(core.TaskExecutionIdentifier{
		NodeExecutionId: &core.NodeExecutionIdentifier{
			ExecutionId: &core.WorkflowExecutionIdentifier{
				Name:    "my_name",
				Project: "my_project",
				Domain:  "my_domain",
			},
		},
	})
	tID.OnGetGeneratedName().Return("some-acceptable-name")
	tID.On("GetUniqueNodeID").Return("an-unique-id")

	overrides := &mocks.TaskOverrides{}
	overrides.OnGetResources().Return(resources)
	overrides.OnGetExtendedResources().Return(nil)
	overrides.OnGetContainerImage().Return("")

	taskExecutionMetadata := &mocks.TaskExecutionMetadata{}
	taskExecutionMetadata.OnGetTaskExecutionID().Return(tID)
	taskExecutionMetadata.OnGetNamespace().Return("test-namespace")
	taskExecutionMetadata.OnGetAnnotations().Return(dummyAnnotations)
	taskExecutionMetadata.OnGetLabels().Return(dummyLabels)
	taskExecutionMetadata.OnGetOwnerReference().Return(v1.OwnerReference{
		Kind: "node",
		Name: "blah",
	})
	taskExecutionMetadata.OnIsInterruptible().Return(true)
	taskExecutionMetadata.OnGetOverrides().Return(overrides)
	taskExecutionMetadata.OnGetK8sServiceAccount().Return(serviceAccount)
	taskExecutionMetadata.OnGetPlatformResources().Return(&corev1.ResourceRequirements{})
	taskExecutionMetadata.OnGetEnvironmentVariables().Return(nil)
	taskExecutionMetadata.OnGetConsoleURL().Return("")
	pCtx.OnTaskExecutionMetadata().Return(taskExecutionMetadata)

	pluginStateReaderMock := mocks.PluginStateReader{}
	pluginStateReaderMock.On("Get", mock.AnythingOfType(reflect.TypeOf(&pluginState).String())).Return(
		func(v interface{}) uint8 {
			// Returns the caller-provided pluginState through the out-pointer.
			*(v.(*k8s.PluginState)) = pluginState
			return 0
		},
		func(v interface{}) error {
			return nil
		})

	pCtx.OnPluginStateReader().Return(&pluginStateReaderMock)
	return pCtx
}

// dummyPytorchJobResource builds a PyTorchJob (via the real BuildResource) and
// attaches a synthetic condition history ending in the requested conditionType.
func dummyPytorchJobResource(pytorchResourceHandler pytorchOperatorResourceHandler, workers int32, conditionType kubeflowv1.JobConditionType) *kubeflowv1.PyTorchJob {
	var jobConditions []kubeflowv1.JobCondition

	now := time.Now()

	jobCreated := kubeflowv1.JobCondition{
		Type:    kubeflowv1.JobCreated,
		Status:  corev1.ConditionTrue,
		Reason:  "PyTorchJobCreated",
		Message: "PyTorchJob the-job is created.",
		LastUpdateTime: v1.Time{
			Time: now,
		},
		LastTransitionTime: v1.Time{
			Time: now,
		},
	}
	jobRunningActive := kubeflowv1.JobCondition{
		Type:    kubeflowv1.JobRunning,
		Status:  corev1.ConditionTrue,
		Reason:  "PyTorchJobRunning",
		Message: "PyTorchJob the-job is running.",
		LastUpdateTime: v1.Time{
			Time: now.Add(time.Minute),
		},
		LastTransitionTime: v1.Time{
			Time: now.Add(time.Minute),
		},
	}
	jobRunningInactive := *jobRunningActive.DeepCopy()
	jobRunningInactive.Status = corev1.ConditionFalse
	jobSucceeded := kubeflowv1.JobCondition{
		Type:    kubeflowv1.JobSucceeded,
		Status:  corev1.ConditionTrue,
		Reason:  "PyTorchJobSucceeded",
		Message: "PyTorchJob the-job is successfully completed.",
		LastUpdateTime: v1.Time{
			Time: now.Add(2 * time.Minute),
		},
		LastTransitionTime: v1.Time{
			Time: now.Add(2 * time.Minute),
		},
	}
	jobFailed := kubeflowv1.JobCondition{
		Type:    kubeflowv1.JobFailed,
		Status:  corev1.ConditionTrue,
		Reason:  "PyTorchJobFailed",
		Message: "PyTorchJob the-job is failed.",
		LastUpdateTime: v1.Time{
			Time: now.Add(2 * time.Minute),
		},
		LastTransitionTime: v1.Time{
			Time: now.Add(2 * time.Minute),
		},
	}
	jobRestarting := kubeflowv1.JobCondition{
		Type:    kubeflowv1.JobRestarting,
		Status:  corev1.ConditionTrue,
		Reason:  "PyTorchJobRestarting",
		Message: "PyTorchJob the-job is restarting because some replica(s) failed.",
		LastUpdateTime: v1.Time{
			Time: now.Add(3 * time.Minute),
		},
		LastTransitionTime: v1.Time{
			Time: now.Add(3 * time.Minute),
		},
	}

	switch conditionType {
	case kubeflowv1.JobCreated:
		jobConditions = []kubeflowv1.JobCondition{
			jobCreated,
		}
	case kubeflowv1.JobRunning:
		jobConditions = []kubeflowv1.JobCondition{
			jobCreated,
			jobRunningActive,
		}
	case kubeflowv1.JobSucceeded:
		jobConditions = []kubeflowv1.JobCondition{
			jobCreated,
			jobRunningInactive,
			jobSucceeded,
		}
	case kubeflowv1.JobFailed:
		jobConditions = []kubeflowv1.JobCondition{
			jobCreated,
			jobRunningInactive,
			jobFailed,
		}
	case kubeflowv1.JobRestarting:
		jobConditions = []kubeflowv1.JobCondition{
			jobCreated,
			jobRunningInactive,
			jobFailed,
			jobRestarting,
		}
	}

	ptObj := dummyPytorchCustomObj(workers)
	taskTemplate := dummyPytorchTaskTemplate("job1", ptObj)
	resource, err := pytorchResourceHandler.BuildResource(context.TODO(), dummyPytorchTaskContext(taskTemplate, resourceRequirements, nil, ""))
	if err != nil {
		panic(err)
	}

	return &kubeflowv1.PyTorchJob{
		ObjectMeta: v1.ObjectMeta{
			CreationTimestamp: v1.Time{Time: time.Now()},
			Name:              jobName,
			Namespace:         jobNamespace,
		},
		Spec: resource.(*kubeflowv1.PyTorchJob).Spec,
		Status: kubeflowv1.JobStatus{
			Conditions:        jobConditions,
			ReplicaStatuses:   nil,
			StartTime:         nil,
			CompletionTime:    nil,
			LastReconcileTime: nil,
		},
	}
}

// TestBuildResourcePytorchElastic verifies that an elastic config produces an
// ElasticPolicy and drops the master replica spec.
func TestBuildResourcePytorchElastic(t *testing.T) {
	pytorchResourceHandler := pytorchOperatorResourceHandler{}

	ptObj := dummyElasticPytorchCustomObj(2, plugins.ElasticConfig{MinReplicas: 1, MaxReplicas: 2, NprocPerNode: 4, RdzvBackend: "c10d"})
	taskTemplate := dummyPytorchTaskTemplate("job2", ptObj)

	resource, err := pytorchResourceHandler.BuildResource(context.TODO(), dummyPytorchTaskContext(taskTemplate, resourceRequirements, nil, ""))
	assert.NoError(t, err)
	assert.NotNil(t, resource)

	pytorchJob, ok := resource.(*kubeflowv1.PyTorchJob)
	assert.True(t, ok)
	assert.Equal(t, int32(2), *pytorchJob.Spec.PyTorchReplicaSpecs[kubeflowv1.PyTorchJobReplicaTypeWorker].Replicas)
	assert.NotNil(t, pytorchJob.Spec.ElasticPolicy)
	assert.Equal(t, int32(1), *pytorchJob.Spec.ElasticPolicy.MinReplicas)
	assert.Equal(t, int32(2), *pytorchJob.Spec.ElasticPolicy.MaxReplicas)
	assert.Equal(t, int32(4), *pytorchJob.Spec.ElasticPolicy.NProcPerNode)
	assert.Equal(t, kubeflowv1.RDZVBackend("c10d"), *pytorchJob.Spec.ElasticPolicy.RDZVBackend)

	assert.Equal(t, 1, len(pytorchJob.Spec.PyTorchReplicaSpecs))
	assert.Contains(t, pytorchJob.Spec.PyTorchReplicaSpecs, kubeflowv1.PyTorchJobReplicaTypeWorker)

	for _, replicaSpec := range pytorchJob.Spec.PyTorchReplicaSpecs {
		var hasContainerWithDefaultPytorchName = false
		podSpec := replicaSpec.Template.Spec
		for _, container := range podSpec.Containers {
			if container.Name == kubeflowv1.PyTorchJobDefaultContainerName {
				hasContainerWithDefaultPytorchName = true
			}
		}

		assert.True(t, hasContainerWithDefaultPytorchName)

		// verify TaskExecutionMetadata labels and annotations are copied to the PyTorchJob
		for k, v := range dummyAnnotations {
			assert.Equal(t, v, replicaSpec.Template.ObjectMeta.Annotations[k])
		}
		for k, v := range dummyLabels {
			assert.Equal(t, v, replicaSpec.Template.ObjectMeta.Labels[k])
		}
	}
}

// TestBuildResourcePytorch verifies the non-elastic build path: worker count,
// container naming, resources, and metadata propagation.
func TestBuildResourcePytorch(t *testing.T) {
	pytorchResourceHandler := pytorchOperatorResourceHandler{}

	ptObj := dummyPytorchCustomObj(100)
	taskTemplate := dummyPytorchTaskTemplate("job3", ptObj)

	res, err := pytorchResourceHandler.BuildResource(context.TODO(), dummyPytorchTaskContext(taskTemplate, resourceRequirements, nil, ""))
	assert.NoError(t, err)
	assert.NotNil(t, res)

	pytorchJob, ok := res.(*kubeflowv1.PyTorchJob)
	assert.True(t, ok)
	assert.Equal(t, int32(100), *pytorchJob.Spec.PyTorchReplicaSpecs[kubeflowv1.PyTorchJobReplicaTypeWorker].Replicas)
	assert.Nil(t,
	pytorchJob.Spec.ElasticPolicy)

	// verify TaskExecutionMetadata labels and annotations are copied to the TensorFlowJob
	for k, v := range dummyAnnotations {
		for _, replicaSpec := range pytorchJob.Spec.PyTorchReplicaSpecs {
			assert.Equal(t, v, replicaSpec.Template.ObjectMeta.Annotations[k])
		}
	}
	for k, v := range dummyLabels {
		for _, replicaSpec := range pytorchJob.Spec.PyTorchReplicaSpecs {
			assert.Equal(t, v, replicaSpec.Template.ObjectMeta.Labels[k])
		}
	}

	for _, replicaSpec := range pytorchJob.Spec.PyTorchReplicaSpecs {
		var hasContainerWithDefaultPytorchName = false
		for _, container := range replicaSpec.Template.Spec.Containers {
			if container.Name == kubeflowv1.PyTorchJobDefaultContainerName {
				hasContainerWithDefaultPytorchName = true
			}

			assert.Equal(t, resourceRequirements.Requests, container.Resources.Requests, fmt.Sprintf(" container.Resources.Requests [%+v]", container.Resources.Requests.Cpu().String()))
			assert.Equal(t, resourceRequirements.Limits, container.Resources.Limits, fmt.Sprintf(" container.Resources.Limits [%+v]", container.Resources.Limits.Cpu().String()))
		}

		assert.True(t, hasContainerWithDefaultPytorchName)
	}
}

// TestBuildResourcePytorchContainerImage checks that a container-image override
// from TaskOverrides wins over the template's image, for both plain and elastic
// pytorch payloads.
func TestBuildResourcePytorchContainerImage(t *testing.T) {
	assert.NoError(t, flytek8sConfig.SetK8sPluginConfig(&flytek8sConfig.K8sPluginConfig{}))

	fixtures := []struct {
		name                   string
		resources              *corev1.ResourceRequirements
		containerImageOverride string
	}{
		{
			"without overrides",
			&corev1.ResourceRequirements{
				Limits: corev1.ResourceList{
					flytek8s.ResourceNvidiaGPU: resource.MustParse("1"),
				},
			},
			"",
		},
		{
			"with overrides",
			&corev1.ResourceRequirements{
				Limits: corev1.ResourceList{
					flytek8s.ResourceNvidiaGPU: resource.MustParse("1"),
				},
			},
			"container-image-override",
		},
	}

	testConfigs := []struct {
		name   string
		plugin *plugins.DistributedPyTorchTrainingTask
	}{
		{
			"pytorch",
			dummyPytorchCustomObj(100),
		},
		{
			"elastic pytorch",
			dummyElasticPytorchCustomObj(2, plugins.ElasticConfig{MinReplicas: 1, MaxReplicas: 2, NprocPerNode: 4, RdzvBackend: "c10d"}),
		},
	}

	for _, tCfg := range testConfigs {
		for _, f := range fixtures {
			t.Run(tCfg.name+" "+f.name, func(t *testing.T) {
				taskTemplate := dummyPytorchTaskTemplate("job", tCfg.plugin)
				taskContext := dummyPytorchTaskContext(taskTemplate, f.resources, nil, f.containerImageOverride)
				pytorchResourceHandler := pytorchOperatorResourceHandler{}
				r, err := pytorchResourceHandler.BuildResource(context.TODO(), taskContext)
				assert.NoError(t, err)
				assert.NotNil(t, r)
				pytorchJob, ok := r.(*kubeflowv1.PyTorchJob)
				assert.True(t, ok)

				for _, replicaSpec := range pytorchJob.Spec.PyTorchReplicaSpecs {
					var expectedContainerImage string
					if len(f.containerImageOverride) > 0 {
						expectedContainerImage = f.containerImageOverride
					} else {
						expectedContainerImage = testImage
					}
					assert.Equal(t, expectedContainerImage, replicaSpec.Template.Spec.Containers[0].Image)
				}
			})
		}
	}
}

// TestBuildResourcePytorchExtendedResources checks that GPU accelerator
// settings (base and override) translate into node selectors and tolerations.
func TestBuildResourcePytorchExtendedResources(t *testing.T) {
	assert.NoError(t, flytek8sConfig.SetK8sPluginConfig(&flytek8sConfig.K8sPluginConfig{
		GpuDeviceNodeLabel:                 "gpu-node-label",
		GpuPartitionSizeNodeLabel:          "gpu-partition-size",
		GpuResourceName:                    flytek8s.ResourceNvidiaGPU,
		AddTolerationsForExtendedResources: []string{"nvidia.com/gpu"},
	}))

	fixtures := []struct {
		name                      string
		resources                 *corev1.ResourceRequirements
		extendedResourcesBase     *core.ExtendedResources
		extendedResourcesOverride *core.ExtendedResources
		expectedNsr               []corev1.NodeSelectorTerm
		expectedTol               []corev1.Toleration
	}{
		{
			"without overrides",
			&corev1.ResourceRequirements{
				Limits: corev1.ResourceList{
					flytek8s.ResourceNvidiaGPU: resource.MustParse("1"),
				},
			},
			&core.ExtendedResources{
				GpuAccelerator: &core.GPUAccelerator{
					Device: "nvidia-tesla-t4",
				},
			},
			nil,
			[]corev1.NodeSelectorTerm{
				{
					MatchExpressions: []corev1.NodeSelectorRequirement{
						corev1.NodeSelectorRequirement{
							Key:      "gpu-node-label",
							Operator: corev1.NodeSelectorOpIn,
							Values:   []string{"nvidia-tesla-t4"},
						},
					},
				},
			},
			[]corev1.Toleration{
				{
					Key:      "gpu-node-label",
					Value:    "nvidia-tesla-t4",
					Operator: corev1.TolerationOpEqual,
					Effect:   corev1.TaintEffectNoSchedule,
				},
				{
					Key:      "nvidia.com/gpu",
					Operator: corev1.TolerationOpExists,
					Effect:   corev1.TaintEffectNoSchedule,
				},
			},
		},
		{
			"with overrides",
			&corev1.ResourceRequirements{
				Limits: corev1.ResourceList{
					flytek8s.ResourceNvidiaGPU: resource.MustParse("1"),
				},
			},
			&core.ExtendedResources{
				GpuAccelerator: &core.GPUAccelerator{
					Device: "nvidia-tesla-t4",
				},
			},
			&core.ExtendedResources{
				GpuAccelerator: &core.GPUAccelerator{
					Device: "nvidia-tesla-a100",
					PartitionSizeValue: &core.GPUAccelerator_PartitionSize{
						PartitionSize: "1g.5gb",
					},
				},
			},
			[]corev1.NodeSelectorTerm{
				{
					MatchExpressions: []corev1.NodeSelectorRequirement{
						corev1.NodeSelectorRequirement{
							Key:      "gpu-node-label",
							Operator: corev1.NodeSelectorOpIn,
							Values:   []string{"nvidia-tesla-a100"},
						},
						corev1.NodeSelectorRequirement{
							Key:      "gpu-partition-size",
							Operator: corev1.NodeSelectorOpIn,
							Values:   []string{"1g.5gb"},
						},
					},
				},
			},
			[]corev1.Toleration{
				{
					Key:      "gpu-node-label",
					Value:    "nvidia-tesla-a100",
					Operator: corev1.TolerationOpEqual,
					Effect:   corev1.TaintEffectNoSchedule,
				},
				{
					Key:      "gpu-partition-size",
					Value:    "1g.5gb",
					Operator: corev1.TolerationOpEqual,
					Effect:   corev1.TaintEffectNoSchedule,
				},
				{
					Key:      "nvidia.com/gpu",
					Operator: corev1.TolerationOpExists,
					Effect:   corev1.TaintEffectNoSchedule,
				},
			},
		},
	}

	testConfigs := []struct {
		name   string
		plugin *plugins.DistributedPyTorchTrainingTask
	}{
		{
			"pytorch",
			dummyPytorchCustomObj(100),
		},
		{
			"elastic pytorch",
			dummyElasticPytorchCustomObj(2, plugins.ElasticConfig{MinReplicas: 1, MaxReplicas: 2, NprocPerNode: 4, RdzvBackend: "c10d"}),
		},
	}

	for _, tCfg := range testConfigs {
		for _, f := range fixtures {
			t.Run(tCfg.name+" "+f.name, func(t *testing.T) {
				taskTemplate := dummyPytorchTaskTemplate("job", tCfg.plugin)
				taskTemplate.ExtendedResources = f.extendedResourcesBase
				taskContext := dummyPytorchTaskContext(taskTemplate, f.resources, f.extendedResourcesOverride, "")
				pytorchResourceHandler := pytorchOperatorResourceHandler{}
				r, err := pytorchResourceHandler.BuildResource(context.TODO(), taskContext)
				assert.NoError(t, err)
				assert.NotNil(t, r)
				pytorchJob, ok := r.(*kubeflowv1.PyTorchJob)
				assert.True(t, ok)

				for _, replicaSpec := range pytorchJob.Spec.PyTorchReplicaSpecs {
					assert.EqualValues(
						t,
						f.expectedNsr,
						replicaSpec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms,
					)
					assert.EqualValues(
						t,
						f.expectedTol,
						replicaSpec.Template.Spec.Tolerations,
					)
				}
			})
		}
	}
}

// TestGetTaskPhase maps each PyTorchJob condition to the expected Flyte phase,
// backed by a fake pod reader so log contexts can be assembled.
func TestGetTaskPhase(t *testing.T) {
	pytorchResourceHandler := pytorchOperatorResourceHandler{}
	ctx := context.TODO()

	dummyPytorchJobResourceCreator := func(conditionType kubeflowv1.JobConditionType) *kubeflowv1.PyTorchJob {
		return dummyPytorchJobResource(pytorchResourceHandler, 2, conditionType)
	}

	pluginContext := dummyPytorchPluginContext(dummyPytorchTaskTemplate("", dummyPytorchCustomObj(2)), resourceRequirements, k8s.PluginState{})
	podList := []runtime.Object{
		// Pending pod: should be filtered out of the log context.
		&corev1.Pod{
			ObjectMeta: v1.ObjectMeta{Namespace: "ns", Name: "initializing ignored pod"},
			Status:     corev1.PodStatus{Phase: corev1.PodPending},
		},
		&corev1.Pod{
			ObjectMeta: v1.ObjectMeta{Namespace: "ns", Name: "test"},
			Spec: corev1.PodSpec{
				Containers: []corev1.Container{
					{Name: "test-work-0"},
				},
			},
			Status: corev1.PodStatus{
				Phase: corev1.PodRunning,
				ContainerStatuses: []corev1.ContainerStatus{
					{
						Name: "test-work-0",
						State: corev1.ContainerState{
							Terminated: &corev1.ContainerStateTerminated{
								StartedAt:  v1.Time{Time: time.Now()},
								FinishedAt: v1.Time{Time: time.Now()},
							},
						},
					},
				},
			},
		},
	}
	reader := fake.NewFakeClient(podList...)
	pluginContext.OnK8sReader().Return(reader)
	taskPhase, err := pytorchResourceHandler.GetTaskPhase(ctx, pluginContext, dummyPytorchJobResourceCreator(kubeflowv1.JobCreated))
	assert.NoError(t, err)
	assert.Equal(t, pluginsCore.PhaseQueued, taskPhase.Phase())
	assert.NotNil(t, taskPhase.Info())
	assert.Nil(t, err)

	taskPhase, err = pytorchResourceHandler.GetTaskPhase(ctx, pluginContext, dummyPytorchJobResourceCreator(kubeflowv1.JobRunning))
	assert.NoError(t, err)
	assert.Equal(t, pluginsCore.PhaseRunning, taskPhase.Phase())
	assert.NotNil(t, taskPhase.Info())
	assert.Nil(t, err)

	taskPhase, err = pytorchResourceHandler.GetTaskPhase(ctx, pluginContext, dummyPytorchJobResourceCreator(kubeflowv1.JobSucceeded))
	assert.NoError(t, err)
	assert.Equal(t, pluginsCore.PhaseSuccess, taskPhase.Phase())
	assert.NotNil(t, taskPhase.Info())
	assert.Nil(t, err)

	taskPhase, err = pytorchResourceHandler.GetTaskPhase(ctx, pluginContext, dummyPytorchJobResourceCreator(kubeflowv1.JobFailed))
	assert.NoError(t, err)
	assert.Equal(t, pluginsCore.PhaseRetryableFailure, taskPhase.Phase())
	assert.NotNil(t, taskPhase.Info())
	assert.Nil(t, err)

	taskPhase, err = pytorchResourceHandler.GetTaskPhase(ctx, pluginContext, dummyPytorchJobResourceCreator(kubeflowv1.JobRestarting))
	assert.NoError(t, err)
	assert.Equal(t, pluginsCore.PhaseRunning, taskPhase.Phase())
	assert.NotNil(t, taskPhase.Info())
	assert.Equal(t, taskPhase.Info().LogContext.Pods[0].PodName, "test")
	assert.Nil(t, err)
}

// TestGetTaskPhaseIncreasePhaseVersion checks that a same-phase update bumps
// the phase version (so downstream observers see the change).
func TestGetTaskPhaseIncreasePhaseVersion(t *testing.T) {
	pytorchResourceHandler := pytorchOperatorResourceHandler{}
	ctx := context.TODO()

	pluginState := k8s.PluginState{
		Phase:        pluginsCore.PhaseQueued,
		PhaseVersion: pluginsCore.DefaultPhaseVersion,
		Reason:       "task submitted to K8s",
	}
	pluginCtx := dummyPytorchPluginContext(dummyPytorchTaskTemplate("", dummyPytorchCustomObj(2)), resourceRequirements, pluginState)
	reader := fake.NewFakeClient()
	pluginCtx.OnK8sReader().Return(reader)
	taskPhase, err := pytorchResourceHandler.GetTaskPhase(ctx, pluginCtx, dummyPytorchJobResource(pytorchResourceHandler, 4, kubeflowv1.JobCreated))

	assert.NoError(t, err)
	assert.Equal(t, taskPhase.Version(), pluginsCore.DefaultPhaseVersion+1)
}

// TestGetLogs verifies log link generation for a job with a master replica.
func TestGetLogs(t *testing.T) {
	assert.NoError(t, logs.SetLogConfig(&logs.LogConfig{
		IsKubernetesEnabled: true,
		KubernetesURL:       "k8s.com",
	}))

	hasMaster := true
	workers := int32(2)

	pytorchResourceHandler := pytorchOperatorResourceHandler{}
	pytorchJob := dummyPytorchJobResource(pytorchResourceHandler, workers, kubeflowv1.JobRunning)
	taskTemplate := dummyPytorchTaskTemplate("", dummyPytorchCustomObj(workers))
	pluginContext := dummyPytorchPluginContext(taskTemplate, resourceRequirements, k8s.PluginState{})
	jobLogs, err := common.GetLogs(pluginContext, common.PytorchTaskType, pytorchJob.ObjectMeta, taskTemplate, hasMaster, workers, 0, 0, 0, kubeflowv1.PyTorchJobDefaultContainerName)

	assert.NoError(t, err)
	assert.Equal(t, 3, len(jobLogs))
	assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-master-0/pod?namespace=pytorch-namespace", jobNamespace, jobName), jobLogs[0].Uri)
	assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-0/pod?namespace=pytorch-namespace", jobNamespace, jobName), jobLogs[1].Uri)
	assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-1/pod?namespace=pytorch-namespace", jobNamespace, jobName), jobLogs[2].Uri)
}

// TestGetLogsElastic verifies log link generation for an elastic job (no master).
func TestGetLogsElastic(t *testing.T) {
	assert.NoError(t, logs.SetLogConfig(&logs.LogConfig{
		IsKubernetesEnabled: true,
		KubernetesURL:       "k8s.com",
	}))

	hasMaster := false
	workers := int32(2)

	pytorchResourceHandler := pytorchOperatorResourceHandler{}
	pytorchJob := dummyPytorchJobResource(pytorchResourceHandler, workers, kubeflowv1.JobRunning)
	taskTemplate := dummyPytorchTaskTemplate("", dummyPytorchCustomObj(workers))
	pluginContext := dummyPytorchPluginContext(taskTemplate, resourceRequirements, k8s.PluginState{})
	jobLogs, err := common.GetLogs(pluginContext, common.PytorchTaskType, pytorchJob.ObjectMeta, taskTemplate, hasMaster, workers, 0, 0, 0, kubeflowv1.PyTorchJobDefaultContainerName)

	assert.NoError(t, err)
	assert.Equal(t, 2, len(jobLogs))
	assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-0/pod?namespace=pytorch-namespace", jobNamespace, jobName), jobLogs[0].Uri)
	assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-1/pod?namespace=pytorch-namespace", jobNamespace, jobName), jobLogs[1].Uri)
}

// TestGetProperties asserts the handler exposes default plugin properties.
func TestGetProperties(t *testing.T) {
	pytorchResourceHandler := pytorchOperatorResourceHandler{}
	expected := k8s.PluginProperties{}
	assert.Equal(t, expected, pytorchResourceHandler.GetProperties())
}

// TestReplicaCounts: zero workers must fail BuildResource; positive counts
// must yield both master and worker replica specs.
// NOTE(review): this function is truncated at the end of the visible chunk.
func TestReplicaCounts(t *testing.T) {
	for _, test := range []struct {
		name               string
		workerReplicaCount int32
		expectError        bool
		contains           []kubeflowv1.ReplicaType
		notContains        []kubeflowv1.ReplicaType
	}{
		{"NoWorkers", 0, true, nil, nil},
		{"Works", 1, false, []kubeflowv1.ReplicaType{kubeflowv1.PyTorchJobReplicaTypeMaster, kubeflowv1.PyTorchJobReplicaTypeWorker}, []kubeflowv1.ReplicaType{}},
	} {
		t.Run(test.name, func(t *testing.T) {
			pytorchResourceHandler := pytorchOperatorResourceHandler{}

			ptObj := dummyPytorchCustomObj(test.workerReplicaCount)
			taskTemplate := dummyPytorchTaskTemplate("the job", ptObj)

			res, err := pytorchResourceHandler.BuildResource(context.TODO(), dummyPytorchTaskContext(taskTemplate, resourceRequirements, nil, ""))
			if test.expectError {
				assert.Error(t, err)
				assert.Nil(t, res)
				return
			}

			assert.NoError(t, err)
			assert.NotNil(t, res)

			job, ok := res.(*kubeflowv1.PyTorchJob)
			assert.True(t, ok)

			assert.Len(t, job.Spec.PyTorchReplicaSpecs, len(test.contains))
			for _, replicaType
:= range test.contains { + assert.Contains(t, job.Spec.PyTorchReplicaSpecs, replicaType) + } + for _, replicaType := range test.notContains { + assert.NotContains(t, job.Spec.PyTorchReplicaSpecs, replicaType) + } + }) + } +} + +func TestBuildResourcePytorchV1(t *testing.T) { + taskConfigs := []*kfplugins.DistributedPyTorchTrainingTask{ + { + MasterReplicas: &kfplugins.DistributedPyTorchTrainingReplicaSpec{ + Image: testImageMaster, + Resources: &core.Resources{ + Requests: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "250m"}, + {Name: core.Resources_MEMORY, Value: "250Mi"}, + }, + Limits: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "500m"}, + {Name: core.Resources_MEMORY, Value: "500Mi"}, + }, + }, + RestartPolicy: plugins.RestartPolicy_RESTART_POLICY_ALWAYS, + }, + WorkerReplicas: &kfplugins.DistributedPyTorchTrainingReplicaSpec{ + Replicas: 100, + Resources: &core.Resources{ + Requests: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "1024m"}, + {Name: core.Resources_MEMORY, Value: "1Gi"}, + }, + Limits: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "2048m"}, + {Name: core.Resources_MEMORY, Value: "2Gi"}, + }, + }, + }, + }, + { + MasterReplicas: &kfplugins.DistributedPyTorchTrainingReplicaSpec{ + Common: &plugins.CommonReplicaSpec{ + Image: testImageMaster, + Resources: &core.Resources{ + Requests: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "250m"}, + {Name: core.Resources_MEMORY, Value: "250Mi"}, + }, + Limits: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "500m"}, + {Name: core.Resources_MEMORY, Value: "500Mi"}, + }, + }, + RestartPolicy: plugins.RestartPolicy_RESTART_POLICY_ALWAYS, + }, + }, + WorkerReplicas: &kfplugins.DistributedPyTorchTrainingReplicaSpec{ + Common: &plugins.CommonReplicaSpec{ + Replicas: 100, + Resources: &core.Resources{ + Requests: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, 
Value: "1024m"}, + {Name: core.Resources_MEMORY, Value: "1Gi"}, + }, + Limits: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "2048m"}, + {Name: core.Resources_MEMORY, Value: "2Gi"}, + }, + }, + }, + }, + }, + } + + for _, taskConfig := range taskConfigs { + masterResourceRequirements := &corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("250m"), + corev1.ResourceMemory: resource.MustParse("250Mi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("500m"), + corev1.ResourceMemory: resource.MustParse("500Mi"), + }, + } + + workerResourceRequirements := &corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1024m"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("2048m"), + corev1.ResourceMemory: resource.MustParse("2Gi"), + }, + } + + pytorchResourceHandler := pytorchOperatorResourceHandler{} + + taskTemplate := dummyPytorchTaskTemplate("job4", taskConfig) + taskTemplate.TaskTypeVersion = 1 + + res, err := pytorchResourceHandler.BuildResource(context.TODO(), dummyPytorchTaskContext(taskTemplate, resourceRequirements, nil, "")) + assert.NoError(t, err) + assert.NotNil(t, res) + + pytorchJob, ok := res.(*kubeflowv1.PyTorchJob) + assert.True(t, ok) + + assert.Equal(t, int32(100), *pytorchJob.Spec.PyTorchReplicaSpecs[kubeflowv1.PyTorchJobReplicaTypeWorker].Replicas) + assert.Equal(t, int32(1), *pytorchJob.Spec.PyTorchReplicaSpecs[kubeflowv1.PyTorchJobReplicaTypeMaster].Replicas) + + assert.Equal(t, testImageMaster, pytorchJob.Spec.PyTorchReplicaSpecs[kubeflowv1.PyTorchJobReplicaTypeMaster].Template.Spec.Containers[0].Image) + assert.Equal(t, testImage, pytorchJob.Spec.PyTorchReplicaSpecs[kubeflowv1.PyTorchJobReplicaTypeWorker].Template.Spec.Containers[0].Image) + + assert.Equal(t, *masterResourceRequirements, 
pytorchJob.Spec.PyTorchReplicaSpecs[kubeflowv1.PyTorchJobReplicaTypeMaster].Template.Spec.Containers[0].Resources) + assert.Equal(t, *workerResourceRequirements, pytorchJob.Spec.PyTorchReplicaSpecs[kubeflowv1.PyTorchJobReplicaTypeWorker].Template.Spec.Containers[0].Resources) + + assert.Equal(t, kubeflowv1.RestartPolicyAlways, pytorchJob.Spec.PyTorchReplicaSpecs[kubeflowv1.PyTorchJobReplicaTypeMaster].RestartPolicy) + assert.Equal(t, kubeflowv1.RestartPolicyNever, pytorchJob.Spec.PyTorchReplicaSpecs[kubeflowv1.PyTorchJobReplicaTypeWorker].RestartPolicy) + + assert.Nil(t, pytorchJob.Spec.RunPolicy.CleanPodPolicy) + assert.Nil(t, pytorchJob.Spec.RunPolicy.BackoffLimit) + assert.Nil(t, pytorchJob.Spec.RunPolicy.TTLSecondsAfterFinished) + assert.Nil(t, pytorchJob.Spec.RunPolicy.ActiveDeadlineSeconds) + + assert.Nil(t, pytorchJob.Spec.ElasticPolicy) + } +} + +func TestBuildResourcePytorchV1WithRunPolicy(t *testing.T) { + taskConfigs := []*kfplugins.DistributedPyTorchTrainingTask{ + { + WorkerReplicas: &kfplugins.DistributedPyTorchTrainingReplicaSpec{ + Replicas: 100, + }, + RunPolicy: &kfplugins.RunPolicy{ + CleanPodPolicy: kfplugins.CleanPodPolicy_CLEANPOD_POLICY_ALL, + BackoffLimit: 100, + ActiveDeadlineSeconds: 1000, + TtlSecondsAfterFinished: 10000, + }, + }, + { + WorkerReplicas: &kfplugins.DistributedPyTorchTrainingReplicaSpec{ + Common: &plugins.CommonReplicaSpec{ + Replicas: 100, + }, + }, + RunPolicy: &kfplugins.RunPolicy{ + CleanPodPolicy: kfplugins.CleanPodPolicy_CLEANPOD_POLICY_ALL, + BackoffLimit: 100, + ActiveDeadlineSeconds: 1000, + TtlSecondsAfterFinished: 10000, + }, + }, + } + + for _, taskConfig := range taskConfigs { + pytorchResourceHandler := pytorchOperatorResourceHandler{} + + taskTemplate := dummyPytorchTaskTemplate("job5", taskConfig) + taskTemplate.TaskTypeVersion = 1 + + res, err := pytorchResourceHandler.BuildResource(context.TODO(), dummyPytorchTaskContext(taskTemplate, resourceRequirements, nil, "")) + assert.NoError(t, err) + 
assert.NotNil(t, res) + + pytorchJob, ok := res.(*kubeflowv1.PyTorchJob) + assert.True(t, ok) + assert.Equal(t, int32(100), *pytorchJob.Spec.PyTorchReplicaSpecs[kubeflowv1.PyTorchJobReplicaTypeWorker].Replicas) + assert.Equal(t, int32(1), *pytorchJob.Spec.PyTorchReplicaSpecs[kubeflowv1.PyTorchJobReplicaTypeMaster].Replicas) + assert.Equal(t, kubeflowv1.CleanPodPolicyAll, *pytorchJob.Spec.RunPolicy.CleanPodPolicy) + assert.Equal(t, int32(100), *pytorchJob.Spec.RunPolicy.BackoffLimit) + assert.Equal(t, int64(1000), *pytorchJob.Spec.RunPolicy.ActiveDeadlineSeconds) + assert.Equal(t, int32(10000), *pytorchJob.Spec.RunPolicy.TTLSecondsAfterFinished) + } +} + +func TestBuildResourcePytorchV1WithOnlyWorkerSpec(t *testing.T) { + taskConfigs := []*kfplugins.DistributedPyTorchTrainingTask{ + { + WorkerReplicas: &kfplugins.DistributedPyTorchTrainingReplicaSpec{ + Replicas: 100, + Resources: &core.Resources{ + Requests: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "1024m"}, + {Name: core.Resources_MEMORY, Value: "1Gi"}, + }, + Limits: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "2048m"}, + {Name: core.Resources_MEMORY, Value: "2Gi"}, + }, + }, + }, + }, + { + WorkerReplicas: &kfplugins.DistributedPyTorchTrainingReplicaSpec{ + Common: &plugins.CommonReplicaSpec{ + Replicas: 100, + Resources: &core.Resources{ + Requests: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "1024m"}, + {Name: core.Resources_MEMORY, Value: "1Gi"}, + }, + Limits: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "2048m"}, + {Name: core.Resources_MEMORY, Value: "2Gi"}, + }, + }, + }, + }, + }, + } + + for _, taskConfig := range taskConfigs { + // Master Replica should use resource from task override if not set + taskOverrideResourceRequirements := &corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1000m"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + 
flytek8s.ResourceNvidiaGPU: resource.MustParse("1"), + }, + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("512Mi"), + flytek8s.ResourceNvidiaGPU: resource.MustParse("1"), + }, + } + + workerResourceRequirements := &corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1024m"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("2048m"), + corev1.ResourceMemory: resource.MustParse("2Gi"), + }, + } + + pytorchResourceHandler := pytorchOperatorResourceHandler{} + + taskTemplate := dummyPytorchTaskTemplate("job5", taskConfig) + taskTemplate.TaskTypeVersion = 1 + + res, err := pytorchResourceHandler.BuildResource(context.TODO(), dummyPytorchTaskContext(taskTemplate, resourceRequirements, nil, "")) + assert.NoError(t, err) + assert.NotNil(t, res) + + pytorchJob, ok := res.(*kubeflowv1.PyTorchJob) + assert.True(t, ok) + + assert.Equal(t, int32(100), *pytorchJob.Spec.PyTorchReplicaSpecs[kubeflowv1.PyTorchJobReplicaTypeWorker].Replicas) + assert.Equal(t, int32(1), *pytorchJob.Spec.PyTorchReplicaSpecs[kubeflowv1.PyTorchJobReplicaTypeMaster].Replicas) + + assert.Equal(t, testImage, pytorchJob.Spec.PyTorchReplicaSpecs[kubeflowv1.PyTorchJobReplicaTypeMaster].Template.Spec.Containers[0].Image) + assert.Equal(t, testImage, pytorchJob.Spec.PyTorchReplicaSpecs[kubeflowv1.PyTorchJobReplicaTypeWorker].Template.Spec.Containers[0].Image) + + assert.Equal(t, *taskOverrideResourceRequirements, pytorchJob.Spec.PyTorchReplicaSpecs[kubeflowv1.PyTorchJobReplicaTypeMaster].Template.Spec.Containers[0].Resources) + assert.Equal(t, *workerResourceRequirements, pytorchJob.Spec.PyTorchReplicaSpecs[kubeflowv1.PyTorchJobReplicaTypeWorker].Template.Spec.Containers[0].Resources) + + assert.Equal(t, kubeflowv1.RestartPolicyNever, 
pytorchJob.Spec.PyTorchReplicaSpecs[kubeflowv1.PyTorchJobReplicaTypeMaster].RestartPolicy) + assert.Equal(t, kubeflowv1.RestartPolicyNever, pytorchJob.Spec.PyTorchReplicaSpecs[kubeflowv1.PyTorchJobReplicaTypeWorker].RestartPolicy) + + assert.Nil(t, pytorchJob.Spec.ElasticPolicy) + } +} + +func TestBuildResourcePytorchV1ResourceTolerations(t *testing.T) { + gpuToleration := corev1.Toleration{ + Key: "nvidia.com/gpu", + Value: "present", + Operator: corev1.TolerationOpEqual, + Effect: corev1.TaintEffectNoSchedule, + } + assert.NoError(t, flytek8sConfig.SetK8sPluginConfig(&flytek8sConfig.K8sPluginConfig{ + GpuResourceName: flytek8s.ResourceNvidiaGPU, + ResourceTolerations: map[corev1.ResourceName][]corev1.Toleration{ + flytek8s.ResourceNvidiaGPU: {gpuToleration}, + }, + })) + + taskConfigs := []*kfplugins.DistributedPyTorchTrainingTask{ + { + MasterReplicas: &kfplugins.DistributedPyTorchTrainingReplicaSpec{ + Resources: &core.Resources{ + Requests: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "250m"}, + {Name: core.Resources_MEMORY, Value: "250Mi"}, + }, + Limits: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "500m"}, + {Name: core.Resources_MEMORY, Value: "500Mi"}, + }, + }, + }, + WorkerReplicas: &kfplugins.DistributedPyTorchTrainingReplicaSpec{ + Replicas: 100, + Resources: &core.Resources{ + Requests: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "1024m"}, + {Name: core.Resources_MEMORY, Value: "1Gi"}, + {Name: core.Resources_GPU, Value: "1"}, + }, + Limits: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "2048m"}, + {Name: core.Resources_MEMORY, Value: "2Gi"}, + {Name: core.Resources_GPU, Value: "1"}, + }, + }, + }, + }, + { + MasterReplicas: &kfplugins.DistributedPyTorchTrainingReplicaSpec{ + Common: &plugins.CommonReplicaSpec{ + Resources: &core.Resources{ + Requests: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "250m"}, + {Name: 
core.Resources_MEMORY, Value: "250Mi"}, + }, + Limits: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "500m"}, + {Name: core.Resources_MEMORY, Value: "500Mi"}, + }, + }, + }, + }, + WorkerReplicas: &kfplugins.DistributedPyTorchTrainingReplicaSpec{ + Common: &plugins.CommonReplicaSpec{ + Replicas: 100, + Resources: &core.Resources{ + Requests: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "1024m"}, + {Name: core.Resources_MEMORY, Value: "1Gi"}, + {Name: core.Resources_GPU, Value: "1"}, + }, + Limits: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "2048m"}, + {Name: core.Resources_MEMORY, Value: "2Gi"}, + {Name: core.Resources_GPU, Value: "1"}, + }, + }, + }, + }, + }, + } + + for _, taskConfig := range taskConfigs { + pytorchResourceHandler := pytorchOperatorResourceHandler{} + + taskTemplate := dummyPytorchTaskTemplate("job4", taskConfig) + taskTemplate.TaskTypeVersion = 1 + + res, err := pytorchResourceHandler.BuildResource(context.TODO(), dummyPytorchTaskContext(taskTemplate, resourceRequirements, nil, "")) + assert.NoError(t, err) + assert.NotNil(t, res) + + pytorchJob, ok := res.(*kubeflowv1.PyTorchJob) + assert.True(t, ok) + + assert.NotContains(t, pytorchJob.Spec.PyTorchReplicaSpecs[kubeflowv1.PyTorchJobReplicaTypeMaster].Template.Spec.Tolerations, gpuToleration) + assert.Contains(t, pytorchJob.Spec.PyTorchReplicaSpecs[kubeflowv1.PyTorchJobReplicaTypeWorker].Template.Spec.Tolerations, gpuToleration) + } +} + +func TestBuildResourcePytorchV1WithElastic(t *testing.T) { + taskConfigs := []*kfplugins.DistributedPyTorchTrainingTask{ + { + WorkerReplicas: &kfplugins.DistributedPyTorchTrainingReplicaSpec{ + Replicas: 2, + }, + ElasticConfig: &kfplugins.ElasticConfig{MinReplicas: 1, MaxReplicas: 2, NprocPerNode: 4, RdzvBackend: "c10d"}, + }, + { + WorkerReplicas: &kfplugins.DistributedPyTorchTrainingReplicaSpec{ + Common: &plugins.CommonReplicaSpec{ + Replicas: 2, + }, + }, + ElasticConfig: 
&kfplugins.ElasticConfig{MinReplicas: 1, MaxReplicas: 2, NprocPerNode: 4, RdzvBackend: "c10d"}, + }, + } + + for _, taskConfig := range taskConfigs { + taskTemplate := dummyPytorchTaskTemplate("job5", taskConfig) + taskTemplate.TaskTypeVersion = 1 + + pytorchResourceHandler := pytorchOperatorResourceHandler{} + resource, err := pytorchResourceHandler.BuildResource(context.TODO(), dummyPytorchTaskContext(taskTemplate, resourceRequirements, nil, "")) + assert.NoError(t, err) + assert.NotNil(t, resource) + + pytorchJob, ok := resource.(*kubeflowv1.PyTorchJob) + assert.True(t, ok) + assert.Equal(t, int32(2), *pytorchJob.Spec.PyTorchReplicaSpecs[kubeflowv1.PyTorchJobReplicaTypeWorker].Replicas) + assert.NotNil(t, pytorchJob.Spec.ElasticPolicy) + assert.Equal(t, int32(1), *pytorchJob.Spec.ElasticPolicy.MinReplicas) + assert.Equal(t, int32(2), *pytorchJob.Spec.ElasticPolicy.MaxReplicas) + assert.Equal(t, int32(4), *pytorchJob.Spec.ElasticPolicy.NProcPerNode) + assert.Equal(t, kubeflowv1.RDZVBackend("c10d"), *pytorchJob.Spec.ElasticPolicy.RDZVBackend) + + assert.Equal(t, 1, len(pytorchJob.Spec.PyTorchReplicaSpecs)) + assert.Contains(t, pytorchJob.Spec.PyTorchReplicaSpecs, kubeflowv1.PyTorchJobReplicaTypeWorker) + + var hasContainerWithDefaultPytorchName = false + + for _, container := range pytorchJob.Spec.PyTorchReplicaSpecs[kubeflowv1.PyTorchJobReplicaTypeWorker].Template.Spec.Containers { + if container.Name == kubeflowv1.PyTorchJobDefaultContainerName { + hasContainerWithDefaultPytorchName = true + } + } + + assert.True(t, hasContainerWithDefaultPytorchName) + } +} + +func TestBuildResourcePytorchV1WithZeroWorker(t *testing.T) { + taskConfigs := []*kfplugins.DistributedPyTorchTrainingTask{ + { + WorkerReplicas: &kfplugins.DistributedPyTorchTrainingReplicaSpec{ + Replicas: 0, + }, + }, + { + WorkerReplicas: &kfplugins.DistributedPyTorchTrainingReplicaSpec{ + Common: &plugins.CommonReplicaSpec{ + Replicas: 0, + }, + }, + }, + } + + for _, taskConfig := range taskConfigs 
{ + pytorchResourceHandler := pytorchOperatorResourceHandler{} + + taskTemplate := dummyPytorchTaskTemplate("job5", taskConfig) + taskTemplate.TaskTypeVersion = 1 + _, err := pytorchResourceHandler.BuildResource(context.TODO(), dummyPytorchTaskContext(taskTemplate, resourceRequirements, nil, "")) + assert.Error(t, err) + } +} + +func TestParseElasticConfig(t *testing.T) { + elasticConfig := plugins.ElasticConfig{MinReplicas: 1, MaxReplicas: 2, NprocPerNode: 4, RdzvBackend: "c10d"} + elasticPolicy := ParseElasticConfig(&elasticConfig) + assert.Equal(t, int32(1), *elasticPolicy.MinReplicas) + assert.Equal(t, int32(2), *elasticPolicy.MaxReplicas) + assert.Equal(t, int32(4), *elasticPolicy.NProcPerNode) + assert.Equal(t, kubeflowv1.RDZVBackend("c10d"), *elasticPolicy.RDZVBackend) +} + +func TestGetReplicaCount(t *testing.T) { + pytorchResourceHandler := pytorchOperatorResourceHandler{} + tfObj := dummyPytorchCustomObj(1) + taskTemplate := dummyPytorchTaskTemplate("the job", tfObj) + resource, err := pytorchResourceHandler.BuildResource(context.TODO(), dummyPytorchTaskContext(taskTemplate, resourceRequirements, nil, "")) + assert.NoError(t, err) + assert.NotNil(t, resource) + PytorchJob, ok := resource.(*kubeflowv1.PyTorchJob) + assert.True(t, ok) + + assert.NotNil(t, common.GetReplicaCount(PytorchJob.Spec.PyTorchReplicaSpecs, kubeflowv1.PyTorchJobReplicaTypeWorker)) +} + +func TestGetTaskPhaseWithFailedPod(t *testing.T) { + pytorchResourceHandler := pytorchOperatorResourceHandler{} + ctx := context.TODO() + + // Create a failed worker-0 pod + pod := &corev1.Pod{ + ObjectMeta: v1.ObjectMeta{ + Name: jobName + "-worker-0", + Namespace: jobNamespace, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "pytorch", + }, + }, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodFailed, + ContainerStatuses: []corev1.ContainerStatus{ + { + Name: "pytorch", + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + ExitCode: 1, + 
Reason: "Error", + Message: "Container failed", + }, + }, + }, + }, + }, + } + + pluginContext := dummyPytorchPluginContext(dummyPytorchTaskTemplate("", dummyPytorchCustomObj(2)), resourceRequirements, k8s.PluginState{}) + reader := fake.NewFakeClient(pod) + pluginContext.OnK8sReader().Return(reader) + + // Even though PyTorchJob status is running, should return failure due to pod status + taskPhase, err := pytorchResourceHandler.GetTaskPhase(ctx, pluginContext, dummyPytorchJobResource(pytorchResourceHandler, 2, kubeflowv1.JobRunning)) + assert.NoError(t, err) + assert.True(t, taskPhase.Phase().IsFailure()) +} + +func TestGetTaskPhaseWithCrashLoopBackOff(t *testing.T) { + pytorchResourceHandler := pytorchOperatorResourceHandler{} + ctx := context.TODO() + + // Create a worker-0 pod in crash loop + pod := &corev1.Pod{ + ObjectMeta: v1.ObjectMeta{ + Name: jobName + "-worker-0", + Namespace: jobNamespace, + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "pytorch", + }, + }, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + ContainerStatuses: []corev1.ContainerStatus{ + { + Name: "pytorch", + Ready: false, + RestartCount: 5, + State: corev1.ContainerState{ + Waiting: &corev1.ContainerStateWaiting{ + Reason: "CrashLoopBackOff", + Message: "Back-off restarting failed container", + }, + }, + LastTerminationState: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + ExitCode: 1, + Reason: "Error", + }, + }, + }, + }, + }, + } + + pluginContext := dummyPytorchPluginContext(dummyPytorchTaskTemplate("", dummyPytorchCustomObj(2)), resourceRequirements, k8s.PluginState{}) + reader := fake.NewFakeClient(pod) + pluginContext.OnK8sReader().Return(reader) + + // CrashLoopBackOff should eventually lead to failure + taskPhase, err := pytorchResourceHandler.GetTaskPhase(ctx, pluginContext, dummyPytorchJobResource(pytorchResourceHandler, 2, kubeflowv1.JobRunning)) + assert.NoError(t, err) + // CrashLoopBackOff may not 
immediately fail, so we just check it doesn't crash + assert.NotNil(t, taskPhase) +} diff --git a/flyteplugins/go/tasks/plugins/k8s/kfoperators/tensorflow/tensorflow.go b/flyteplugins/go/tasks/plugins/k8s/kfoperators/tensorflow/tensorflow.go new file mode 100644 index 0000000000..3ecaa30562 --- /dev/null +++ b/flyteplugins/go/tasks/plugins/k8s/kfoperators/tensorflow/tensorflow.go @@ -0,0 +1,216 @@ +package tensorflow + +import ( + "context" + "fmt" + "time" + + kubeflowv1 "github.com/kubeflow/training-operator/pkg/apis/kubeflow.org/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes/scheme" + "sigs.k8s.io/controller-runtime/pkg/client" + + flyteerr "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/errors" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery" + pluginsCore "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/k8s" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/utils" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/plugins/k8s/kfoperators/common" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins" + kfplugins "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins/kubeflow" +) + +type tensorflowOperatorResourceHandler struct { +} + +// Sanity test that the plugin implements method of k8s.Plugin +var _ k8s.Plugin = tensorflowOperatorResourceHandler{} + +func (tensorflowOperatorResourceHandler) GetProperties() k8s.PluginProperties { + return k8s.PluginProperties{} +} + +// Defines a func to create a query object (typically just object and type meta portions) that's used to query k8s +// resources. 
+func (tensorflowOperatorResourceHandler) BuildIdentityResource(ctx context.Context, taskCtx pluginsCore.TaskExecutionMetadata) (client.Object, error) { + return &kubeflowv1.TFJob{ + TypeMeta: metav1.TypeMeta{ + Kind: kubeflowv1.TFJobKind, + APIVersion: kubeflowv1.SchemeGroupVersion.String(), + }, + }, nil +} + +// Defines a func to create the full resource object that will be posted to k8s. +func (tensorflowOperatorResourceHandler) BuildResource(ctx context.Context, taskCtx pluginsCore.TaskExecutionContext) (client.Object, error) { + taskTemplate, err := taskCtx.TaskReader().Read(ctx) + + if err != nil { + return nil, flyteerr.Errorf(flyteerr.BadTaskSpecification, "unable to fetch task specification [%v]", err.Error()) + } else if taskTemplate == nil { + return nil, flyteerr.Errorf(flyteerr.BadTaskSpecification, "nil task specification") + } + + replicaSpecMap := make(map[kubeflowv1.ReplicaType]*kubeflowv1.ReplicaSpec) + runPolicy := kubeflowv1.RunPolicy{} + + if taskTemplate.TaskTypeVersion == 0 { + tensorflowTaskExtraArgs := plugins.DistributedTensorflowTrainingTask{} + + err = utils.UnmarshalStruct(taskTemplate.GetCustom(), &tensorflowTaskExtraArgs) + if err != nil { + return nil, flyteerr.Errorf(flyteerr.BadTaskSpecification, "invalid TaskSpecification [%v], Err: [%v]", taskTemplate.GetCustom(), err.Error()) + } + + replicaSpec, err := common.ToReplicaSpec(ctx, taskCtx, kubeflowv1.TFJobDefaultContainerName) + if err != nil { + return nil, flyteerr.Errorf(flyteerr.BadTaskSpecification, "Unable to create replica spec: [%v]", err.Error()) + } + + replicaNumMap := map[kubeflowv1.ReplicaType]int32{ + kubeflowv1.TFJobReplicaTypeChief: tensorflowTaskExtraArgs.GetChiefReplicas(), + kubeflowv1.TFJobReplicaTypeWorker: tensorflowTaskExtraArgs.GetWorkers(), + kubeflowv1.TFJobReplicaTypePS: tensorflowTaskExtraArgs.GetPsReplicas(), + kubeflowv1.TFJobReplicaTypeEval: tensorflowTaskExtraArgs.GetEvaluatorReplicas(), + } + for t, r := range replicaNumMap { + rs := 
replicaSpec.DeepCopy() + replicas := r + if replicas > 0 { + rs.Replicas = &replicas + replicaSpecMap[t] = rs + } + } + + } else if taskTemplate.TaskTypeVersion == 1 { + kfTensorflowTaskExtraArgs := kfplugins.DistributedTensorflowTrainingTask{} + + err = utils.UnmarshalStruct(taskTemplate.GetCustom(), &kfTensorflowTaskExtraArgs) + if err != nil { + return nil, flyteerr.Errorf(flyteerr.BadTaskSpecification, "invalid TaskSpecification [%v], Err: [%v]", taskTemplate.GetCustom(), err.Error()) + } + + replicaSpecCfgMap := map[kubeflowv1.ReplicaType]*kfplugins.DistributedTensorflowTrainingReplicaSpec{ + kubeflowv1.TFJobReplicaTypeChief: kfTensorflowTaskExtraArgs.GetChiefReplicas(), + kubeflowv1.TFJobReplicaTypeWorker: kfTensorflowTaskExtraArgs.GetWorkerReplicas(), + kubeflowv1.TFJobReplicaTypePS: kfTensorflowTaskExtraArgs.GetPsReplicas(), + kubeflowv1.TFJobReplicaTypeEval: kfTensorflowTaskExtraArgs.GetEvaluatorReplicas(), + } + for t, cfg := range replicaSpecCfgMap { + // Short circuit if replica set has no replicas to avoid unnecessarily + // generating pod specs + var replicas int32 + // replicas is deprecated since the common replica spec is introduced. 
+ // Therefore, if the common replica spec is set, use that to get the common fields + if cfg.GetCommon() != nil { + replicas = cfg.GetCommon().GetReplicas() + } else { + replicas = cfg.GetReplicas() + } + if replicas <= 0 { + continue + } + + rs, err := common.ToReplicaSpecWithOverrides(ctx, taskCtx, cfg, kubeflowv1.TFJobDefaultContainerName, false) + if err != nil { + return nil, flyteerr.Errorf(flyteerr.BadTaskSpecification, "Unable to create replica spec: [%v]", err.Error()) + } + replicaSpecMap[t] = rs + } + + if kfTensorflowTaskExtraArgs.GetRunPolicy() != nil { + runPolicy = common.ParseRunPolicy(*kfTensorflowTaskExtraArgs.GetRunPolicy()) + } + + } else { + return nil, flyteerr.Errorf(flyteerr.BadTaskSpecification, + "Invalid TaskSpecification, unsupported task template version [%v] key", taskTemplate.TaskTypeVersion) + } + + if v, ok := replicaSpecMap[kubeflowv1.TFJobReplicaTypeWorker]; !ok || *v.Replicas <= 0 { + return nil, fmt.Errorf("number of workers must be greater than 0") + } + + jobSpec := kubeflowv1.TFJobSpec{ + TFReplicaSpecs: replicaSpecMap, + RunPolicy: runPolicy, + } + + job := &kubeflowv1.TFJob{ + TypeMeta: metav1.TypeMeta{ + Kind: kubeflowv1.TFJobKind, + APIVersion: kubeflowv1.SchemeGroupVersion.String(), + }, + Spec: jobSpec, + } + + return job, nil +} + +// Analyses the k8s resource and reports the status as TaskPhase. This call is expected to be relatively fast, +// any operations that might take a long time (limits are configured system-wide) should be offloaded to the +// background. 
+func (tensorflowOperatorResourceHandler) GetTaskPhase(ctx context.Context, pluginContext k8s.PluginContext, resource client.Object) (pluginsCore.PhaseInfo, error) { + app, ok := resource.(*kubeflowv1.TFJob) + if !ok { + return pluginsCore.PhaseInfoUndefined, fmt.Errorf("failed to convert resource data type") + } + + taskTemplate, err := pluginContext.TaskReader().Read(ctx) + if err != nil { + return pluginsCore.PhaseInfoUndefined, err + } + + workersCount := common.GetReplicaCount(app.Spec.TFReplicaSpecs, kubeflowv1.TFJobReplicaTypeWorker) + psReplicasCount := common.GetReplicaCount(app.Spec.TFReplicaSpecs, kubeflowv1.TFJobReplicaTypePS) + chiefCount := common.GetReplicaCount(app.Spec.TFReplicaSpecs, kubeflowv1.TFJobReplicaTypeChief) + evaluatorReplicasCount := common.GetReplicaCount(app.Spec.TFReplicaSpecs, kubeflowv1.TFJobReplicaTypeEval) + + taskLogs, err := common.GetLogs(pluginContext, common.TensorflowTaskType, app.ObjectMeta, taskTemplate, false, + *workersCount, *psReplicasCount, *chiefCount, *evaluatorReplicasCount, kubeflowv1.TFJobDefaultContainerName) + if err != nil { + return pluginsCore.PhaseInfoUndefined, err + } + + if app.Status.StartTime == nil && app.CreationTimestamp.Add(common.GetConfig().Timeout.Duration).Before(time.Now()) { + return pluginsCore.PhaseInfoUndefined, fmt.Errorf("kubeflow operator hasn't updated the tensorflow custom resource since creation time %v", app.CreationTimestamp) + } + + currentCondition, err := common.ExtractCurrentCondition(app.Status.Conditions) + if err != nil { + return pluginsCore.PhaseInfoUndefined, err + } + + occurredAt := time.Now() + statusDetails, _ := utils.MarshalObjToStruct(app.Status) + taskPhaseInfo := pluginsCore.TaskInfo{ + Logs: taskLogs, + LogContext: nil, // TODO populate log context + OccurredAt: &occurredAt, + CustomInfo: statusDetails, + } + + phaseInfo, err := common.GetPhaseInfo(currentCondition, occurredAt, taskPhaseInfo) + + phaseVersionUpdateErr := 
k8s.MaybeUpdatePhaseVersionFromPluginContext(&phaseInfo, &pluginContext) + if phaseVersionUpdateErr != nil { + return phaseInfo, phaseVersionUpdateErr + } + + return phaseInfo, err +} + +func init() { + if err := kubeflowv1.AddToScheme(scheme.Scheme); err != nil { + panic(err) + } + + pluginmachinery.PluginRegistry().RegisterK8sPlugin( + k8s.PluginEntry{ + ID: common.TensorflowTaskType, + RegisteredTaskTypes: []pluginsCore.TaskType{common.TensorflowTaskType}, + ResourceToWatch: &kubeflowv1.TFJob{}, + Plugin: tensorflowOperatorResourceHandler{}, + IsDefault: false, + }) +} diff --git a/flyteplugins/go/tasks/plugins/k8s/kfoperators/tensorflow/tensorflow_test.go b/flyteplugins/go/tasks/plugins/k8s/kfoperators/tensorflow/tensorflow_test.go new file mode 100644 index 0000000000..6ae9276b2f --- /dev/null +++ b/flyteplugins/go/tasks/plugins/k8s/kfoperators/tensorflow/tensorflow_test.go @@ -0,0 +1,1171 @@ +package tensorflow + +import ( + "context" + "fmt" + "reflect" + "testing" + "time" + + structpb "github.com/golang/protobuf/ptypes/struct" + kubeflowv1 "github.com/kubeflow/training-operator/pkg/apis/kubeflow.org/v1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/logs" + pluginsCore "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core/mocks" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/flytek8s" + flytek8sConfig "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/flytek8s/config" + pluginIOMocks "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/io/mocks" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/k8s" + k8smocks "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/k8s/mocks" + 
	"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/utils"
	"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/plugins/k8s/kfoperators/common"
	stdlibUtils "github.com/flyteorg/flyte/v2/flytestdlib/utils"
	"github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core"
	"github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins"
	kfplugins "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins/kubeflow"
)

// Shared fixtures used across the TFJob plugin tests.
const testImage = "image://"
const serviceAccount = "tensorflow_sa"

var (
	dummyEnvVars = []*core.KeyValuePair{
		{Key: "Env_Var", Value: "Env_Val"},
	}

	testArgs = []string{
		"test-args",
	}

	dummyAnnotations = map[string]string{
		"annotation-key": "annotation-value",
	}
	dummyLabels = map[string]string{
		"label-key": "label-value",
	}

	// Default container resources; BuildResource tests assert these are applied
	// to every replica's container.
	resourceRequirements = &corev1.ResourceRequirements{
		Limits: corev1.ResourceList{
			corev1.ResourceCPU:         resource.MustParse("1000m"),
			corev1.ResourceMemory:      resource.MustParse("1Gi"),
			flytek8s.ResourceNvidiaGPU: resource.MustParse("1"),
		},
		Requests: corev1.ResourceList{
			corev1.ResourceCPU:         resource.MustParse("100m"),
			corev1.ResourceMemory:      resource.MustParse("512Mi"),
			flytek8s.ResourceNvidiaGPU: resource.MustParse("1"),
		},
	}

	jobName      = "the-job"
	jobNamespace = "tensorflow-namespace"
)

// dummyTensorFlowCustomObj builds a v0 (TaskTypeVersion 0) distributed
// TensorFlow task config with the given replica counts.
func dummyTensorFlowCustomObj(workers int32, psReplicas int32, chiefReplicas int32, evaluatorReplicas int32) *plugins.DistributedTensorflowTrainingTask {
	return &plugins.DistributedTensorflowTrainingTask{
		Workers:           workers,
		PsReplicas:        psReplicas,
		ChiefReplicas:     chiefReplicas,
		EvaluatorReplicas: evaluatorReplicas,
	}
}

// dummyTensorFlowTaskTemplate wraps either a v0 (plugins) or v1 (kfplugins)
// distributed-training config into a container TaskTemplate, serializing the
// config into the template's Custom struct. Panics on marshalling failure
// (acceptable in a test helper).
func dummyTensorFlowTaskTemplate(id string, args ...interface{}) *core.TaskTemplate {

	var tfObjJSON string
	var err error

	for _, arg := range args {
		switch t := arg.(type) {
		case *kfplugins.DistributedTensorflowTrainingTask:
			var tensorflowCustomObj = t
			tfObjJSON, err = utils.MarshalToString(tensorflowCustomObj)
		case *plugins.DistributedTensorflowTrainingTask:
			var tensorflowCustomObj = t
			tfObjJSON, err = utils.MarshalToString(tensorflowCustomObj)
		default:
			err = fmt.Errorf("Unknown input type %T", t)
		}
	}

	if err != nil {
		panic(err)
	}

	structObj := structpb.Struct{}

	err = stdlibUtils.UnmarshalStringToPb(tfObjJSON, &structObj)
	if err != nil {
		panic(err)
	}

	return &core.TaskTemplate{
		Id:   &core.Identifier{Name: id},
		Type: "container",
		Target: &core.TaskTemplate_Container{
			Container: &core.Container{
				Image: testImage,
				Args:  testArgs,
				Env:   dummyEnvVars,
			},
		},
		Custom: &structObj,
	}
}

// dummyTensorFlowTaskContext assembles a fully-mocked TaskExecutionContext for
// BuildResource tests: stubbed input/output paths, the given task template,
// resource/extended-resource overrides, and an empty plugin state.
func dummyTensorFlowTaskContext(taskTemplate *core.TaskTemplate, resources *corev1.ResourceRequirements, extendedResources *core.ExtendedResources) pluginsCore.TaskExecutionContext {
	taskCtx := &mocks.TaskExecutionContext{}
	inputReader := &pluginIOMocks.InputReader{}
	inputReader.OnGetInputPrefixPath().Return("/input/prefix")
	inputReader.OnGetInputPath().Return("/input")
	inputReader.OnGetMatch(mock.Anything).Return(&core.LiteralMap{}, nil)
	taskCtx.OnInputReader().Return(inputReader)

	outputReader := &pluginIOMocks.OutputWriter{}
	outputReader.OnGetOutputPath().Return("/data/outputs.pb")
	outputReader.OnGetOutputPrefixPath().Return("/data/")
	outputReader.OnGetRawOutputPrefix().Return("")
	outputReader.OnGetCheckpointPrefix().Return("/checkpoint")
	outputReader.OnGetPreviousCheckpointsPrefix().Return("/prev")
	taskCtx.OnOutputWriter().Return(outputReader)

	taskReader := &mocks.TaskReader{}
	taskReader.OnReadMatch(mock.Anything).Return(taskTemplate, nil)
	taskCtx.OnTaskReader().Return(taskReader)

	tID := &mocks.TaskExecutionID{}
	tID.OnGetID().Return(core.TaskExecutionIdentifier{
		NodeExecutionId: &core.NodeExecutionIdentifier{
			ExecutionId: &core.WorkflowExecutionIdentifier{
				Name:    "my_name",
				Project: "my_project",
				Domain:  "my_domain",
			},
		},
	})
	tID.OnGetGeneratedName().Return("some-acceptable-name")
	tID.On("GetUniqueNodeID").Return("an-unique-id")

	overrides := &mocks.TaskOverrides{}
	overrides.OnGetResources().Return(resources)
	overrides.OnGetExtendedResources().Return(extendedResources)
	overrides.OnGetContainerImage().Return("")
	overrides.OnGetPodTemplate().Return(nil)

	taskExecutionMetadata := &mocks.TaskExecutionMetadata{}
	taskExecutionMetadata.OnGetTaskExecutionID().Return(tID)
	taskExecutionMetadata.OnGetNamespace().Return("test-namespace")
	taskExecutionMetadata.OnGetAnnotations().Return(dummyAnnotations)
	taskExecutionMetadata.OnGetLabels().Return(dummyLabels)
	taskExecutionMetadata.OnGetOwnerReference().Return(v1.OwnerReference{
		Kind: "node",
		Name: "blah",
	})
	taskExecutionMetadata.OnIsInterruptible().Return(true)
	taskExecutionMetadata.OnGetOverrides().Return(overrides)
	taskExecutionMetadata.OnGetK8sServiceAccount().Return(serviceAccount)
	taskExecutionMetadata.OnGetPlatformResources().Return(&corev1.ResourceRequirements{})
	taskExecutionMetadata.OnGetEnvironmentVariables().Return(nil)
	taskExecutionMetadata.OnGetConsoleURL().Return("")
	taskCtx.OnTaskExecutionMetadata().Return(taskExecutionMetadata)
	pluginStateReaderMock := mocks.PluginStateReader{}
	pluginStateReaderMock.On("Get", mock.AnythingOfType(reflect.TypeOf(&k8s.PluginState{}).String())).Return(
		func(v interface{}) uint8 {
			// Always hand back a zero-valued plugin state.
			*(v.(*k8s.PluginState)) = k8s.PluginState{}
			return 0
		},
		func(v interface{}) error {
			return nil
		})

	taskCtx.OnPluginStateReader().Return(&pluginStateReaderMock)
	return taskCtx
}

// dummyTensorFlowPluginContext assembles a mocked k8s.PluginContext whose
// PluginStateReader yields the supplied pluginState; used by GetTaskPhase tests.
func dummyTensorFlowPluginContext(taskTemplate *core.TaskTemplate, resources *corev1.ResourceRequirements, extendedResources *core.ExtendedResources, pluginState k8s.PluginState) *k8smocks.PluginContext {
	pCtx := &k8smocks.PluginContext{}
	inputReader := &pluginIOMocks.InputReader{}
	inputReader.OnGetInputPrefixPath().Return("/input/prefix")
	inputReader.OnGetInputPath().Return("/input")
	inputReader.OnGetMatch(mock.Anything).Return(&core.LiteralMap{}, nil)
	pCtx.OnInputReader().Return(inputReader)

	outputReader := &pluginIOMocks.OutputWriter{}
	outputReader.OnGetOutputPath().Return("/data/outputs.pb")
	outputReader.OnGetOutputPrefixPath().Return("/data/")
	outputReader.OnGetRawOutputPrefix().Return("")
	outputReader.OnGetCheckpointPrefix().Return("/checkpoint")
	outputReader.OnGetPreviousCheckpointsPrefix().Return("/prev")
	pCtx.OnOutputWriter().Return(outputReader)

	taskReader := &mocks.TaskReader{}
	taskReader.OnReadMatch(mock.Anything).Return(taskTemplate, nil)
	pCtx.OnTaskReader().Return(taskReader)

	tID := &mocks.TaskExecutionID{}
	tID.OnGetID().Return(core.TaskExecutionIdentifier{
		NodeExecutionId: &core.NodeExecutionIdentifier{
			ExecutionId: &core.WorkflowExecutionIdentifier{
				Name:    "my_name",
				Project: "my_project",
				Domain:  "my_domain",
			},
		},
	})
	tID.OnGetGeneratedName().Return("some-acceptable-name")
	tID.On("GetUniqueNodeID").Return("an-unique-id")

	overrides := &mocks.TaskOverrides{}
	overrides.OnGetResources().Return(resources)
	overrides.OnGetExtendedResources().Return(extendedResources)
	overrides.OnGetContainerImage().Return("")

	taskExecutionMetadata := &mocks.TaskExecutionMetadata{}
	taskExecutionMetadata.OnGetTaskExecutionID().Return(tID)
	taskExecutionMetadata.OnGetNamespace().Return("test-namespace")
	taskExecutionMetadata.OnGetAnnotations().Return(dummyAnnotations)
	taskExecutionMetadata.OnGetLabels().Return(dummyLabels)
	taskExecutionMetadata.OnGetOwnerReference().Return(v1.OwnerReference{
		Kind: "node",
		Name: "blah",
	})
	taskExecutionMetadata.OnIsInterruptible().Return(true)
	taskExecutionMetadata.OnGetOverrides().Return(overrides)
	taskExecutionMetadata.OnGetK8sServiceAccount().Return(serviceAccount)
	taskExecutionMetadata.OnGetPlatformResources().Return(&corev1.ResourceRequirements{})
	taskExecutionMetadata.OnGetEnvironmentVariables().Return(nil)
	taskExecutionMetadata.OnGetConsoleURL().Return("")
	pCtx.OnTaskExecutionMetadata().Return(taskExecutionMetadata)

	pluginStateReaderMock := mocks.PluginStateReader{}
	pluginStateReaderMock.On("Get", mock.AnythingOfType(reflect.TypeOf(&pluginState).String())).Return(
		func(v interface{}) uint8 {
			// Hand back the caller-supplied plugin state (unlike the task
			// context helper, which always returns a zero value).
			*(v.(*k8s.PluginState)) = pluginState
			return 0
		},
		func(v interface{}) error {
			return nil
		})
	pCtx.OnPluginStateReader().Return(&pluginStateReaderMock)

	return pCtx
}

// dummyTensorFlowJobResource builds a TFJob (via BuildResource on a dummy task
// context) whose Status carries a condition history representative of
// conditionType, so GetTaskPhase can be exercised against each lifecycle stage.
func dummyTensorFlowJobResource(tensorflowResourceHandler tensorflowOperatorResourceHandler,
	workers int32, psReplicas int32, chiefReplicas int32, evaluatorReplicas int32, conditionType kubeflowv1.JobConditionType) *kubeflowv1.TFJob {
	var jobConditions []kubeflowv1.JobCondition

	now := time.Now()

	jobCreated := kubeflowv1.JobCondition{
		Type:    kubeflowv1.JobCreated,
		Status:  corev1.ConditionTrue,
		Reason:  "TensorFlowJobCreated",
		Message: "TensorFlowJob the-job is created.",
		LastUpdateTime: v1.Time{
			Time: now,
		},
		LastTransitionTime: v1.Time{
			Time: now,
		},
	}
	jobRunningActive := kubeflowv1.JobCondition{
		Type:    kubeflowv1.JobRunning,
		Status:  corev1.ConditionTrue,
		Reason:  "TensorFlowJobRunning",
		Message: "TensorFlowJob the-job is running.",
		LastUpdateTime: v1.Time{
			Time: now.Add(time.Minute),
		},
		LastTransitionTime: v1.Time{
			Time: now.Add(time.Minute),
		},
	}
	// Terminal histories carry a no-longer-true Running condition.
	jobRunningInactive := *jobRunningActive.DeepCopy()
	jobRunningInactive.Status = corev1.ConditionFalse
	jobSucceeded := kubeflowv1.JobCondition{
		Type:    kubeflowv1.JobSucceeded,
		Status:  corev1.ConditionTrue,
		Reason:  "TensorFlowJobSucceeded",
		Message: "TensorFlowJob the-job is successfully completed.",
		LastUpdateTime: v1.Time{
			Time: now.Add(2 * time.Minute),
		},
		LastTransitionTime: v1.Time{
			Time: now.Add(2 * time.Minute),
		},
	}
	jobFailed := kubeflowv1.JobCondition{
		Type:    kubeflowv1.JobFailed,
		Status:  corev1.ConditionTrue,
		Reason:  "TensorFlowJobFailed",
		Message: "TensorFlowJob the-job is failed.",
		LastUpdateTime: v1.Time{
			Time: now.Add(2 * time.Minute),
		},
		LastTransitionTime: v1.Time{
			Time: now.Add(2 * time.Minute),
		},
	}
	jobRestarting := kubeflowv1.JobCondition{
		Type:    kubeflowv1.JobRestarting,
		Status:  corev1.ConditionTrue,
		Reason:  "TensorFlowJobRestarting",
		Message: "TensorFlowJob the-job is restarting because some replica(s) failed.",
		LastUpdateTime: v1.Time{
			Time: now.Add(3 * time.Minute),
		},
		LastTransitionTime: v1.Time{
			Time: now.Add(3 * time.Minute),
		},
	}

	// Assemble the condition history leading up to the requested state.
	switch conditionType {
	case kubeflowv1.JobCreated:
		jobConditions = []kubeflowv1.JobCondition{
			jobCreated,
		}
	case kubeflowv1.JobRunning:
		jobConditions = []kubeflowv1.JobCondition{
			jobCreated,
			jobRunningActive,
		}
	case kubeflowv1.JobSucceeded:
		jobConditions = []kubeflowv1.JobCondition{
			jobCreated,
			jobRunningInactive,
			jobSucceeded,
		}
	case kubeflowv1.JobFailed:
		jobConditions = []kubeflowv1.JobCondition{
			jobCreated,
			jobRunningInactive,
			jobFailed,
		}
	case kubeflowv1.JobRestarting:
		jobConditions = []kubeflowv1.JobCondition{
			jobCreated,
			jobRunningInactive,
			jobFailed,
			jobRestarting,
		}
	}

	tfObj := dummyTensorFlowCustomObj(workers, psReplicas, chiefReplicas, evaluatorReplicas)
	taskTemplate := dummyTensorFlowTaskTemplate("the job", tfObj)
	resource, err := tensorflowResourceHandler.BuildResource(context.TODO(), dummyTensorFlowTaskContext(taskTemplate, resourceRequirements, nil))
	if err != nil {
		panic(err)
	}

	return &kubeflowv1.TFJob{
		ObjectMeta: v1.ObjectMeta{
			Name:      jobName,
			Namespace: jobNamespace,
		},
		Spec: resource.(*kubeflowv1.TFJob).Spec,
		Status: kubeflowv1.JobStatus{
			Conditions:        jobConditions,
			ReplicaStatuses:   nil,
			StartTime:         &v1.Time{Time: time.Now()},
			CompletionTime:    nil,
			LastReconcileTime: nil,
		},
	}
}

// TestGetReplicaCount verifies GetReplicaCount returns non-nil counts for every
// TFJob replica type on a built resource.
func TestGetReplicaCount(t *testing.T) {
	tensorflowResourceHandler := tensorflowOperatorResourceHandler{}
	tfObj := 
dummyTensorFlowCustomObj(1, 0, 0, 0)
	taskTemplate := dummyTensorFlowTaskTemplate("the job", tfObj)
	resource, err := tensorflowResourceHandler.BuildResource(context.TODO(), dummyTensorFlowTaskContext(taskTemplate, resourceRequirements, nil))
	assert.NoError(t, err)
	assert.NotNil(t, resource)
	tensorflowJob, ok := resource.(*kubeflowv1.TFJob)
	assert.True(t, ok)

	assert.NotNil(t, common.GetReplicaCount(tensorflowJob.Spec.TFReplicaSpecs, kubeflowv1.TFJobReplicaTypeWorker))
	assert.NotNil(t, common.GetReplicaCount(tensorflowJob.Spec.TFReplicaSpecs, kubeflowv1.TFJobReplicaTypePS))
	assert.NotNil(t, common.GetReplicaCount(tensorflowJob.Spec.TFReplicaSpecs, kubeflowv1.TFJobReplicaTypeChief))
	assert.NotNil(t, common.GetReplicaCount(tensorflowJob.Spec.TFReplicaSpecs, kubeflowv1.TFJobReplicaTypeEval))
}

// TestBuildResourceTensorFlow verifies a v0 task config yields a TFJob with the
// requested replica counts, propagated labels/annotations, the default
// tensorflow container, and the override resource requirements on every
// container.
func TestBuildResourceTensorFlow(t *testing.T) {
	tensorflowResourceHandler := tensorflowOperatorResourceHandler{}

	tfObj := dummyTensorFlowCustomObj(100, 50, 1, 1)
	taskTemplate := dummyTensorFlowTaskTemplate("the job", tfObj)

	resource, err := tensorflowResourceHandler.BuildResource(context.TODO(), dummyTensorFlowTaskContext(taskTemplate, resourceRequirements, nil))
	assert.NoError(t, err)
	assert.NotNil(t, resource)

	tensorflowJob, ok := resource.(*kubeflowv1.TFJob)
	assert.True(t, ok)
	assert.Equal(t, int32(100), *tensorflowJob.Spec.TFReplicaSpecs[kubeflowv1.TFJobReplicaTypeWorker].Replicas)
	assert.Equal(t, int32(50), *tensorflowJob.Spec.TFReplicaSpecs[kubeflowv1.TFJobReplicaTypePS].Replicas)
	assert.Equal(t, int32(1), *tensorflowJob.Spec.TFReplicaSpecs[kubeflowv1.TFJobReplicaTypeChief].Replicas)
	assert.Equal(t, int32(1), *tensorflowJob.Spec.TFReplicaSpecs[kubeflowv1.TFJobReplicaTypeEval].Replicas)

	// verify TaskExecutionMetadata labels and annotations are copied to the TensorFlowJob
	for k, v := range dummyAnnotations {
		for _, replicaSpec := range tensorflowJob.Spec.TFReplicaSpecs {
			assert.Equal(t, v, replicaSpec.Template.ObjectMeta.Annotations[k])
		}
	}
	for k, v := range dummyLabels {
		for _, replicaSpec := range tensorflowJob.Spec.TFReplicaSpecs {
			assert.Equal(t, v, replicaSpec.Template.ObjectMeta.Labels[k])
		}
	}

	for _, replicaSpec := range tensorflowJob.Spec.TFReplicaSpecs {
		var hasContainerWithDefaultTensorFlowName = false
		podSpec := replicaSpec.Template.Spec
		for _, container := range podSpec.Containers {
			if container.Name == kubeflowv1.TFJobDefaultContainerName {
				hasContainerWithDefaultTensorFlowName = true
			}

			assert.Equal(t, resourceRequirements.Requests, container.Resources.Requests)
			assert.Equal(t, resourceRequirements.Limits, container.Resources.Limits)
		}

		assert.True(t, hasContainerWithDefaultTensorFlowName)
	}
}

// TestBuildResourceTensorFlowExtendedResources checks GPU accelerator node
// selectors and tolerations derived from base/override ExtendedResources, for
// both v0 and v1 task configs.
func TestBuildResourceTensorFlowExtendedResources(t *testing.T) {
	assert.NoError(t, flytek8sConfig.SetK8sPluginConfig(&flytek8sConfig.K8sPluginConfig{
		GpuDeviceNodeLabel:        "gpu-node-label",
		GpuPartitionSizeNodeLabel: "gpu-partition-size",
		GpuResourceName:           flytek8s.ResourceNvidiaGPU,
	}))

	fixtures := []struct {
		name                      string
		resources                 *corev1.ResourceRequirements
		extendedResourcesBase     *core.ExtendedResources
		extendedResourcesOverride *core.ExtendedResources
		expectedNsr               []corev1.NodeSelectorTerm
		expectedTol               []corev1.Toleration
	}{
		{
			"without overrides",
			&corev1.ResourceRequirements{
				Limits: corev1.ResourceList{
					flytek8s.ResourceNvidiaGPU: resource.MustParse("1"),
				},
			},
			&core.ExtendedResources{
				GpuAccelerator: &core.GPUAccelerator{
					Device: "nvidia-tesla-t4",
				},
			},
			nil,
			[]corev1.NodeSelectorTerm{
				{
					MatchExpressions: []corev1.NodeSelectorRequirement{
						corev1.NodeSelectorRequirement{
							Key:      "gpu-node-label",
							Operator: corev1.NodeSelectorOpIn,
							Values:   []string{"nvidia-tesla-t4"},
						},
					},
				},
			},
			[]corev1.Toleration{
				{
					Key:      "gpu-node-label",
					Value:    "nvidia-tesla-t4",
					Operator: corev1.TolerationOpEqual,
					Effect:   corev1.TaintEffectNoSchedule,
				},
			},
		},
		{
			"with overrides",
			&corev1.ResourceRequirements{
				Limits: corev1.ResourceList{
					flytek8s.ResourceNvidiaGPU: resource.MustParse("1"),
				},
			},
			&core.ExtendedResources{
				GpuAccelerator: &core.GPUAccelerator{
					Device: "nvidia-tesla-t4",
				},
			},
			// Override replaces the base accelerator and adds a MIG partition.
			&core.ExtendedResources{
				GpuAccelerator: &core.GPUAccelerator{
					Device: "nvidia-tesla-a100",
					PartitionSizeValue: &core.GPUAccelerator_PartitionSize{
						PartitionSize: "1g.5gb",
					},
				},
			},
			[]corev1.NodeSelectorTerm{
				{
					MatchExpressions: []corev1.NodeSelectorRequirement{
						corev1.NodeSelectorRequirement{
							Key:      "gpu-node-label",
							Operator: corev1.NodeSelectorOpIn,
							Values:   []string{"nvidia-tesla-a100"},
						},
						corev1.NodeSelectorRequirement{
							Key:      "gpu-partition-size",
							Operator: corev1.NodeSelectorOpIn,
							Values:   []string{"1g.5gb"},
						},
					},
				},
			},
			[]corev1.Toleration{
				{
					Key:      "gpu-node-label",
					Value:    "nvidia-tesla-a100",
					Operator: corev1.TolerationOpEqual,
					Effect:   corev1.TaintEffectNoSchedule,
				},
				{
					Key:      "gpu-partition-size",
					Value:    "1g.5gb",
					Operator: corev1.TolerationOpEqual,
					Effect:   corev1.TaintEffectNoSchedule,
				},
			},
		},
	}

	v0TaskTemplate := dummyTensorFlowTaskTemplate("v0", dummyTensorFlowCustomObj(100, 50, 1, 1))
	// Two v1 encodings of the same replica layout: direct fields vs. the
	// CommonReplicaSpec wrapper.
	v1TaskTemplates := []*core.TaskTemplate{
		dummyTensorFlowTaskTemplate("v1", &kfplugins.DistributedTensorflowTrainingTask{
			ChiefReplicas: &kfplugins.DistributedTensorflowTrainingReplicaSpec{
				Replicas: 1,
			},
			WorkerReplicas: &kfplugins.DistributedTensorflowTrainingReplicaSpec{
				Replicas: 100,
			},
			PsReplicas: &kfplugins.DistributedTensorflowTrainingReplicaSpec{
				Replicas: 50,
			},
			EvaluatorReplicas: &kfplugins.DistributedTensorflowTrainingReplicaSpec{
				Replicas: 1,
			},
		}),
		dummyTensorFlowTaskTemplate("v1", &kfplugins.DistributedTensorflowTrainingTask{
			ChiefReplicas: &kfplugins.DistributedTensorflowTrainingReplicaSpec{
				Common: &plugins.CommonReplicaSpec{
					Replicas: 1,
				},
			},
			WorkerReplicas: &kfplugins.DistributedTensorflowTrainingReplicaSpec{
				Common: &plugins.CommonReplicaSpec{
					Replicas: 100,
				},
			},
			PsReplicas: &kfplugins.DistributedTensorflowTrainingReplicaSpec{
				Common: &plugins.CommonReplicaSpec{
					Replicas: 50,
				},
			},
			EvaluatorReplicas: &kfplugins.DistributedTensorflowTrainingReplicaSpec{
				Common: &plugins.CommonReplicaSpec{
					Replicas: 1,
				},
			},
		}),
	}
	for _, v1TaskTemplate := range v1TaskTemplates {
		v1TaskTemplate.TaskTypeVersion = 1
		testConfigs := []struct {
			name         string
			taskTemplate *core.TaskTemplate
		}{
			{"v0", v0TaskTemplate},
			{"v1", v1TaskTemplate},
		}

		for _, tCfg := range testConfigs {
			for _, f := range fixtures {
				t.Run(tCfg.name+" "+f.name, func(t *testing.T) {
					taskTemplate := *tCfg.taskTemplate
					taskTemplate.ExtendedResources = f.extendedResourcesBase
					tensorflowResourceHandler := tensorflowOperatorResourceHandler{}
					taskContext := dummyTensorFlowTaskContext(&taskTemplate, f.resources, f.extendedResourcesOverride)
					r, err := tensorflowResourceHandler.BuildResource(context.TODO(), taskContext)
					assert.NoError(t, err)
					assert.NotNil(t, r)
					tensorflowJob, ok := r.(*kubeflowv1.TFJob)
					assert.True(t, ok)

					for _, replicaSpec := range tensorflowJob.Spec.TFReplicaSpecs {
						assert.EqualValues(
							t,
							f.expectedNsr,
							replicaSpec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms,
						)
						assert.EqualValues(
							t,
							f.expectedTol,
							replicaSpec.Template.Spec.Tolerations,
						)
					}
				})
			}
		}
	}
}

// TestGetTaskPhase verifies the TFJob condition -> Flyte phase mapping for each
// lifecycle condition.
func TestGetTaskPhase(t *testing.T) {
	tensorflowResourceHandler := tensorflowOperatorResourceHandler{}
	ctx := context.TODO()

	dummyTensorFlowJobResourceCreator := func(conditionType kubeflowv1.JobConditionType) *kubeflowv1.TFJob {
		return dummyTensorFlowJobResource(tensorflowResourceHandler, 2, 1, 1, 1, conditionType)
	}

	pluginContext := dummyTensorFlowPluginContext(dummyTensorFlowTaskTemplate("", 
dummyTensorFlowCustomObj(2, 1, 1, 1)), resourceRequirements, nil, k8s.PluginState{})
	// Created -> Queued
	taskPhase, err := tensorflowResourceHandler.GetTaskPhase(ctx, pluginContext, dummyTensorFlowJobResourceCreator(kubeflowv1.JobCreated))
	assert.NoError(t, err)
	assert.Equal(t, pluginsCore.PhaseQueued, taskPhase.Phase())
	assert.NotNil(t, taskPhase.Info())
	assert.Nil(t, err)

	// Running -> Running
	taskPhase, err = tensorflowResourceHandler.GetTaskPhase(ctx, pluginContext, dummyTensorFlowJobResourceCreator(kubeflowv1.JobRunning))
	assert.NoError(t, err)
	assert.Equal(t, pluginsCore.PhaseRunning, taskPhase.Phase())
	assert.NotNil(t, taskPhase.Info())
	assert.Nil(t, err)

	// Succeeded -> Success
	taskPhase, err = tensorflowResourceHandler.GetTaskPhase(ctx, pluginContext, dummyTensorFlowJobResourceCreator(kubeflowv1.JobSucceeded))
	assert.NoError(t, err)
	assert.Equal(t, pluginsCore.PhaseSuccess, taskPhase.Phase())
	assert.NotNil(t, taskPhase.Info())
	assert.Nil(t, err)

	// Failed -> RetryableFailure
	taskPhase, err = tensorflowResourceHandler.GetTaskPhase(ctx, pluginContext, dummyTensorFlowJobResourceCreator(kubeflowv1.JobFailed))
	assert.NoError(t, err)
	assert.Equal(t, pluginsCore.PhaseRetryableFailure, taskPhase.Phase())
	assert.NotNil(t, taskPhase.Info())
	assert.Nil(t, err)

	// Restarting -> Running
	taskPhase, err = tensorflowResourceHandler.GetTaskPhase(ctx, pluginContext, dummyTensorFlowJobResourceCreator(kubeflowv1.JobRestarting))
	assert.NoError(t, err)
	assert.Equal(t, pluginsCore.PhaseRunning, taskPhase.Phase())
	assert.NotNil(t, taskPhase.Info())
	assert.Nil(t, err)
}

// TestGetTaskPhaseIncreasePhaseVersion verifies that when the observed phase
// equals the previously recorded plugin-state phase, the phase version is
// incremented.
func TestGetTaskPhaseIncreasePhaseVersion(t *testing.T) {
	tensorflowResourceHandler := tensorflowOperatorResourceHandler{}
	ctx := context.TODO()

	pluginState := k8s.PluginState{
		Phase:        pluginsCore.PhaseQueued,
		PhaseVersion: pluginsCore.DefaultPhaseVersion,
		Reason:       "task submitted to K8s",
	}
	pluginContext := dummyTensorFlowPluginContext(dummyTensorFlowTaskTemplate("", dummyTensorFlowCustomObj(2, 1, 1, 1)), resourceRequirements, nil, pluginState)

	taskPhase, err := tensorflowResourceHandler.GetTaskPhase(ctx, pluginContext, dummyTensorFlowJobResource(tensorflowResourceHandler, 2, 1, 1, 1, kubeflowv1.JobCreated))

	assert.NoError(t, err)
	assert.Equal(t, taskPhase.Version(), pluginsCore.DefaultPhaseVersion+1)
}

// TestGetLogs verifies one Kubernetes-dashboard log link per replica pod, in
// worker/ps/chief/evaluator order.
func TestGetLogs(t *testing.T) {
	assert.NoError(t, logs.SetLogConfig(&logs.LogConfig{
		IsKubernetesEnabled: true,
		KubernetesURL:       "k8s.com",
	}))

	workers := int32(2)
	psReplicas := int32(1)
	chiefReplicas := int32(1)
	evaluatorReplicas := int32(1)

	tensorflowResourceHandler := tensorflowOperatorResourceHandler{}
	tensorFlowJob := dummyTensorFlowJobResource(tensorflowResourceHandler, workers, psReplicas, chiefReplicas, evaluatorReplicas, kubeflowv1.JobRunning)
	taskTemplate := dummyTensorFlowTaskTemplate("", dummyTensorFlowCustomObj(workers, psReplicas, chiefReplicas, evaluatorReplicas))
	pluginContext := dummyTensorFlowPluginContext(taskTemplate, resourceRequirements, nil, k8s.PluginState{})
	jobLogs, err := common.GetLogs(pluginContext, common.TensorflowTaskType, tensorFlowJob.ObjectMeta, taskTemplate, false,
		workers, psReplicas, chiefReplicas, evaluatorReplicas, kubeflowv1.TFJobDefaultContainerName)

	assert.NoError(t, err)
	assert.Equal(t, 5, len(jobLogs))
	assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-0/pod?namespace=tensorflow-namespace", jobNamespace, jobName), jobLogs[0].Uri)
	assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-worker-1/pod?namespace=tensorflow-namespace", jobNamespace, jobName), jobLogs[1].Uri)
	assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-psReplica-0/pod?namespace=tensorflow-namespace", jobNamespace, jobName), jobLogs[2].Uri)
	assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-chiefReplica-0/pod?namespace=tensorflow-namespace", jobNamespace, jobName), jobLogs[3].Uri)
	assert.Equal(t, fmt.Sprintf("k8s.com/#!/log/%s/%s-evaluatorReplica-0/pod?namespace=tensorflow-namespace", jobNamespace, jobName), jobLogs[4].Uri)
}

// TestGetProperties verifies the handler advertises default plugin properties.
func TestGetProperties(t *testing.T) {
	tensorflowResourceHandler := tensorflowOperatorResourceHandler{}
	expected := k8s.PluginProperties{}
	assert.Equal(t, expected, tensorflowResourceHandler.GetProperties())
}

// TestReplicaCounts verifies which replica specs are materialized for various
// replica-count combinations, and that zero workers is rejected.
func TestReplicaCounts(t *testing.T) {
	for _, test := range []struct {
		name                  string
		chiefReplicaCount     int32
		psReplicaCount        int32
		workerReplicaCount    int32
		evaluatorReplicaCount int32
		expectError           bool
		contains              []kubeflowv1.ReplicaType
		notContains           []kubeflowv1.ReplicaType
	}{
		{"NoWorkers", 1, 1, 0, 1, true, nil, nil},
		{"SingleChief", 1, 0, 1, 0, false,
			[]kubeflowv1.ReplicaType{kubeflowv1.TFJobReplicaTypeChief, kubeflowv1.TFJobReplicaTypeWorker},
			[]kubeflowv1.ReplicaType{kubeflowv1.TFJobReplicaTypePS, kubeflowv1.TFJobReplicaTypeEval}},
		{"SinglePS", 0, 1, 1, 0, false,
			[]kubeflowv1.ReplicaType{kubeflowv1.TFJobReplicaTypePS, kubeflowv1.TFJobReplicaTypeWorker},
			[]kubeflowv1.ReplicaType{kubeflowv1.TFJobReplicaTypeChief, kubeflowv1.TFJobReplicaTypeEval}},
		{"AllContains", 1, 1, 1, 1, false,
			[]kubeflowv1.ReplicaType{kubeflowv1.TFJobReplicaTypePS, kubeflowv1.TFJobReplicaTypeWorker, kubeflowv1.TFJobReplicaTypeChief, kubeflowv1.TFJobReplicaTypeEval},
			nil,
		},
	} {
		t.Run(test.name, func(t *testing.T) {
			tensorflowResourceHandler := tensorflowOperatorResourceHandler{}

			tfObj := dummyTensorFlowCustomObj(test.workerReplicaCount, test.psReplicaCount, test.chiefReplicaCount, test.evaluatorReplicaCount)
			taskTemplate := dummyTensorFlowTaskTemplate("the job", tfObj)

			resource, err := tensorflowResourceHandler.BuildResource(context.TODO(), dummyTensorFlowTaskContext(taskTemplate, resourceRequirements, nil))
			if test.expectError {
				assert.Error(t, err)
				assert.Nil(t, resource)
				return
			}

			assert.NoError(t, err)
			assert.NotNil(t, resource)

			job, ok := resource.(*kubeflowv1.TFJob)
			assert.True(t, ok)

			assert.Len(t, job.Spec.TFReplicaSpecs, len(test.contains))
			for _, replicaType := range test.contains {
assert.Contains(t, job.Spec.TFReplicaSpecs, replicaType)
			}
			for _, replicaType := range test.notContains {
				assert.NotContains(t, job.Spec.TFReplicaSpecs, replicaType)
			}
		})
	}
}

// TestBuildResourceTensorFlowV1 verifies the v1 (kfplugins) task config path:
// per-replica images, resources, restart policies, and RunPolicy are applied to
// the built TFJob. The same layout is exercised twice — once with direct
// replica-spec fields and once via the CommonReplicaSpec wrapper.
func TestBuildResourceTensorFlowV1(t *testing.T) {
	taskConfigs := []*kfplugins.DistributedTensorflowTrainingTask{
		{
			ChiefReplicas: &kfplugins.DistributedTensorflowTrainingReplicaSpec{
				Replicas: 1,
				Image:    testImage,
				Resources: &core.Resources{
					Requests: []*core.Resources_ResourceEntry{
						{Name: core.Resources_CPU, Value: "250m"},
						{Name: core.Resources_MEMORY, Value: "1Gi"},
					},
					Limits: []*core.Resources_ResourceEntry{
						{Name: core.Resources_CPU, Value: "500m"},
						{Name: core.Resources_MEMORY, Value: "2Gi"},
					},
				},
				RestartPolicy: plugins.RestartPolicy_RESTART_POLICY_ALWAYS,
			},
			WorkerReplicas: &kfplugins.DistributedTensorflowTrainingReplicaSpec{
				Replicas: 100,
				Resources: &core.Resources{
					Requests: []*core.Resources_ResourceEntry{
						{Name: core.Resources_CPU, Value: "1024m"},
						{Name: core.Resources_MEMORY, Value: "1Gi"},
						{Name: core.Resources_GPU, Value: "1"},
					},
					Limits: []*core.Resources_ResourceEntry{
						{Name: core.Resources_CPU, Value: "2048m"},
						{Name: core.Resources_MEMORY, Value: "2Gi"},
						{Name: core.Resources_GPU, Value: "1"},
					},
				},
			},
			PsReplicas: &kfplugins.DistributedTensorflowTrainingReplicaSpec{
				Replicas: 50,
				Resources: &core.Resources{
					Requests: []*core.Resources_ResourceEntry{
						{Name: core.Resources_CPU, Value: "250m"},
						{Name: core.Resources_MEMORY, Value: "1Gi"},
					},
					Limits: []*core.Resources_ResourceEntry{
						{Name: core.Resources_CPU, Value: "500m"},
						{Name: core.Resources_MEMORY, Value: "2Gi"},
					},
				},
			},
			EvaluatorReplicas: &kfplugins.DistributedTensorflowTrainingReplicaSpec{
				Replicas: 1,
				Image:    testImage,
				Resources: &core.Resources{
					Requests: []*core.Resources_ResourceEntry{
						{Name: core.Resources_CPU, Value: "250m"},
						{Name: core.Resources_MEMORY, Value: "1Gi"},
					},
					Limits: []*core.Resources_ResourceEntry{
						{Name: core.Resources_CPU, Value: "500m"},
						{Name: core.Resources_MEMORY, Value: "2Gi"},
					},
				},
				RestartPolicy: plugins.RestartPolicy_RESTART_POLICY_ALWAYS,
			},
			RunPolicy: &kfplugins.RunPolicy{
				CleanPodPolicy:        kfplugins.CleanPodPolicy_CLEANPOD_POLICY_ALL,
				ActiveDeadlineSeconds: int32(100),
			},
		},
		{
			ChiefReplicas: &kfplugins.DistributedTensorflowTrainingReplicaSpec{
				Common: &plugins.CommonReplicaSpec{
					Replicas: 1,
					Image:    testImage,
					Resources: &core.Resources{
						Requests: []*core.Resources_ResourceEntry{
							{Name: core.Resources_CPU, Value: "250m"},
							{Name: core.Resources_MEMORY, Value: "1Gi"},
						},
						Limits: []*core.Resources_ResourceEntry{
							{Name: core.Resources_CPU, Value: "500m"},
							{Name: core.Resources_MEMORY, Value: "2Gi"},
						},
					},
					RestartPolicy: plugins.RestartPolicy_RESTART_POLICY_ALWAYS,
				},
			},
			WorkerReplicas: &kfplugins.DistributedTensorflowTrainingReplicaSpec{
				Common: &plugins.CommonReplicaSpec{
					Replicas: 100,
					Resources: &core.Resources{
						Requests: []*core.Resources_ResourceEntry{
							{Name: core.Resources_CPU, Value: "1024m"},
							{Name: core.Resources_MEMORY, Value: "1Gi"},
							{Name: core.Resources_GPU, Value: "1"},
						},
						Limits: []*core.Resources_ResourceEntry{
							{Name: core.Resources_CPU, Value: "2048m"},
							{Name: core.Resources_MEMORY, Value: "2Gi"},
							{Name: core.Resources_GPU, Value: "1"},
						},
					},
				},
			},
			PsReplicas: &kfplugins.DistributedTensorflowTrainingReplicaSpec{
				Common: &plugins.CommonReplicaSpec{
					Replicas: 50,
					Resources: &core.Resources{
						Requests: []*core.Resources_ResourceEntry{
							{Name: core.Resources_CPU, Value: "250m"},
							{Name: core.Resources_MEMORY, Value: "1Gi"},
						},
						Limits: []*core.Resources_ResourceEntry{
							{Name: core.Resources_CPU, Value: "500m"},
							{Name: core.Resources_MEMORY, Value: "2Gi"},
						},
					},
				},
			},
			EvaluatorReplicas: &kfplugins.DistributedTensorflowTrainingReplicaSpec{
				Common: &plugins.CommonReplicaSpec{
					Replicas: 1,
					Image:    testImage,
					Resources: &core.Resources{
						Requests: []*core.Resources_ResourceEntry{
							{Name: core.Resources_CPU, Value: "250m"},
							{Name: core.Resources_MEMORY, Value: "1Gi"},
						},
						Limits: []*core.Resources_ResourceEntry{
							{Name: core.Resources_CPU, Value: "500m"},
							{Name: core.Resources_MEMORY, Value: "2Gi"},
						},
					},
					RestartPolicy: plugins.RestartPolicy_RESTART_POLICY_ALWAYS,
				},
			},
			RunPolicy: &kfplugins.RunPolicy{
				CleanPodPolicy:        kfplugins.CleanPodPolicy_CLEANPOD_POLICY_ALL,
				ActiveDeadlineSeconds: int32(100),
			},
		},
	}
	for _, taskConfig := range taskConfigs {

		// Expected container resources per replica type, mirroring taskConfig.
		resourceRequirementsMap := map[kubeflowv1.ReplicaType]*corev1.ResourceRequirements{
			kubeflowv1.TFJobReplicaTypeChief: {
				Requests: corev1.ResourceList{
					corev1.ResourceCPU:    resource.MustParse("250m"),
					corev1.ResourceMemory: resource.MustParse("1Gi"),
				},
				Limits: corev1.ResourceList{
					corev1.ResourceCPU:    resource.MustParse("500m"),
					corev1.ResourceMemory: resource.MustParse("2Gi"),
				},
			},
			kubeflowv1.TFJobReplicaTypeWorker: {
				Requests: corev1.ResourceList{
					corev1.ResourceCPU:         resource.MustParse("1024m"),
					corev1.ResourceMemory:      resource.MustParse("1Gi"),
					flytek8s.ResourceNvidiaGPU: resource.MustParse("1"),
				},
				Limits: corev1.ResourceList{
					corev1.ResourceCPU:         resource.MustParse("2048m"),
					corev1.ResourceMemory:      resource.MustParse("2Gi"),
					flytek8s.ResourceNvidiaGPU: resource.MustParse("1"),
				},
			},
			kubeflowv1.TFJobReplicaTypePS: {
				Requests: corev1.ResourceList{
					corev1.ResourceCPU:    resource.MustParse("250m"),
					corev1.ResourceMemory: resource.MustParse("1Gi"),
				},
				Limits: corev1.ResourceList{
					corev1.ResourceCPU:    resource.MustParse("500m"),
					corev1.ResourceMemory: resource.MustParse("2Gi"),
				},
			},
			kubeflowv1.TFJobReplicaTypeEval: {
				Requests: corev1.ResourceList{
					corev1.ResourceCPU:    resource.MustParse("250m"),
					corev1.ResourceMemory: resource.MustParse("1Gi"),
				},
				Limits: corev1.ResourceList{
					corev1.ResourceCPU:    resource.MustParse("500m"),
					corev1.ResourceMemory: resource.MustParse("2Gi"),
				},
			},
		}

		tensorflowResourceHandler := tensorflowOperatorResourceHandler{}

		taskTemplate := dummyTensorFlowTaskTemplate("v1", taskConfig)
		taskTemplate.TaskTypeVersion = 1

		resource, err := tensorflowResourceHandler.BuildResource(context.TODO(), dummyTensorFlowTaskContext(taskTemplate, resourceRequirements, nil))
		assert.NoError(t, err)
		assert.NotNil(t, resource)

		tensorflowJob, ok := resource.(*kubeflowv1.TFJob)
		assert.True(t, ok)
		assert.Equal(t, int32(100), *tensorflowJob.Spec.TFReplicaSpecs[kubeflowv1.TFJobReplicaTypeWorker].Replicas)
		assert.Equal(t, int32(50), *tensorflowJob.Spec.TFReplicaSpecs[kubeflowv1.TFJobReplicaTypePS].Replicas)
		assert.Equal(t, int32(1), *tensorflowJob.Spec.TFReplicaSpecs[kubeflowv1.TFJobReplicaTypeChief].Replicas)
		assert.Equal(t, int32(1), *tensorflowJob.Spec.TFReplicaSpecs[kubeflowv1.TFJobReplicaTypeEval].Replicas)

		for replicaType, replicaSpec := range tensorflowJob.Spec.TFReplicaSpecs {
			var hasContainerWithDefaultTensorFlowName = false

			for _, container := range replicaSpec.Template.Spec.Containers {
				if container.Name == kubeflowv1.TFJobDefaultContainerName {
					hasContainerWithDefaultTensorFlowName = true
					assert.Equal(t, *resourceRequirementsMap[replicaType], container.Resources)
				}
			}

			assert.True(t, hasContainerWithDefaultTensorFlowName)
		}
		assert.Equal(t, kubeflowv1.CleanPodPolicyAll, *tensorflowJob.Spec.RunPolicy.CleanPodPolicy)
		assert.Equal(t, int64(100), *tensorflowJob.Spec.RunPolicy.ActiveDeadlineSeconds)
	}
}

// TestBuildResourceTensorFlowV1WithOnlyWorker verifies that a v1 config with
// only worker replicas produces a TFJob without chief/PS replica specs.
func TestBuildResourceTensorFlowV1WithOnlyWorker(t *testing.T) {
	taskConfigs := []*kfplugins.DistributedTensorflowTrainingTask{
		{
			WorkerReplicas: &kfplugins.DistributedTensorflowTrainingReplicaSpec{
				Replicas: 100,
				Resources: &core.Resources{
					Requests: []*core.Resources_ResourceEntry{
						{Name: core.Resources_CPU, Value: "1024m"},
						{Name: core.Resources_MEMORY, Value: "1Gi"},
+ {Name: core.Resources_GPU, Value: "1"}, + }, + Limits: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "2048m"}, + {Name: core.Resources_MEMORY, Value: "2Gi"}, + {Name: core.Resources_GPU, Value: "1"}, + }, + }, + }, + }, + { + WorkerReplicas: &kfplugins.DistributedTensorflowTrainingReplicaSpec{ + Common: &plugins.CommonReplicaSpec{ + Replicas: 100, + Resources: &core.Resources{ + Requests: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "1024m"}, + {Name: core.Resources_MEMORY, Value: "1Gi"}, + {Name: core.Resources_GPU, Value: "1"}, + }, + Limits: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "2048m"}, + {Name: core.Resources_MEMORY, Value: "2Gi"}, + {Name: core.Resources_GPU, Value: "1"}, + }, + }, + }, + }, + }, + } + + for _, taskConfig := range taskConfigs { + resourceRequirementsMap := map[kubeflowv1.ReplicaType]*corev1.ResourceRequirements{ + kubeflowv1.TFJobReplicaTypeWorker: { + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1024m"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + flytek8s.ResourceNvidiaGPU: resource.MustParse("1"), + }, + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("2048m"), + corev1.ResourceMemory: resource.MustParse("2Gi"), + flytek8s.ResourceNvidiaGPU: resource.MustParse("1"), + }, + }, + } + + tensorflowResourceHandler := tensorflowOperatorResourceHandler{} + + taskTemplate := dummyTensorFlowTaskTemplate("v1 with only worker replica", taskConfig) + taskTemplate.TaskTypeVersion = 1 + + resource, err := tensorflowResourceHandler.BuildResource(context.TODO(), dummyTensorFlowTaskContext(taskTemplate, resourceRequirements, nil)) + assert.NoError(t, err) + assert.NotNil(t, resource) + + tensorflowJob, ok := resource.(*kubeflowv1.TFJob) + assert.True(t, ok) + assert.Equal(t, int32(100), *tensorflowJob.Spec.TFReplicaSpecs[kubeflowv1.TFJobReplicaTypeWorker].Replicas) + assert.Nil(t, 
tensorflowJob.Spec.TFReplicaSpecs[kubeflowv1.TFJobReplicaTypeChief]) + assert.Nil(t, tensorflowJob.Spec.TFReplicaSpecs[kubeflowv1.TFJobReplicaTypePS]) + + for replicaType, replicaSpec := range tensorflowJob.Spec.TFReplicaSpecs { + var hasContainerWithDefaultTensorFlowName = false + + for _, container := range replicaSpec.Template.Spec.Containers { + if container.Name == kubeflowv1.TFJobDefaultContainerName { + hasContainerWithDefaultTensorFlowName = true + assert.Equal(t, *resourceRequirementsMap[replicaType], container.Resources) + } + } + + assert.True(t, hasContainerWithDefaultTensorFlowName) + } + } +} + +func TestBuildResourceTensorFlowV1ResourceTolerations(t *testing.T) { + gpuToleration := corev1.Toleration{ + Key: "nvidia.com/gpu", + Value: "present", + Operator: corev1.TolerationOpEqual, + Effect: corev1.TaintEffectNoSchedule, + } + assert.NoError(t, flytek8sConfig.SetK8sPluginConfig(&flytek8sConfig.K8sPluginConfig{ + GpuResourceName: flytek8s.ResourceNvidiaGPU, + ResourceTolerations: map[corev1.ResourceName][]corev1.Toleration{ + flytek8s.ResourceNvidiaGPU: {gpuToleration}, + }, + })) + + taskConfigs := []*kfplugins.DistributedTensorflowTrainingTask{ + { + ChiefReplicas: &kfplugins.DistributedTensorflowTrainingReplicaSpec{ + Replicas: 1, + Resources: &core.Resources{ + Requests: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "250m"}, + {Name: core.Resources_MEMORY, Value: "1Gi"}, + }, + Limits: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "500m"}, + {Name: core.Resources_MEMORY, Value: "2Gi"}, + }, + }, + }, + WorkerReplicas: &kfplugins.DistributedTensorflowTrainingReplicaSpec{ + Replicas: 100, + Resources: &core.Resources{ + Requests: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "1024m"}, + {Name: core.Resources_MEMORY, Value: "1Gi"}, + {Name: core.Resources_GPU, Value: "1"}, + }, + Limits: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "2048m"}, + {Name: 
core.Resources_MEMORY, Value: "2Gi"}, + {Name: core.Resources_GPU, Value: "1"}, + }, + }, + }, + }, + { + ChiefReplicas: &kfplugins.DistributedTensorflowTrainingReplicaSpec{ + Common: &plugins.CommonReplicaSpec{ + Replicas: 1, + Resources: &core.Resources{ + Requests: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "250m"}, + {Name: core.Resources_MEMORY, Value: "1Gi"}, + }, + Limits: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "500m"}, + {Name: core.Resources_MEMORY, Value: "2Gi"}, + }, + }, + }, + }, + WorkerReplicas: &kfplugins.DistributedTensorflowTrainingReplicaSpec{ + Common: &plugins.CommonReplicaSpec{ + Replicas: 100, + Resources: &core.Resources{ + Requests: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "1024m"}, + {Name: core.Resources_MEMORY, Value: "1Gi"}, + {Name: core.Resources_GPU, Value: "1"}, + }, + Limits: []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "2048m"}, + {Name: core.Resources_MEMORY, Value: "2Gi"}, + {Name: core.Resources_GPU, Value: "1"}, + }, + }, + }, + }, + }, + } + + for _, taskConfig := range taskConfigs { + + tensorflowResourceHandler := tensorflowOperatorResourceHandler{} + + taskTemplate := dummyTensorFlowTaskTemplate("v1", taskConfig) + taskTemplate.TaskTypeVersion = 1 + + resource, err := tensorflowResourceHandler.BuildResource(context.TODO(), dummyTensorFlowTaskContext(taskTemplate, resourceRequirements, nil)) + assert.NoError(t, err) + assert.NotNil(t, resource) + + tensorflowJob, ok := resource.(*kubeflowv1.TFJob) + assert.True(t, ok) + + assert.NotContains(t, tensorflowJob.Spec.TFReplicaSpecs[kubeflowv1.TFJobReplicaTypeChief].Template.Spec.Tolerations, gpuToleration) + assert.Contains(t, tensorflowJob.Spec.TFReplicaSpecs[kubeflowv1.TFJobReplicaTypeWorker].Template.Spec.Tolerations, gpuToleration) + } +} diff --git a/flyteplugins/go/tasks/plugins/k8s/pod/container_test.go b/flyteplugins/go/tasks/plugins/k8s/pod/container_test.go new 
file mode 100644 index 0000000000..315cbd042a --- /dev/null +++ b/flyteplugins/go/tasks/plugins/k8s/pod/container_test.go @@ -0,0 +1,710 @@ +package pod + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "google.golang.org/protobuf/types/known/timestamppb" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + + pluginsCore "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + pluginsCoreMock "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core/mocks" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/flytek8s" + flytek8sConfig "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/flytek8s/config" + pluginsIOMock "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/io/mocks" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/k8s" + k8smocks "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/k8s/mocks" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/utils" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +var containerResourceRequirements = &v1.ResourceRequirements{ + Limits: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("1024m"), + }, +} + +var ( + serviceAccount = "service-account" + podTemplateServiceAccount = "test-service-account" + securityContextServiceAccount = "security-context-service-account" +) + +func dummyContainerTaskTemplate(command []string, args []string) *core.TaskTemplate { + return &core.TaskTemplate{ + Type: "test", + Target: &core.TaskTemplate_Container{ + Container: &core.Container{ + Command: command, + Args: args, + }, + }, + } +} + +func dummyContainerTaskTemplateWithPodSpec(command []string, args []string) *core.TaskTemplate { + + podSpec := v1.PodSpec{ + InitContainers: []v1.Container{ + 
v1.Container{ + Name: "test-image", + Command: command, + Args: args, + }, + }, + Containers: []v1.Container{ + v1.Container{ + Name: "test-image", + Command: command, + Args: args, + }, + }, + ServiceAccountName: podTemplateServiceAccount, + } + + podSpecPb, err := utils.MarshalObjToStruct(podSpec) + if err != nil { + panic(err) + } + + taskTemplate := &core.TaskTemplate{ + Type: "test", + Target: &core.TaskTemplate_K8SPod{ + K8SPod: &core.K8SPod{ + PodSpec: podSpecPb, + }, + }, + Config: map[string]string{ + "primary_container_name": "test-image", + }, + } + + return taskTemplate +} + +func dummyContainerTaskMetadata(resources *v1.ResourceRequirements, extendedResources *core.ExtendedResources, returnsServiceAccount bool, containerImage string) pluginsCore.TaskExecutionMetadata { + taskMetadata := &pluginsCoreMock.TaskExecutionMetadata{} + taskMetadata.On("GetNamespace").Return("test-namespace") + taskMetadata.On("GetAnnotations").Return(map[string]string{"annotation-1": "val1"}) + taskMetadata.On("GetLabels").Return(map[string]string{"label-1": "val1"}) + taskMetadata.On("GetOwnerReference").Return(metav1.OwnerReference{ + Kind: "node", + Name: "blah", + }) + if returnsServiceAccount { + taskMetadata.On("GetK8sServiceAccount").Return(serviceAccount) + } else { + taskMetadata.On("GetK8sServiceAccount").Return("") + } + taskMetadata.On("GetSecurityContext").Return(core.SecurityContext{ + RunAs: &core.Identity{K8SServiceAccount: securityContextServiceAccount}, + }) + taskMetadata.On("GetOwnerID").Return(types.NamespacedName{ + Namespace: "test-namespace", + Name: "test-owner-name", + }) + taskMetadata.OnGetPlatformResources().Return(&v1.ResourceRequirements{}) + + tID := &pluginsCoreMock.TaskExecutionID{} + tID.On("GetID").Return(core.TaskExecutionIdentifier{ + NodeExecutionId: &core.NodeExecutionIdentifier{ + ExecutionId: &core.WorkflowExecutionIdentifier{ + Name: "my_name", + Project: "my_project", + Domain: "my_domain", + }, + }, + }) + 
tID.On("GetGeneratedName").Return("my_project:my_domain:my_name") + tID.OnGetUniqueNodeID().Return("unique-node-id") + taskMetadata.On("GetTaskExecutionID").Return(tID) + + to := &pluginsCoreMock.TaskOverrides{} + to.On("GetResources").Return(resources) + to.On("GetExtendedResources").Return(extendedResources) + to.On("GetPodTemplate").Return(nil) + + to.OnGetContainerImage().Return(containerImage) + taskMetadata.On("GetOverrides").Return(to) + taskMetadata.On("IsInterruptible").Return(true) + taskMetadata.On("GetEnvironmentVariables").Return(nil) + taskMetadata.OnGetConsoleURL().Return("") + return taskMetadata +} + +func dummyContainerTaskContext(taskTemplate *core.TaskTemplate, taskMetadata pluginsCore.TaskExecutionMetadata) pluginsCore.TaskExecutionContext { + taskCtx := &pluginsCoreMock.TaskExecutionContext{} + inputReader := &pluginsIOMock.InputReader{} + inputReader.OnGetInputPrefixPath().Return("test-data-reference") + inputReader.OnGetInputPath().Return("test-data-reference") + inputReader.OnGetMatch(mock.Anything).Return(&core.LiteralMap{}, nil) + taskCtx.OnInputReader().Return(inputReader) + + outputReader := &pluginsIOMock.OutputWriter{} + outputReader.OnGetOutputPath().Return("/data/outputs.pb") + outputReader.OnGetOutputPrefixPath().Return("/data/") + outputReader.OnGetRawOutputPrefix().Return("") + outputReader.OnGetCheckpointPrefix().Return("/checkpoint") + outputReader.OnGetPreviousCheckpointsPrefix().Return("/prev") + + taskCtx.OnOutputWriter().Return(outputReader) + + taskReader := &pluginsCoreMock.TaskReader{} + taskReader.OnReadMatch(mock.Anything).Return(taskTemplate, nil) + taskCtx.OnTaskReader().Return(taskReader) + + taskCtx.OnTaskExecutionMetadata().Return(taskMetadata) + + pluginStateReader := &pluginsCoreMock.PluginStateReader{} + pluginStateReader.OnGetMatch(mock.Anything).Return(0, nil) + taskCtx.OnPluginStateReader().Return(pluginStateReader) + + return taskCtx +} + +func dummyContainerPluginContext(taskTemplate *core.TaskTemplate, 
taskMetadata pluginsCore.TaskExecutionMetadata) *k8smocks.PluginContext { + pCtx := &k8smocks.PluginContext{} + inputReader := &pluginsIOMock.InputReader{} + inputReader.OnGetInputPrefixPath().Return("test-data-reference") + inputReader.OnGetInputPath().Return("test-data-reference") + inputReader.OnGetMatch(mock.Anything).Return(&core.LiteralMap{}, nil) + pCtx.OnInputReader().Return(inputReader) + + outputReader := &pluginsIOMock.OutputWriter{} + outputReader.OnGetOutputPath().Return("/data/outputs.pb") + outputReader.OnGetOutputPrefixPath().Return("/data/") + outputReader.OnGetRawOutputPrefix().Return("") + outputReader.OnGetCheckpointPrefix().Return("/checkpoint") + outputReader.OnGetPreviousCheckpointsPrefix().Return("/prev") + + pCtx.OnOutputWriter().Return(outputReader) + + taskReader := &pluginsCoreMock.TaskReader{} + taskReader.OnReadMatch(mock.Anything).Return(taskTemplate, nil) + pCtx.OnTaskReader().Return(taskReader) + + pCtx.OnTaskExecutionMetadata().Return(taskMetadata) + + pluginStateReader := &pluginsCoreMock.PluginStateReader{} + pluginStateReader.OnGetMatch(mock.Anything).Return(0, nil) + pCtx.OnPluginStateReader().Return(pluginStateReader) + + return pCtx +} + +func TestContainerTaskExecutor_BuildIdentityResource(t *testing.T) { + taskMetadata := &pluginsCoreMock.TaskExecutionMetadata{} + r, err := DefaultPodPlugin.BuildIdentityResource(context.TODO(), taskMetadata) + assert.NoError(t, err) + assert.NotNil(t, r) + _, ok := r.(*v1.Pod) + assert.True(t, ok) + assert.Equal(t, flytek8s.PodKind, r.GetObjectKind().GroupVersionKind().Kind) +} + +func TestContainerTaskExecutor_BuildResource(t *testing.T) { + command := []string{"command"} + args := []string{"{{.Input}}"} + testCases := []struct { + name string + taskTemplate *core.TaskTemplate + taskMetadata pluginsCore.TaskExecutionMetadata + expectServiceAccount string + checkInitContainer bool + }{ + { + name: "BuildResource", + taskTemplate: dummyContainerTaskTemplate(command, args), + taskMetadata: 
dummyContainerTaskMetadata(containerResourceRequirements, nil, true, ""), + expectServiceAccount: serviceAccount, + checkInitContainer: false, + }, + { + name: "BuildResource_PodTemplate", + taskTemplate: dummyContainerTaskTemplateWithPodSpec(command, args), + taskMetadata: dummyContainerTaskMetadata(containerResourceRequirements, nil, true, ""), + expectServiceAccount: podTemplateServiceAccount, + checkInitContainer: true, + }, + { + name: "BuildResource_SecurityContext", + taskTemplate: dummyContainerTaskTemplate(command, args), + taskMetadata: dummyContainerTaskMetadata(containerResourceRequirements, nil, false, ""), + expectServiceAccount: securityContextServiceAccount, + checkInitContainer: false, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + taskCtx := dummyContainerTaskContext(tc.taskTemplate, tc.taskMetadata) + + r, err := DefaultPodPlugin.BuildResource(context.TODO(), taskCtx) + assert.NoError(t, err) + assert.NotNil(t, r) + j, ok := r.(*v1.Pod) + assert.True(t, ok) + + assert.NotEmpty(t, j.Spec.Containers) + assert.Equal(t, containerResourceRequirements.Limits[v1.ResourceCPU], j.Spec.Containers[0].Resources.Limits[v1.ResourceCPU]) + + ephemeralStorageRes := j.Spec.Containers[0].Resources.Limits[v1.ResourceEphemeralStorage] + assert.Equal(t, int64(0), (&ephemeralStorageRes).Value()) + + assert.Equal(t, command, j.Spec.Containers[0].Command) + assert.Equal(t, []string{"test-data-reference"}, j.Spec.Containers[0].Args) + + if tc.checkInitContainer { + assert.Equal(t, command, j.Spec.InitContainers[0].Command) + assert.Equal(t, []string{"test-data-reference"}, j.Spec.InitContainers[0].Args) + } + + assert.Equal(t, tc.expectServiceAccount, j.Spec.ServiceAccountName) + }) + } +} + +func TestContainerTaskExecutor_BuildResource_ExtendedResources(t *testing.T) { + assert.NoError(t, flytek8sConfig.SetK8sPluginConfig(&flytek8sConfig.K8sPluginConfig{ + GpuDeviceNodeLabel: "gpu-node-label", + GpuPartitionSizeNodeLabel: 
"gpu-partition-size", + GpuResourceName: flytek8s.ResourceNvidiaGPU, + })) + + fixtures := []struct { + name string + resources *v1.ResourceRequirements + extendedResourcesBase *core.ExtendedResources + extendedResourcesOverride *core.ExtendedResources + expectedNsr []v1.NodeSelectorTerm + expectedTol []v1.Toleration + }{ + { + "without overrides", + &v1.ResourceRequirements{ + Limits: v1.ResourceList{ + flytek8s.ResourceNvidiaGPU: resource.MustParse("1"), + }, + }, + &core.ExtendedResources{ + GpuAccelerator: &core.GPUAccelerator{ + Device: "nvidia-tesla-t4", + }, + }, + nil, + []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{ + v1.NodeSelectorRequirement{ + Key: "gpu-node-label", + Operator: v1.NodeSelectorOpIn, + Values: []string{"nvidia-tesla-t4"}, + }, + }, + }, + }, + []v1.Toleration{ + { + Key: "gpu-node-label", + Value: "nvidia-tesla-t4", + Operator: v1.TolerationOpEqual, + Effect: v1.TaintEffectNoSchedule, + }, + }, + }, + { + "with overrides", + &v1.ResourceRequirements{ + Limits: v1.ResourceList{ + flytek8s.ResourceNvidiaGPU: resource.MustParse("1"), + }, + }, + &core.ExtendedResources{ + GpuAccelerator: &core.GPUAccelerator{ + Device: "nvidia-tesla-t4", + }, + }, + &core.ExtendedResources{ + GpuAccelerator: &core.GPUAccelerator{ + Device: "nvidia-tesla-a100", + PartitionSizeValue: &core.GPUAccelerator_PartitionSize{ + PartitionSize: "1g.5gb", + }, + }, + }, + []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{ + v1.NodeSelectorRequirement{ + Key: "gpu-node-label", + Operator: v1.NodeSelectorOpIn, + Values: []string{"nvidia-tesla-a100"}, + }, + v1.NodeSelectorRequirement{ + Key: "gpu-partition-size", + Operator: v1.NodeSelectorOpIn, + Values: []string{"1g.5gb"}, + }, + }, + }, + }, + []v1.Toleration{ + { + Key: "gpu-node-label", + Value: "nvidia-tesla-a100", + Operator: v1.TolerationOpEqual, + Effect: v1.TaintEffectNoSchedule, + }, + { + Key: "gpu-partition-size", + Value: "1g.5gb", + Operator: 
v1.TolerationOpEqual, + Effect: v1.TaintEffectNoSchedule, + }, + }, + }, + } + + for _, f := range fixtures { + t.Run(f.name, func(t *testing.T) { + taskTemplate := dummyContainerTaskTemplate([]string{"command"}, []string{"{{.Input}}"}) + taskTemplate.ExtendedResources = f.extendedResourcesBase + taskMetadata := dummyContainerTaskMetadata(f.resources, f.extendedResourcesOverride, true, "") + taskContext := dummyContainerTaskContext(taskTemplate, taskMetadata) + r, err := DefaultPodPlugin.BuildResource(context.TODO(), taskContext) + assert.Nil(t, err) + assert.NotNil(t, r) + pod, ok := r.(*v1.Pod) + assert.True(t, ok) + + assert.EqualValues( + t, + f.expectedNsr, + pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms, + ) + assert.EqualValues( + t, + f.expectedTol, + pod.Spec.Tolerations, + ) + }) + } +} + +func TestContainerTaskExecutor_BuildResource_ContainerImage(t *testing.T) { + assert.NoError(t, flytek8sConfig.SetK8sPluginConfig(&flytek8sConfig.K8sPluginConfig{})) + + fixtures := []struct { + name string + resources *v1.ResourceRequirements + containerImageOverride string + }{ + { + "without overrides", + &v1.ResourceRequirements{ + Limits: v1.ResourceList{ + flytek8s.ResourceNvidiaGPU: resource.MustParse("1"), + }, + }, + "", + }, + { + "with overrides", + &v1.ResourceRequirements{ + Limits: v1.ResourceList{ + flytek8s.ResourceNvidiaGPU: resource.MustParse("1"), + }, + }, + "test-image", + }, + } + + for _, f := range fixtures { + t.Run(f.name, func(t *testing.T) { + taskTemplate := dummyContainerTaskTemplate([]string{"command"}, []string{"{{.Input}}"}) + taskMetadata := dummyContainerTaskMetadata(f.resources, nil, true, f.containerImageOverride) + taskContext := dummyContainerTaskContext(taskTemplate, taskMetadata) + r, err := DefaultPodPlugin.BuildResource(context.TODO(), taskContext) + assert.Nil(t, err) + assert.NotNil(t, r) + _, ok := r.(*v1.Pod) + assert.True(t, ok) + assert.Equal(t, f.containerImageOverride, 
r.(*v1.Pod).Spec.Containers[0].Image) + }) + } +} + +func TestContainerTaskExecutor_GetTaskPhase(t *testing.T) { + command := []string{"command"} + args := []string{"{{.Input}}"} + taskTemplate := dummyContainerTaskTemplate(command, args) + taskMetadata := dummyContainerTaskMetadata(containerResourceRequirements, nil, true, "") + pluginContext := dummyContainerPluginContext(taskTemplate, taskMetadata) + + j := &v1.Pod{ + Status: v1.PodStatus{}, + } + startTime := time.Now() + endTime := startTime.Add(time.Hour) + + ctx := context.TODO() + t.Run("running", func(t *testing.T) { + j.Status.Phase = v1.PodRunning + j.Name = "exec-n0-0" + j.Namespace = "ns" + j.Spec.Containers = []v1.Container{{Name: "primary"}} + j.Status.ContainerStatuses = []v1.ContainerStatus{ + { + Name: "primary", + State: v1.ContainerState{ + Running: &v1.ContainerStateRunning{ + StartedAt: metav1.Time{Time: startTime}, + }, + }, + }, + } + j.Status.InitContainerStatuses = []v1.ContainerStatus{ + { + Name: "init", + State: v1.ContainerState{ + Terminated: &v1.ContainerStateTerminated{ + StartedAt: metav1.Time{Time: startTime}, + FinishedAt: metav1.Time{Time: endTime}, + }, + }, + }, + } + logCtx := &core.LogContext{ + PrimaryPodName: j.Name, + Pods: []*core.PodLogContext{ + { + Namespace: j.Namespace, + PodName: j.Name, + PrimaryContainerName: "primary", + Containers: []*core.ContainerContext{ + { + ContainerName: "primary", + Process: &core.ContainerContext_ProcessContext{ + ContainerStartTime: timestamppb.New(startTime), + }, + }, + }, + InitContainers: []*core.ContainerContext{ + { + ContainerName: "init", + Process: &core.ContainerContext_ProcessContext{ + ContainerStartTime: timestamppb.New(startTime), + ContainerEndTime: timestamppb.New(endTime), + }, + }, + }, + }, + }, + } + + phaseInfo, err := DefaultPodPlugin.GetTaskPhase(ctx, pluginContext, j) + assert.NoError(t, err) + assert.Equal(t, pluginsCore.PhaseRunning, phaseInfo.Phase()) + assert.Equal(t, logCtx, phaseInfo.Info().LogContext) + 
}) + + t.Run("queued", func(t *testing.T) { + j.Status.Phase = v1.PodPending + phaseInfo, err := DefaultPodPlugin.GetTaskPhase(ctx, pluginContext, j) + assert.NoError(t, err) + assert.Equal(t, pluginsCore.PhaseQueued, phaseInfo.Phase()) + }) + + t.Run("failNoCondition", func(t *testing.T) { + j.Status.Phase = v1.PodFailed + phaseInfo, err := DefaultPodPlugin.GetTaskPhase(ctx, pluginContext, j) + assert.NoError(t, err) + assert.Equal(t, pluginsCore.PhaseRetryableFailure, phaseInfo.Phase()) + ec := phaseInfo.Err().GetCode() + assert.Equal(t, "Interrupted", ec) + }) + + t.Run("failConditionUnschedulable", func(t *testing.T) { + j.Status.Phase = v1.PodFailed + j.Status.Reason = "Unschedulable" + j.Status.Message = "some message" + j.Status.Conditions = []v1.PodCondition{ + { + Type: v1.PodReasonUnschedulable, + }, + } + phaseInfo, err := DefaultPodPlugin.GetTaskPhase(ctx, pluginContext, j) + assert.NoError(t, err) + assert.Equal(t, pluginsCore.PhaseRetryableFailure, phaseInfo.Phase()) + ec := phaseInfo.Err().GetCode() + assert.Equal(t, "Unschedulable", ec) + }) + + t.Run("successOptimized", func(t *testing.T) { + j.Status.Phase = v1.PodRunning + j.Status.ContainerStatuses = []v1.ContainerStatus{ + { + State: v1.ContainerState{ + Terminated: &v1.ContainerStateTerminated{ + ExitCode: 0, + }, + }, + }, + } + + phaseInfo, err := DefaultPodPlugin.GetTaskPhase(ctx, pluginContext, j) + assert.NoError(t, err) + assert.Equal(t, pluginsCore.PhaseSuccess, phaseInfo.Phase()) + }) + + t.Run("success", func(t *testing.T) { + j.Status.Phase = v1.PodSucceeded + phaseInfo, err := DefaultPodPlugin.GetTaskPhase(ctx, pluginContext, j) + assert.NoError(t, err) + assert.NotNil(t, phaseInfo) + assert.Equal(t, pluginsCore.PhaseSuccess, phaseInfo.Phase()) + }) +} + +func TestContainerTaskExecutor_GetProperties(t *testing.T) { + expected := k8s.PluginProperties{} + assert.Equal(t, expected, DefaultPodPlugin.GetProperties()) +} + +func TestContainerTaskExecutor_GetTaskStatus_InvalidImageName(t 
*testing.T) { + command := []string{"command"} + args := []string{"{{.Input}}"} + taskTemplate := dummyContainerTaskTemplate(command, args) + taskMetadata := dummyContainerTaskMetadata(containerResourceRequirements, nil, true, "") + pluginContext := dummyContainerPluginContext(taskTemplate, taskMetadata) + + ctx := context.TODO() + reason := "InvalidImageName" + message := "Failed to apply default image tag \"TEST/flyteorg/myapp:latest\": couldn't parse image reference" + + " \"TEST/flyteorg/myapp:latest\": invalid reference format: repository name must be lowercase" + + pendingPod := &v1.Pod{ + Status: v1.PodStatus{ + Phase: v1.PodPending, + Conditions: []v1.PodCondition{ + { + Type: v1.PodReady, + Status: v1.ConditionFalse, + }, + }, + ContainerStatuses: []v1.ContainerStatus{ + { + ContainerID: "ContainerID", + Ready: false, + State: v1.ContainerState{ + Waiting: &v1.ContainerStateWaiting{ + Reason: reason, + Message: message, + }, + }, + }, + }, + }, + } + + t.Run("failInvalidImageName", func(t *testing.T) { + pendingPod.Status.Phase = v1.PodPending + phaseInfo, err := DefaultPodPlugin.GetTaskPhase(ctx, pluginContext, pendingPod) + finalReason := fmt.Sprintf("|%s", reason) + finalMessage := fmt.Sprintf("|%s", message) + assert.NoError(t, err) + assert.Equal(t, pluginsCore.PhasePermanentFailure, phaseInfo.Phase()) + assert.Equal(t, &core.ExecutionError{Code: finalReason, Message: finalMessage, Kind: core.ExecutionError_USER}, phaseInfo.Err()) + }) +} + +func TestContainerTaskExecutor_BuildResource_VscodePort(t *testing.T) { + assert.NoError(t, flytek8sConfig.SetK8sPluginConfig(&flytek8sConfig.K8sPluginConfig{})) + + command := []string{"command"} + args := []string{"{{.Input}}"} + + testCases := []struct { + name string + envVars []v1.EnvVar + expectedPort int32 + }{ + { + name: "ACTION_NAME env var present - port 6060", + envVars: []v1.EnvVar{ + { + Name: flytek8s.FlyteEnableVscode, + Value: "true", + }, + { + Name: "ACTION_NAME", + Value: "some-action", + }, + 
}, + expectedPort: 6060, + }, + { + name: "ACTION_NAME env var absent - port 8080", + envVars: []v1.EnvVar{ + { + Name: flytek8s.FlyteEnableVscode, + Value: "true", + }, + }, + expectedPort: 8080, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + // Create a task template with VSCode enabled and environment variables + podSpec := v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "test-image", + Command: command, + Args: args, + Env: tc.envVars, + }, + }, + } + + podSpecPb, err := utils.MarshalObjToStruct(podSpec) + assert.NoError(t, err) + + taskTemplate := &core.TaskTemplate{ + Type: "test", + Target: &core.TaskTemplate_K8SPod{ + K8SPod: &core.K8SPod{ + PodSpec: podSpecPb, + }, + }, + Config: map[string]string{ + "primary_container_name": "test-image", + }, + } + + taskMetadata := dummyContainerTaskMetadata(containerResourceRequirements, nil, true, "") + taskCtx := dummyContainerTaskContext(taskTemplate, taskMetadata) + + r, err := DefaultPodPlugin.BuildResource(context.TODO(), taskCtx) + assert.NoError(t, err) + assert.NotNil(t, r) + + pod, ok := r.(*v1.Pod) + assert.True(t, ok) + + // Verify the readiness probe port + assert.NotEmpty(t, pod.Spec.Containers) + assert.NotNil(t, pod.Spec.Containers[0].ReadinessProbe) + assert.NotNil(t, pod.Spec.Containers[0].ReadinessProbe.HTTPGet) + assert.Equal(t, tc.expectedPort, pod.Spec.Containers[0].ReadinessProbe.HTTPGet.Port.IntVal) + }) + } +} diff --git a/flyteplugins/go/tasks/plugins/k8s/pod/plugin.go b/flyteplugins/go/tasks/plugins/k8s/pod/plugin.go new file mode 100644 index 0000000000..8778e981da --- /dev/null +++ b/flyteplugins/go/tasks/plugins/k8s/pod/plugin.go @@ -0,0 +1,373 @@ +package pod + +import ( + "context" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" + "sigs.k8s.io/controller-runtime/pkg/client" + + pluginserrors "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/errors" + 
"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/logs" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery" + pluginsCore "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/flytek8s" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/k8s" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/tasklog" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/utils" + "github.com/flyteorg/flyte/v2/flytestdlib/logger" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +const ( + ContainerTaskType = "container" + podTaskType = "pod" + pythonTaskType = "python-task" + rawContainerTaskType = "raw-container" + SidecarTaskType = "sidecar" +) + +// Why, you might wonder do we recreate the generated go struct generated from the plugins.SidecarJob proto? Because +// although we unmarshal the task custom json, the PodSpec itself is not generated from a proto definition, +// but a proper go struct defined in k8s libraries. Therefore we only unmarshal the sidecar as a json, rather than jsonpb. 
// sidecarJob is the legacy (TaskTypeVersion 0) custom payload for sidecar
// tasks: a raw Kubernetes PodSpec, the name of the primary container, and
// optional pod-level annotations/labels. It is unmarshalled from the
// TaskTemplate's Custom struct.
type sidecarJob struct {
	PodSpec              *v1.PodSpec
	PrimaryContainerName string
	Annotations          map[string]string
	Labels               map[string]string
}

// DefaultPodPlugin is the single shared plugin instance registered for the
// container, sidecar and pod task types (see init below).
var DefaultPodPlugin = plugin{}

// plugin implements the k8s pod plugin. It is stateless; all state flows in
// through the task/plugin contexts.
type plugin struct {
}

// BuildIdentityResource returns a bare Pod object used by the controller
// machinery to identify the resource kind this plugin watches.
func (plugin) BuildIdentityResource(_ context.Context, _ pluginsCore.TaskExecutionMetadata) (client.Object, error) {
	return flytek8s.BuildIdentityPod(), nil
}

// BuildResource constructs the v1.Pod for a task. It supports three layouts of
// the TaskTemplate:
//   - sidecar TaskTypeVersion 0: Custom holds a sidecarJob (PodSpec + primary
//     container name + metadata),
//   - sidecar TaskTypeVersion 1: Custom holds the PodSpec directly and the
//     primary container name comes from Config[flytek8s.PrimaryContainerKey],
//   - everything else: the TaskTemplate Container / K8sPod fields, via
//     flytek8s.BuildRawPod.
//
// The resulting podSpec/objectMeta are then run through
// flytek8s.ApplyFlytePodConfiguration, a readiness probe is injected for
// vscode-enabled containers, and the primary-container annotation and default
// service account are applied. Returns a BadTaskSpecification error for
// malformed templates.
func (p plugin) BuildResource(ctx context.Context, taskCtx pluginsCore.TaskExecutionContext) (client.Object, error) {
	taskTemplate, err := taskCtx.TaskReader().Read(ctx)
	if err != nil {
		logger.Warnf(ctx, "failed to read task information when trying to construct Pod, err: %s", err.Error())
		return nil, err
	}

	var podSpec *v1.PodSpec
	objectMeta := &metav1.ObjectMeta{
		Annotations: make(map[string]string),
		Labels:      make(map[string]string),
	}
	primaryContainerName := ""

	if taskTemplate.Type == SidecarTaskType && taskTemplate.TaskTypeVersion == 0 {
		// handles pod tasks when they are defined as Sidecar tasks and marshal the podspec using k8s proto.
		sidecarJob := sidecarJob{}
		err := utils.UnmarshalStructToObj(taskTemplate.GetCustom(), &sidecarJob)
		if err != nil {
			return nil, pluginserrors.Errorf(pluginserrors.BadTaskSpecification, "invalid TaskSpecification [%v], Err: [%v]", taskTemplate.GetCustom(), err.Error())
		}

		if sidecarJob.PodSpec == nil {
			return nil, pluginserrors.Errorf(pluginserrors.BadTaskSpecification, "invalid TaskSpecification, nil PodSpec [%v]", taskTemplate.GetCustom())
		}

		podSpec = sidecarJob.PodSpec

		// get primary container name
		primaryContainerName = sidecarJob.PrimaryContainerName

		// update annotations and labels
		objectMeta.Annotations = utils.UnionMaps(objectMeta.Annotations, sidecarJob.Annotations)
		objectMeta.Labels = utils.UnionMaps(objectMeta.Labels, sidecarJob.Labels)
	} else if taskTemplate.Type == SidecarTaskType && taskTemplate.TaskTypeVersion == 1 {
		// handles pod tasks that marshal the pod spec to the task custom.
		err := utils.UnmarshalStructToObj(taskTemplate.GetCustom(), &podSpec)
		if err != nil {
			return nil, pluginserrors.Errorf(pluginserrors.BadTaskSpecification,
				"Unable to unmarshal task custom [%v], Err: [%v]", taskTemplate.GetCustom(), err.Error())
		}

		// get primary container name
		if len(taskTemplate.GetConfig()) == 0 {
			return nil, pluginserrors.Errorf(pluginserrors.BadTaskSpecification,
				"invalid TaskSpecification, config needs to be non-empty and include missing [%s] key", flytek8s.PrimaryContainerKey)
		}

		var ok bool
		if primaryContainerName, ok = taskTemplate.GetConfig()[flytek8s.PrimaryContainerKey]; !ok {
			return nil, pluginserrors.Errorf(pluginserrors.BadTaskSpecification,
				"invalid TaskSpecification, config missing [%s] key in [%v]", flytek8s.PrimaryContainerKey, taskTemplate.GetConfig())
		}

		// update annotations and labels
		if taskTemplate.GetK8SPod() != nil && taskTemplate.GetK8SPod().Metadata != nil {
			objectMeta.Annotations = utils.UnionMaps(objectMeta.Annotations, taskTemplate.GetK8SPod().Metadata.Annotations)
			objectMeta.Labels = utils.UnionMaps(objectMeta.Labels, taskTemplate.GetK8SPod().Metadata.Labels)
		}
	} else {
		// handles both container / pod tasks that use the TaskTemplate Container and K8sPod fields
		var err error
		podSpec, objectMeta, primaryContainerName, err = flytek8s.BuildRawPod(ctx, taskCtx)
		if err != nil {
			return nil, err
		}
	}

	// update podSpec and objectMeta with Flyte customizations
	podSpec, objectMeta, err = flytek8s.ApplyFlytePodConfiguration(ctx, taskCtx, podSpec, objectMeta, primaryContainerName)
	if err != nil {
		return nil, err
	}

	// Inject an HTTP readiness probe into the primary container (or the sole
	// container) when vscode is enabled for it, so GetTaskPhaseWithLogs can
	// report IDE readiness via IsPodReady.
	for i, c := range podSpec.Containers {
		if len(podSpec.Containers) > 1 && c.Name != primaryContainerName {
			continue
		}
		if !flytek8s.IsVscodeEnabled(ctx, podSpec.Containers[i].Env) {
			break
		}
		// Default vscode port; v2 tasks (identified by the ACTION_NAME env var)
		// listen on 6060 instead.
		port := 8080
		// TODO: Will remove this logic once we have a better way to identify v2 tasks
		// NOTE(review): this loop does not break after finding ACTION_NAME;
		// harmless since it only re-assigns the same value, but a break would
		// be cheaper.
		for _, env := range podSpec.Containers[i].Env {
			if env.Name != "ACTION_NAME" {
				continue
			}
			port = 6060
		}

		newContainer := c.DeepCopy()
		newContainer.ReadinessProbe = &v1.Probe{
			ProbeHandler: v1.ProbeHandler{
				HTTPGet: &v1.HTTPGetAction{
					Port: intstr.FromInt32(int32(port)),
				},
			},
			InitialDelaySeconds: 15,
			PeriodSeconds:       5,
			FailureThreshold:    50,
		}
		podSpec.Containers[i] = *newContainer
	}

	// set primaryContainerKey annotation if this is a Sidecar task or, as an optimization, if there is only a single
	// container. this plugin marks the task complete if the primary Container is complete, so if there is only one
	// container we can mark the task as complete before the Pod has been marked complete.
	if taskTemplate.Type == SidecarTaskType || (len(podSpec.Containers) == 1 && taskTemplate.Type != rawContainerTaskType) {
		objectMeta.Annotations[flytek8s.PrimaryContainerKey] = primaryContainerName
	}

	// Fall back to the service account from the task execution metadata when
	// the spec does not set one explicitly.
	if len(podSpec.ServiceAccountName) == 0 {
		podSpec.ServiceAccountName = flytek8s.GetServiceAccountNameFromTaskExecutionMetadata(taskCtx.TaskExecutionMetadata())
	}

	pod := flytek8s.BuildIdentityPod()
	pod.ObjectMeta = *objectMeta
	pod.Spec = *podSpec

	return pod, nil
}

// GetTaskPhase resolves the task phase for the given Pod using the default
// log plugin configuration. It delegates to GetTaskPhaseWithLogs with no log
// suffix and no extra template variables.
func (p plugin) GetTaskPhase(ctx context.Context, pluginContext k8s.PluginContext, r client.Object) (pluginsCore.PhaseInfo, error) {
	logPlugin, err := logs.InitializeLogPlugins(logs.GetLogConfig())
	if err != nil {
		return pluginsCore.PhaseInfoUndefined, err
	}

	return p.GetTaskPhaseWithLogs(ctx, pluginContext, r, logPlugin, "", nil)
}

// GetTaskPhaseWithLogs computes the PhaseInfo for the Pod `r`: it gathers
// task logs and a LogContext (unless the pod phase is Unknown), demystifies
// the pod status via DemystifyPodStatus, marks IDE-type log links
// ready/not-ready once the task is at least Running, and bumps the phase
// version against the previously stored plugin state.
func (plugin) GetTaskPhaseWithLogs(ctx context.Context, pluginContext k8s.PluginContext, r client.Object, logPlugin tasklog.Plugin, logSuffix string, extraLogTemplateVarsByScheme []tasklog.TemplateVar) (pluginsCore.PhaseInfo, error) {
	pluginState := k8s.PluginState{}
	_, err := pluginContext.PluginStateReader().Get(&pluginState)
	if err != nil {
		return pluginsCore.PhaseInfoUndefined, err
	}

	taskTemplate, err := pluginContext.TaskReader().Read(ctx)
	if err != nil {
		return pluginsCore.PhaseInfoUndefined, err
	}

	pod := r.(*v1.Pod)

	transitionOccurredAt := flytek8s.GetLastTransitionOccurredAt(pod).Time
	reportedAt := flytek8s.GetReportedAt(pod).Time
	if reportedAt.IsZero() {
		reportedAt = transitionOccurredAt
	}

	info := pluginsCore.TaskInfo{
		OccurredAt: &transitionOccurredAt,
		ReportedAt: &reportedAt,
	}

	taskExecID := pluginContext.TaskExecutionMetadata().GetTaskExecutionID()
	if pod.Status.Phase != v1.PodUnknown {
		taskLogs, err := logs.GetLogsForContainerInPod(ctx, logPlugin, taskExecID, pod, 0, logSuffix, extraLogTemplateVarsByScheme, taskTemplate)
		if err != nil {
			return pluginsCore.PhaseInfoUndefined, err
		}
		info.Logs = taskLogs
		info.LogContext = &core.LogContext{
			PrimaryPodName: pod.Name,
			Pods:           []*core.PodLogContext{flytek8s.BuildPodLogContext(pod)},
		}
	}

	phaseInfo, err := DemystifyPodStatus(ctx, pod, info)
	if err != nil {
		return pluginsCore.PhaseInfoUndefined, err
	}

	// Surface IDE (vscode) readiness on the log link once the task is running.
	if phaseInfo.Phase() >= pluginsCore.PhaseRunning {
		for _, tl := range info.Logs {
			if tl != nil && tl.LinkType == core.TaskLog_IDE {
				tl.Ready = IsPodReady(pod)
				if tl.Ready {
					phaseInfo.WithReason("Vscode server is ready")
				} else {
					phaseInfo.WithReason("Vscode server is not ready")
				}
				break
			}
		}
	}

	k8s.MaybeUpdatePhaseVersion(&phaseInfo, &pluginState)
	return phaseInfo, err
}

// IsPodReady reports whether the pod should be considered ready: if a primary
// container is annotated on the pod, its container status Ready flag decides;
// otherwise the pod-level PodReady condition decides.
func IsPodReady(pod *v1.Pod) bool {
	primaryContainerName := flytek8s.GetPrimaryContainerName(pod)
	if len(primaryContainerName) == 0 {
		// Check pod readiness only when primary container is not defined.
		for _, cond := range pod.Status.Conditions {
			if cond.Type == v1.PodReady && cond.Status == v1.ConditionTrue {
				return true
			}
		}
	} else {
		for _, status := range pod.Status.ContainerStatuses {
			if status.Name == primaryContainerName && status.Ready {
				return true
			}
		}
	}
	return false
}

// GetProperties returns the default (empty) plugin properties.
func (plugin) GetProperties() k8s.PluginProperties {
	return k8s.PluginProperties{}
}

// DemystifyPodStatus translates a raw Pod status into a Flyte PhaseInfo.
// Terminal pod phases are delegated to the flytek8s Demystify* helpers; for a
// running pod, behavior depends on whether the primary-container annotation is
// present: without it, success requires every (init) container to have
// terminated with exit code 0, otherwise the task is Running; with it, the
// primary container's status decides the phase. The phase version is bumped
// by one when logs/log context are available so log-link updates propagate.
func DemystifyPodStatus(ctx context.Context, pod *v1.Pod, info pluginsCore.TaskInfo) (pluginsCore.PhaseInfo, error) {
	// NOTE(review): pluginState is never populated from a PluginStateReader
	// here, so the MaybeUpdatePhaseVersion call at the bottom always sees a
	// zero-valued state; the caller (GetTaskPhaseWithLogs) performs its own
	// versioning against the real state — confirm this duplication is intended.
	pluginState := k8s.PluginState{}
	transitionOccurredAt := flytek8s.GetLastTransitionOccurredAt(pod).Time
	phaseInfo := pluginsCore.PhaseInfoUndefined
	var err error
	primaryContainerName, primaryContainerExists := pod.GetAnnotations()[flytek8s.PrimaryContainerKey]

	hasLogs := len(info.Logs) > 0 || len(info.LogContext.GetPods()) > 0
	switch pod.Status.Phase {
	case v1.PodSucceeded:
		phaseInfo, err = flytek8s.DemystifySuccess(pod.Status, info)
	case v1.PodFailed:
		phaseInfo, err = flytek8s.DemystifyFailure(ctx, pod.Status, info, primaryContainerName)
	case v1.PodPending:
		phaseInfo, err = flytek8s.DemystifyPending(pod.Status, info)
	case v1.PodReasonUnschedulable:
		phaseInfo = pluginsCore.PhaseInfoQueuedWithTaskInfo(transitionOccurredAt, pluginsCore.DefaultPhaseVersion, "pod unschedulable", &info)
	case v1.PodUnknown:
		// DO NOTHING
	default:
		if !primaryContainerExists {
			// if all of the containers in the Pod are complete, as an optimization, we can declare the task as
			// succeeded rather than waiting for the Pod to be marked completed.
			allSuccessfullyTerminated := len(pod.Status.ContainerStatuses) > 0
			for _, s := range pod.Status.ContainerStatuses {
				if s.State.Waiting != nil || s.State.Running != nil || (s.State.Terminated != nil && s.State.Terminated.ExitCode != 0) {
					allSuccessfullyTerminated = false
				}
			}

			// Init container will become sidecar if the restart policy is set to Always
			// https://kubernetes.io/docs/concepts/workloads/pods/sidecar-containers/#sidecar-containers-and-pod-lifecycle
			for _, s := range pod.Status.InitContainerStatuses {
				if s.State.Waiting != nil || s.State.Running != nil || (s.State.Terminated != nil && s.State.Terminated.ExitCode != 0) {
					allSuccessfullyTerminated = false
				}
			}

			if allSuccessfullyTerminated {
				return flytek8s.DemystifySuccess(pod.Status, info)
			}

			// if the primary container annotation does not exist, then the task requires all containers
			// to succeed to declare success. therefore, if the pod is not in one of the above states we
			// fallback to declaring the task as 'running'.
			phaseInfo = pluginsCore.PhaseInfoRunning(pluginsCore.DefaultPhaseVersion, &info)
			if hasLogs {
				phaseInfo = phaseInfo.WithVersion(pluginsCore.DefaultPhaseVersion + 1)
			}
		} else {
			// if the primary container annotation exists, we use the status of the specified container
			phaseInfo = flytek8s.DeterminePrimaryContainerPhase(ctx, primaryContainerName, pod.Status.ContainerStatuses, &info)
			if phaseInfo.Phase() == pluginsCore.PhasePermanentFailure && phaseInfo.Err() != nil &&
				phaseInfo.Err().GetCode() == flytek8s.PrimaryContainerNotFound {
				// if the primary container status is not found ensure that the primary container exists.
				// note: it should be impossible for the primary container to not exist at this point.
				for _, container := range pod.Spec.Containers {
					if container.Name == primaryContainerName {
						phaseInfo = pluginsCore.PhaseInfoRunning(pluginsCore.DefaultPhaseVersion, &info)
						break
					}
				}
			} else if phaseInfo.Phase() == pluginsCore.PhaseRunning && hasLogs {
				phaseInfo = phaseInfo.WithVersion(pluginsCore.DefaultPhaseVersion + 1)
			}
		}
	}

	if err != nil {
		return pluginsCore.PhaseInfoUndefined, err
	}

	k8s.MaybeUpdatePhaseVersion(&phaseInfo, &pluginState)
	return phaseInfo, err
}

func init() {
	// Register ContainerTaskType and SidecarTaskType plugin entries. These separate task types
	// still exist within the system, only now both are evaluated using the same internal pod plugin
	// instance. This simplifies migration as users may keep the same configuration but are
	// seamlessly transitioned from separate container and sidecar plugins to a single pod plugin.
	pluginmachinery.PluginRegistry().RegisterK8sPlugin(
		k8s.PluginEntry{
			ID:                  ContainerTaskType,
			RegisteredTaskTypes: []pluginsCore.TaskType{ContainerTaskType, pythonTaskType, rawContainerTaskType},
			ResourceToWatch:     &v1.Pod{},
			Plugin:              DefaultPodPlugin,
			IsDefault:           true,
		})

	pluginmachinery.PluginRegistry().RegisterK8sPlugin(
		k8s.PluginEntry{
			ID:                  SidecarTaskType,
			RegisteredTaskTypes: []pluginsCore.TaskType{SidecarTaskType},
			ResourceToWatch:     &v1.Pod{},
			Plugin:              DefaultPodPlugin,
			IsDefault:           false,
		})

	// register podTaskType plugin entry
	pluginmachinery.PluginRegistry().RegisterK8sPlugin(
		k8s.PluginEntry{
			ID:                  podTaskType,
			RegisteredTaskTypes: []pluginsCore.TaskType{ContainerTaskType, pythonTaskType, rawContainerTaskType, SidecarTaskType},
			ResourceToWatch:     &v1.Pod{},
			Plugin:              DefaultPodPlugin,
			IsDefault:           true,
		})
}
diff --git a/flyteplugins/go/tasks/plugins/k8s/pod/sidecar_test.go b/flyteplugins/go/tasks/plugins/k8s/pod/sidecar_test.go new file mode 100644 index 0000000000..c3f7f1622d --- /dev/null +++
b/flyteplugins/go/tasks/plugins/k8s/pod/sidecar_test.go @@ -0,0 +1,939 @@ +package pod + +import ( + "context" + "encoding/json" + "errors" + "io/ioutil" + "os" + "path" + "testing" + + structpb "github.com/golang/protobuf/ptypes/struct" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + + errors2 "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/errors" + pluginsCore "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + pluginsCoreMock "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core/mocks" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/flytek8s" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/flytek8s/config" + pluginsIOMock "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/io/mocks" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/k8s" + k8smocks "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/k8s/mocks" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +const ResourceNvidiaGPU = "nvidia.com/gpu" + +var sidecarResourceRequirements = &v1.ResourceRequirements{ + Limits: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("2048m"), + v1.ResourceEphemeralStorage: resource.MustParse("100M"), + ResourceNvidiaGPU: resource.MustParse("1"), + }, +} + +func getSidecarTaskTemplateForTest(sideCarJob sidecarJob) *core.TaskTemplate { + sidecarJSON, err := json.Marshal(&sideCarJob) + if err != nil { + panic(err) + } + structObj := structpb.Struct{} + err = json.Unmarshal(sidecarJSON, &structObj) + if err != nil { + panic(err) + } + return &core.TaskTemplate{ + Type: SidecarTaskType, + Custom: &structObj, + } +} + +func dummySidecarTaskMetadata(resources *v1.ResourceRequirements, extendedResources 
*core.ExtendedResources) pluginsCore.TaskExecutionMetadata { + taskMetadata := &pluginsCoreMock.TaskExecutionMetadata{} + taskMetadata.On("GetNamespace").Return("test-namespace") + taskMetadata.On("GetAnnotations").Return(map[string]string{"annotation-1": "val1"}) + + taskMetadata.On("GetLabels").Return(map[string]string{"label-1": "val1"}) + taskMetadata.On("GetOwnerReference").Return(metav1.OwnerReference{ + Kind: "node", + Name: "blah", + }) + taskMetadata.On("IsInterruptible").Return(true) + taskMetadata.On("GetSecurityContext").Return(core.SecurityContext{}) + taskMetadata.On("GetK8sServiceAccount").Return("service-account") + taskMetadata.On("GetOwnerID").Return(types.NamespacedName{ + Namespace: "test-namespace", + Name: "test-owner-name", + }) + taskMetadata.OnGetPlatformResources().Return(&v1.ResourceRequirements{}) + + tID := &pluginsCoreMock.TaskExecutionID{} + tID.On("GetID").Return(core.TaskExecutionIdentifier{ + NodeExecutionId: &core.NodeExecutionIdentifier{ + ExecutionId: &core.WorkflowExecutionIdentifier{ + Name: "my_name", + Project: "my_project", + Domain: "my_domain", + }, + }, + }) + tID.On("GetGeneratedName").Return("my_project:my_domain:my_name") + tID.On("GetUniqueNodeID").Return("an-unique-id") + taskMetadata.On("GetTaskExecutionID").Return(tID) + + to := &pluginsCoreMock.TaskOverrides{} + to.On("GetResources").Return(resources) + to.On("GetExtendedResources").Return(extendedResources) + to.On("GetContainerImage").Return("") + to.On("GetPodTemplate").Return(nil) + taskMetadata.On("GetOverrides").Return(to) + taskMetadata.On("GetEnvironmentVariables").Return(nil) + taskMetadata.On("GetConsoleURL").Return("") + + return taskMetadata +} + +func getDummySidecarTaskContext(taskTemplate *core.TaskTemplate, resources *v1.ResourceRequirements, extendedResources *core.ExtendedResources) pluginsCore.TaskExecutionContext { + taskCtx := &pluginsCoreMock.TaskExecutionContext{} + dummyTaskMetadata := dummySidecarTaskMetadata(resources, extendedResources) 
+ inputReader := &pluginsIOMock.InputReader{} + inputReader.OnGetInputPrefixPath().Return("test-data-prefix") + inputReader.OnGetInputPath().Return("test-data-reference") + inputReader.OnGetMatch(mock.Anything).Return(&core.LiteralMap{}, nil) + taskCtx.OnInputReader().Return(inputReader) + + outputReader := &pluginsIOMock.OutputWriter{} + outputReader.OnGetOutputPath().Return("/data/outputs.pb") + outputReader.OnGetOutputPrefixPath().Return("/data/") + outputReader.OnGetRawOutputPrefix().Return("") + outputReader.OnGetCheckpointPrefix().Return("/checkpoint") + outputReader.OnGetPreviousCheckpointsPrefix().Return("/prev") + taskCtx.OnOutputWriter().Return(outputReader) + + taskReader := &pluginsCoreMock.TaskReader{} + taskReader.OnReadMatch(mock.Anything).Return(taskTemplate, nil) + taskCtx.OnTaskReader().Return(taskReader) + + taskCtx.OnTaskExecutionMetadata().Return(dummyTaskMetadata) + + pluginStateReader := &pluginsCoreMock.PluginStateReader{} + pluginStateReader.OnGetMatch(mock.Anything).Return(0, nil) + taskCtx.OnPluginStateReader().Return(pluginStateReader) + + return taskCtx +} + +func getDummySidecarPluginContext(taskTemplate *core.TaskTemplate, resources *v1.ResourceRequirements) *k8smocks.PluginContext { + pCtx := &k8smocks.PluginContext{} + dummyTaskMetadata := dummySidecarTaskMetadata(resources, nil) + inputReader := &pluginsIOMock.InputReader{} + inputReader.OnGetInputPrefixPath().Return("test-data-prefix") + inputReader.OnGetInputPath().Return("test-data-reference") + inputReader.OnGetMatch(mock.Anything).Return(&core.LiteralMap{}, nil) + pCtx.OnInputReader().Return(inputReader) + + outputReader := &pluginsIOMock.OutputWriter{} + outputReader.OnGetOutputPath().Return("/data/outputs.pb") + outputReader.OnGetOutputPrefixPath().Return("/data/") + outputReader.OnGetRawOutputPrefix().Return("") + outputReader.OnGetCheckpointPrefix().Return("/checkpoint") + outputReader.OnGetPreviousCheckpointsPrefix().Return("/prev") + 
pCtx.OnOutputWriter().Return(outputReader) + + taskReader := &pluginsCoreMock.TaskReader{} + taskReader.OnReadMatch(mock.Anything).Return(taskTemplate, nil) + pCtx.OnTaskReader().Return(taskReader) + + pCtx.OnTaskExecutionMetadata().Return(dummyTaskMetadata) + + pluginStateReader := &pluginsCoreMock.PluginStateReader{} + pluginStateReader.OnGetMatch(mock.Anything).Return(0, nil) + pCtx.OnPluginStateReader().Return(pluginStateReader) + + return pCtx +} + +func getPodSpec() v1.PodSpec { + return v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "primary container", + Args: []string{"pyflyte-execute", "--task-module", "tests.flytekit.unit.sdk.tasks.test_sidecar_tasks", "--task-name", "simple_sidecar_task", "--inputs", "{{.input}}", "--output-prefix", "{{.outputPrefix}}"}, + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + "cpu": resource.MustParse("2"), + "memory": resource.MustParse("200Mi"), + "gpu": resource.MustParse("1"), + }, + Requests: v1.ResourceList{ + "cpu": resource.MustParse("1"), + "memory": resource.MustParse("100Mi"), + "gpu": resource.MustParse("1"), + }, + }, + VolumeMounts: []v1.VolumeMount{ + { + Name: "volume mount", + }, + }, + }, + { + Name: "secondary container", + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + "gpu": resource.MustParse("2"), + }, + Requests: v1.ResourceList{ + "gpu": resource.MustParse("2"), + }, + }, + }, + }, + Volumes: []v1.Volume{ + { + Name: "dshm", + }, + }, + Tolerations: []v1.Toleration{ + { + Key: "my toleration key", + Value: "my toleration value", + }, + }, + } +} + +func checkTolerations(t *testing.T, res client.Object, gpuTol v1.Toleration) { + // Assert user-specified tolerations don't get overridden + assert.Len(t, res.(*v1.Pod).Spec.Tolerations, 2) + for _, tol := range res.(*v1.Pod).Spec.Tolerations { + if tol.Key == "my toleration key" { + assert.Equal(t, tol.Value, "my toleration value") + } else if tol.Key == gpuTol.Key { + assert.Equal(t, tol, gpuTol) + } else { + 
t.Fatalf("unexpected toleration [%+v]", tol) + } + } +} + +func TestBuildSidecarResource_TaskType2(t *testing.T) { + podSpec := getPodSpec() + + b, err := json.Marshal(podSpec) + if err != nil { + t.Fatal(err) + } + + structObj := &structpb.Struct{} + if err := json.Unmarshal(b, structObj); err != nil { + t.Fatal(err) + } + + task := core.TaskTemplate{ + Type: SidecarTaskType, + TaskTypeVersion: 2, + Config: map[string]string{ + flytek8s.PrimaryContainerKey: "primary container", + }, + Target: &core.TaskTemplate_K8SPod{ + K8SPod: &core.K8SPod{ + PodSpec: structObj, + Metadata: &core.K8SObjectMetadata{ + Labels: map[string]string{ + "label": "foo", + }, + Annotations: map[string]string{ + "anno": "bar", + }, + }, + }, + }, + } + + tolGPU := v1.Toleration{ + Key: "flyte/gpu", + Value: "dedicated", + Operator: v1.TolerationOpEqual, + Effect: v1.TaintEffectNoSchedule, + } + + tolEphemeralStorage := v1.Toleration{ + Key: "ephemeral-storage", + Value: "dedicated", + Operator: v1.TolerationOpExists, + Effect: v1.TaintEffectNoSchedule, + } + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + ResourceTolerations: map[v1.ResourceName][]v1.Toleration{ + v1.ResourceStorage: {tolEphemeralStorage}, + ResourceNvidiaGPU: {tolGPU}, + }, + DefaultCPURequest: resource.MustParse("1024m"), + DefaultMemoryRequest: resource.MustParse("1024Mi"), + GpuResourceName: ResourceNvidiaGPU, + })) + taskCtx := getDummySidecarTaskContext(&task, sidecarResourceRequirements, nil) + res, err := DefaultPodPlugin.BuildResource(context.TODO(), taskCtx) + assert.Nil(t, err) + assert.EqualValues(t, map[string]string{ + flytek8s.PrimaryContainerKey: "primary container", + "anno": "bar", + }, res.GetAnnotations()) + assert.EqualValues(t, map[string]string{ + "label": "foo", + }, res.GetLabels()) + + // Assert volumes & volume mounts are preserved + assert.Len(t, res.(*v1.Pod).Spec.Volumes, 1) + assert.Equal(t, "dshm", res.(*v1.Pod).Spec.Volumes[0].Name) + + assert.Len(t, 
res.(*v1.Pod).Spec.Containers[0].VolumeMounts, 1) + assert.Equal(t, "volume mount", res.(*v1.Pod).Spec.Containers[0].VolumeMounts[0].Name) + + checkTolerations(t, res, tolGPU) + + // Assert resource requirements are correctly set + expectedCPURequest := resource.MustParse("1") + assert.Equal(t, expectedCPURequest.Value(), res.(*v1.Pod).Spec.Containers[0].Resources.Requests.Cpu().Value()) + expectedMemRequest := resource.MustParse("100Mi") + assert.Equal(t, expectedMemRequest.Value(), res.(*v1.Pod).Spec.Containers[0].Resources.Requests.Memory().Value()) + expectedCPULimit := resource.MustParse("2048m") + assert.Equal(t, expectedCPULimit.Value(), res.(*v1.Pod).Spec.Containers[0].Resources.Limits.Cpu().Value()) + expectedMemLimit := resource.MustParse("200Mi") + assert.Equal(t, expectedMemLimit.Value(), res.(*v1.Pod).Spec.Containers[0].Resources.Limits.Memory().Value()) + expectedEphemeralStorageLimit := resource.MustParse("100M") + assert.Equal(t, expectedEphemeralStorageLimit.Value(), res.(*v1.Pod).Spec.Containers[0].Resources.Limits.StorageEphemeral().Value()) + + expectedGPURes := resource.MustParse("1") + assert.Equal(t, expectedGPURes, res.(*v1.Pod).Spec.Containers[0].Resources.Requests[ResourceNvidiaGPU]) + assert.Equal(t, expectedGPURes, res.(*v1.Pod).Spec.Containers[0].Resources.Limits[ResourceNvidiaGPU]) + expectedGPURes = resource.MustParse("2") + assert.Equal(t, expectedGPURes, res.(*v1.Pod).Spec.Containers[1].Resources.Requests[ResourceNvidiaGPU]) + assert.Equal(t, expectedGPURes, res.(*v1.Pod).Spec.Containers[1].Resources.Limits[ResourceNvidiaGPU]) +} + +func TestBuildSidecarResource_TaskType2_Invalid_Spec(t *testing.T) { + task := core.TaskTemplate{ + Type: SidecarTaskType, + TaskTypeVersion: 2, + Config: map[string]string{ + flytek8s.PrimaryContainerKey: "primary container", + }, + Target: &core.TaskTemplate_K8SPod{ + K8SPod: &core.K8SPod{ + Metadata: &core.K8SObjectMetadata{ + Labels: map[string]string{ + "label": "foo", + }, + Annotations: 
map[string]string{ + "anno": "bar", + }, + }, + }, + }, + } + + taskCtx := getDummySidecarTaskContext(&task, sidecarResourceRequirements, nil) + _, err := DefaultPodPlugin.BuildResource(context.TODO(), taskCtx) + assert.EqualError(t, err, "[BadTaskSpecification] Pod tasks with task type version > 1 should specify their target as a K8sPod with a defined pod spec") +} + +func TestBuildSidecarResource_TaskType1(t *testing.T) { + podSpec := getPodSpec() + + b, err := json.Marshal(podSpec) + if err != nil { + t.Fatal(err) + } + + structObj := &structpb.Struct{} + if err := json.Unmarshal(b, structObj); err != nil { + t.Fatal(err) + } + + task := core.TaskTemplate{ + Type: SidecarTaskType, + Custom: structObj, + TaskTypeVersion: 1, + Config: map[string]string{ + flytek8s.PrimaryContainerKey: "primary container", + }, + } + + tolGPU := v1.Toleration{ + Key: "flyte/gpu", + Value: "dedicated", + Operator: v1.TolerationOpEqual, + Effect: v1.TaintEffectNoSchedule, + } + + tolEphemeralStorage := v1.Toleration{ + Key: "ephemeral-storage", + Value: "dedicated", + Operator: v1.TolerationOpExists, + Effect: v1.TaintEffectNoSchedule, + } + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + ResourceTolerations: map[v1.ResourceName][]v1.Toleration{ + v1.ResourceStorage: {tolEphemeralStorage}, + ResourceNvidiaGPU: {tolGPU}, + }, + DefaultCPURequest: resource.MustParse("1024m"), + DefaultMemoryRequest: resource.MustParse("1024Mi"), + })) + taskCtx := getDummySidecarTaskContext(&task, sidecarResourceRequirements, nil) + res, err := DefaultPodPlugin.BuildResource(context.TODO(), taskCtx) + assert.Nil(t, err) + assert.EqualValues(t, map[string]string{ + flytek8s.PrimaryContainerKey: "primary container", + }, res.GetAnnotations()) + assert.EqualValues(t, map[string]string{}, res.GetLabels()) + + // Assert volumes & volume mounts are preserved + assert.Len(t, res.(*v1.Pod).Spec.Volumes, 1) + assert.Equal(t, "dshm", res.(*v1.Pod).Spec.Volumes[0].Name) + + assert.Len(t, 
res.(*v1.Pod).Spec.Containers[0].VolumeMounts, 1) + assert.Equal(t, "volume mount", res.(*v1.Pod).Spec.Containers[0].VolumeMounts[0].Name) + + checkTolerations(t, res, tolGPU) + // Assert resource requirements are correctly set + expectedCPURequest := resource.MustParse("1") + assert.Equal(t, expectedCPURequest.Value(), res.(*v1.Pod).Spec.Containers[0].Resources.Requests.Cpu().Value()) + expectedMemRequest := resource.MustParse("100Mi") + assert.Equal(t, expectedMemRequest.Value(), res.(*v1.Pod).Spec.Containers[0].Resources.Requests.Memory().Value()) + expectedCPULimit := resource.MustParse("2048m") + assert.Equal(t, expectedCPULimit.Value(), res.(*v1.Pod).Spec.Containers[0].Resources.Limits.Cpu().Value()) + expectedMemLimit := resource.MustParse("200Mi") + assert.Equal(t, expectedMemLimit.Value(), res.(*v1.Pod).Spec.Containers[0].Resources.Limits.Memory().Value()) + expectedEphemeralStorageLimit := resource.MustParse("100M") + assert.Equal(t, expectedEphemeralStorageLimit.Value(), res.(*v1.Pod).Spec.Containers[0].Resources.Limits.StorageEphemeral().Value()) +} + +func TestBuildSideResource_TaskType1_InvalidSpec(t *testing.T) { + podSpec := v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "primary container", + }, + { + Name: "secondary container", + }, + }, + } + + b, err := json.Marshal(podSpec) + if err != nil { + t.Fatal(err) + } + + structObj := &structpb.Struct{} + if err := json.Unmarshal(b, structObj); err != nil { + t.Fatal(err) + } + + task := core.TaskTemplate{ + Type: SidecarTaskType, + Custom: structObj, + TaskTypeVersion: 1, + } + + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + ResourceTolerations: map[v1.ResourceName][]v1.Toleration{ + v1.ResourceStorage: {}, + ResourceNvidiaGPU: {}, + }, + DefaultCPURequest: resource.MustParse("1024m"), + DefaultMemoryRequest: resource.MustParse("1024Mi"), + })) + taskCtx := getDummySidecarTaskContext(&task, sidecarResourceRequirements, nil) + _, err = 
DefaultPodPlugin.BuildResource(context.TODO(), taskCtx) + assert.EqualError(t, err, "[BadTaskSpecification] invalid TaskSpecification, config needs to be non-empty and include missing [primary_container_name] key") + + task.Config = map[string]string{ + "foo": "bar", + } + taskCtx = getDummySidecarTaskContext(&task, sidecarResourceRequirements, nil) + _, err = DefaultPodPlugin.BuildResource(context.TODO(), taskCtx) + assert.EqualError(t, err, "[BadTaskSpecification] invalid TaskSpecification, config missing [primary_container_name] key in [map[foo:bar]]") + +} + +func TestBuildSidecarResource(t *testing.T) { + dir, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + sidecarCustomJSON, err := ioutil.ReadFile(path.Join(dir, "testdata", "sidecar_custom")) + if err != nil { + t.Fatal(sidecarCustomJSON) + } + sidecarCustom := structpb.Struct{} + if err := json.Unmarshal(sidecarCustomJSON, &sidecarCustom); err != nil { + t.Fatal(err) + } + task := core.TaskTemplate{ + Type: SidecarTaskType, + Custom: &sidecarCustom, + } + + tolGPU := v1.Toleration{ + Key: "flyte/gpu", + Value: "dedicated", + Operator: v1.TolerationOpEqual, + Effect: v1.TaintEffectNoSchedule, + } + + tolEphemeralStorage := v1.Toleration{ + Key: "ephemeral-storage", + Value: "dedicated", + Operator: v1.TolerationOpExists, + Effect: v1.TaintEffectNoSchedule, + } + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + ResourceTolerations: map[v1.ResourceName][]v1.Toleration{ + v1.ResourceStorage: {tolEphemeralStorage}, + ResourceNvidiaGPU: {tolGPU}, + }, + DefaultCPURequest: resource.MustParse("1024m"), + DefaultMemoryRequest: resource.MustParse("1024Mi"), + })) + taskCtx := getDummySidecarTaskContext(&task, sidecarResourceRequirements, nil) + res, err := DefaultPodPlugin.BuildResource(context.TODO(), taskCtx) + assert.Nil(t, err) + assert.EqualValues(t, map[string]string{ + flytek8s.PrimaryContainerKey: "a container", + "a1": "a1", + }, res.GetAnnotations()) + + assert.EqualValues(t, 
map[string]string{ + "b1": "b1", + }, res.GetLabels()) + + // Assert volumes & volume mounts are preserved + assert.Len(t, res.(*v1.Pod).Spec.Volumes, 1) + assert.Equal(t, "dshm", res.(*v1.Pod).Spec.Volumes[0].Name) + + assert.Len(t, res.(*v1.Pod).Spec.Containers[0].VolumeMounts, 1) + assert.Equal(t, "volume mount", res.(*v1.Pod).Spec.Containers[0].VolumeMounts[0].Name) + + assert.Equal(t, "service-account", res.(*v1.Pod).Spec.ServiceAccountName) + + checkTolerations(t, res, tolGPU) + + // Assert resource requirements are correctly set + expectedCPURequest := resource.MustParse("2048m") + assert.Equal(t, expectedCPURequest.Value(), res.(*v1.Pod).Spec.Containers[0].Resources.Requests.Cpu().Value()) + expectedMemRequest := resource.MustParse("1024Mi") + assert.Equal(t, expectedMemRequest.Value(), res.(*v1.Pod).Spec.Containers[0].Resources.Requests.Memory().Value()) + expectedCPULimit := resource.MustParse("2048m") + assert.Equal(t, expectedCPULimit.Value(), res.(*v1.Pod).Spec.Containers[0].Resources.Limits.Cpu().Value()) + expectedMemLimit := resource.MustParse("1024Mi") + assert.Equal(t, expectedMemLimit.Value(), res.(*v1.Pod).Spec.Containers[0].Resources.Limits.Memory().Value()) + expectedEphemeralStorageLimit := resource.MustParse("100M") + assert.Equal(t, expectedEphemeralStorageLimit.Value(), res.(*v1.Pod).Spec.Containers[0].Resources.Limits.StorageEphemeral().Value()) +} + +func TestBuildSidecarReosurceMissingAnnotationsAndLabels(t *testing.T) { + sideCarJob := sidecarJob{ + PrimaryContainerName: "PrimaryContainer", + PodSpec: &v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "PrimaryContainer", + }, + }, + }, + } + + task := getSidecarTaskTemplateForTest(sideCarJob) + + taskCtx := getDummySidecarTaskContext(task, sidecarResourceRequirements, nil) + resp, err := DefaultPodPlugin.BuildResource(context.TODO(), taskCtx) + assert.NoError(t, err) + assert.EqualValues(t, map[string]string{}, resp.GetLabels()) + assert.EqualValues(t, 
map[string]string{"primary_container_name": "PrimaryContainer"}, resp.GetAnnotations()) +} + +func TestBuildSidecarResourceMissingPrimary(t *testing.T) { + sideCarJob := sidecarJob{ + PrimaryContainerName: "PrimaryContainer", + PodSpec: &v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "SecondaryContainer", + }, + }, + }, + } + + task := getSidecarTaskTemplateForTest(sideCarJob) + + taskCtx := getDummySidecarTaskContext(task, sidecarResourceRequirements, nil) + _, err := DefaultPodPlugin.BuildResource(context.TODO(), taskCtx) + assert.True(t, errors.Is(err, errors2.Errorf("BadTaskSpecification", ""))) +} + +func TestBuildSidecarResource_ExtendedResources(t *testing.T) { + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + GpuDeviceNodeLabel: "gpu-node-label", + GpuPartitionSizeNodeLabel: "gpu-partition-size", + GpuResourceName: flytek8s.ResourceNvidiaGPU, + })) + + fixtures := []struct { + name string + resources *v1.ResourceRequirements + extendedResourcesBase *core.ExtendedResources + extendedResourcesOverride *core.ExtendedResources + expectedNsr []v1.NodeSelectorTerm + expectedTol []v1.Toleration + }{ + { + "without overrides", + &v1.ResourceRequirements{ + Limits: v1.ResourceList{ + flytek8s.ResourceNvidiaGPU: resource.MustParse("1"), + }, + }, + &core.ExtendedResources{ + GpuAccelerator: &core.GPUAccelerator{ + Device: "nvidia-tesla-t4", + }, + }, + nil, + []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{ + v1.NodeSelectorRequirement{ + Key: "gpu-node-label", + Operator: v1.NodeSelectorOpIn, + Values: []string{"nvidia-tesla-t4"}, + }, + }, + }, + }, + []v1.Toleration{ + { + Key: "gpu-node-label", + Value: "nvidia-tesla-t4", + Operator: v1.TolerationOpEqual, + Effect: v1.TaintEffectNoSchedule, + }, + }, + }, + { + "with overrides", + &v1.ResourceRequirements{ + Limits: v1.ResourceList{ + flytek8s.ResourceNvidiaGPU: resource.MustParse("1"), + }, + }, + &core.ExtendedResources{ + GpuAccelerator: 
&core.GPUAccelerator{ + Device: "nvidia-tesla-t4", + }, + }, + &core.ExtendedResources{ + GpuAccelerator: &core.GPUAccelerator{ + Device: "nvidia-tesla-a100", + PartitionSizeValue: &core.GPUAccelerator_PartitionSize{ + PartitionSize: "1g.5gb", + }, + }, + }, + []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{ + v1.NodeSelectorRequirement{ + Key: "gpu-node-label", + Operator: v1.NodeSelectorOpIn, + Values: []string{"nvidia-tesla-a100"}, + }, + v1.NodeSelectorRequirement{ + Key: "gpu-partition-size", + Operator: v1.NodeSelectorOpIn, + Values: []string{"1g.5gb"}, + }, + }, + }, + }, + []v1.Toleration{ + { + Key: "gpu-node-label", + Value: "nvidia-tesla-a100", + Operator: v1.TolerationOpEqual, + Effect: v1.TaintEffectNoSchedule, + }, + { + Key: "gpu-partition-size", + Value: "1g.5gb", + Operator: v1.TolerationOpEqual, + Effect: v1.TaintEffectNoSchedule, + }, + }, + }, + } + + podSpec := v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "primary container", + }, + }, + } + b, err := json.Marshal(podSpec) + if err != nil { + t.Fatal(err) + } + structObj := &structpb.Struct{} + if err := json.Unmarshal(b, structObj); err != nil { + t.Fatal(err) + } + testConfigs := []struct { + name string + taskTemplate core.TaskTemplate + }{ + { + "v0", + *getSidecarTaskTemplateForTest(sidecarJob{ + PrimaryContainerName: podSpec.Containers[0].Name, + PodSpec: &podSpec, + }), + }, + { + "v1", + core.TaskTemplate{ + Type: SidecarTaskType, + Custom: structObj, + TaskTypeVersion: 1, + Config: map[string]string{ + flytek8s.PrimaryContainerKey: podSpec.Containers[0].Name, + }, + }, + }, + { + "v2", + core.TaskTemplate{ + Type: SidecarTaskType, + TaskTypeVersion: 2, + Config: map[string]string{ + flytek8s.PrimaryContainerKey: podSpec.Containers[0].Name, + }, + Target: &core.TaskTemplate_K8SPod{ + K8SPod: &core.K8SPod{ + PodSpec: structObj, + }, + }, + }, + }, + } + + for _, tCfg := range testConfigs { + for _, f := range fixtures { + t.Run(tCfg.name+" "+f.name, 
func(t *testing.T) { + taskTemplate := tCfg.taskTemplate + taskTemplate.ExtendedResources = f.extendedResourcesBase + taskContext := getDummySidecarTaskContext(&taskTemplate, f.resources, f.extendedResourcesOverride) + r, err := DefaultPodPlugin.BuildResource(context.TODO(), taskContext) + assert.Nil(t, err) + assert.NotNil(t, r) + pod, ok := r.(*v1.Pod) + assert.True(t, ok) + + assert.EqualValues( + t, + f.expectedNsr, + pod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms, + ) + assert.EqualValues( + t, + f.expectedTol, + pod.Spec.Tolerations, + ) + }) + } + } +} + +func TestGetTaskSidecarStatus(t *testing.T) { + sideCarJob := sidecarJob{ + PrimaryContainerName: "PrimaryContainer", + PodSpec: &v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "PrimaryContainer", + }, + }, + }, + } + + task := getSidecarTaskTemplateForTest(sideCarJob) + + var testCases = map[v1.PodPhase]pluginsCore.Phase{ + v1.PodSucceeded: pluginsCore.PhaseSuccess, + v1.PodFailed: pluginsCore.PhaseRetryableFailure, + v1.PodReasonUnschedulable: pluginsCore.PhaseQueued, + v1.PodUnknown: pluginsCore.PhaseUndefined, + } + + for podPhase, expectedTaskPhase := range testCases { + res := &v1.Pod{ + Status: v1.PodStatus{ + Phase: podPhase, + }, + } + res.SetAnnotations(map[string]string{ + flytek8s.PrimaryContainerKey: "PrimaryContainer", + }) + pluginContext := getDummySidecarPluginContext(task, sidecarResourceRequirements) + + phaseInfo, err := DefaultPodPlugin.GetTaskPhase(context.TODO(), pluginContext, res) + + assert.Nil(t, err) + assert.Equal(t, expectedTaskPhase, phaseInfo.Phase(), + "Expected [%v] got [%v] instead, for podPhase [%v]", expectedTaskPhase, phaseInfo.Phase(), podPhase) + } +} + +func TestDemystifiedSidecarStatus_PrimaryFailed(t *testing.T) { + res := &v1.Pod{ + Status: v1.PodStatus{ + Phase: v1.PodRunning, + ContainerStatuses: []v1.ContainerStatus{ + { + Name: "Primary", + State: v1.ContainerState{ + Terminated: 
&v1.ContainerStateTerminated{
							ExitCode: 1,
						},
					},
				},
			},
		},
	}
	res.SetAnnotations(map[string]string{
		flytek8s.PrimaryContainerKey: "Primary",
	})
	pluginContext := getDummySidecarPluginContext(&core.TaskTemplate{}, sidecarResourceRequirements)
	phaseInfo, err := DefaultPodPlugin.GetTaskPhase(context.TODO(), pluginContext, res)
	assert.Nil(t, err)
	// A primary container terminated with a non-zero exit code maps to a
	// retryable failure.
	assert.Equal(t, pluginsCore.PhaseRetryableFailure, phaseInfo.Phase())
}

// TestDemystifiedSidecarStatus_PrimarySucceeded: a primary container that
// terminated with exit code 0 maps the task to PhaseSuccess even though the
// pod itself still reports PodRunning (sidecars may still be up).
func TestDemystifiedSidecarStatus_PrimarySucceeded(t *testing.T) {
	res := &v1.Pod{
		Status: v1.PodStatus{
			Phase: v1.PodRunning,
			ContainerStatuses: []v1.ContainerStatus{
				{
					Name: "Primary",
					State: v1.ContainerState{
						Terminated: &v1.ContainerStateTerminated{
							ExitCode: 0,
						},
					},
				},
			},
		},
	}
	res.SetAnnotations(map[string]string{
		flytek8s.PrimaryContainerKey: "Primary",
	})
	pluginContext := getDummySidecarPluginContext(&core.TaskTemplate{}, sidecarResourceRequirements)
	phaseInfo, err := DefaultPodPlugin.GetTaskPhase(context.TODO(), pluginContext, res)
	assert.Nil(t, err)
	assert.Equal(t, pluginsCore.PhaseSuccess, phaseInfo.Phase())
}

// TestDemystifiedSidecarStatus_PrimaryRunning: a primary container still in
// the Waiting state keeps the task in PhaseRunning.
func TestDemystifiedSidecarStatus_PrimaryRunning(t *testing.T) {
	res := &v1.Pod{
		Status: v1.PodStatus{
			Phase: v1.PodRunning,
			ContainerStatuses: []v1.ContainerStatus{
				{
					Name: "Primary",
					State: v1.ContainerState{
						Waiting: &v1.ContainerStateWaiting{
							Reason: "stay patient",
						},
					},
				},
			},
		},
	}
	res.SetAnnotations(map[string]string{
		flytek8s.PrimaryContainerKey: "Primary",
	})
	pluginContext := getDummySidecarPluginContext(&core.TaskTemplate{}, sidecarResourceRequirements)
	phaseInfo, err := DefaultPodPlugin.GetTaskPhase(context.TODO(), pluginContext, res)
	assert.Nil(t, err)
	assert.Equal(t, pluginsCore.PhaseRunning, phaseInfo.Phase())
}

// TestDemystifiedSidecarStatus_PrimaryMissing: the annotated primary container
// does not appear in the pod spec at all.
func TestDemystifiedSidecarStatus_PrimaryMissing(t *testing.T) {
	res := &v1.Pod{
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name: "Secondary",
				},
			},
		},
		Status:
v1.PodStatus{
			Phase: v1.PodRunning,
			ContainerStatuses: []v1.ContainerStatus{
				{
					Name: "Secondary",
				},
			},
		},
	}
	res.SetAnnotations(map[string]string{
		flytek8s.PrimaryContainerKey: "Primary",
	})
	pluginContext := getDummySidecarPluginContext(&core.TaskTemplate{}, sidecarResourceRequirements)
	phaseInfo, err := DefaultPodPlugin.GetTaskPhase(context.TODO(), pluginContext, res)
	assert.Nil(t, err)
	// A primary container absent from the pod spec is unrecoverable.
	assert.Equal(t, pluginsCore.PhasePermanentFailure, phaseInfo.Phase())
}

// TestDemystifiedSidecarStatus_PrimaryNotExistsYet: the primary container is
// declared in the pod spec but has no container status yet; the task stays in
// PhaseRunning until a status is reported.
func TestDemystifiedSidecarStatus_PrimaryNotExistsYet(t *testing.T) {
	res := &v1.Pod{
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name: "Primary",
				},
			},
		},
		Status: v1.PodStatus{
			Phase: v1.PodRunning,
			ContainerStatuses: []v1.ContainerStatus{
				{
					Name: "Secondary",
				},
			},
		},
	}
	res.SetAnnotations(map[string]string{
		flytek8s.PrimaryContainerKey: "Primary",
	})
	pluginContext := getDummySidecarPluginContext(&core.TaskTemplate{}, sidecarResourceRequirements)
	phaseInfo, err := DefaultPodPlugin.GetTaskPhase(context.TODO(), pluginContext, res)
	assert.Nil(t, err)
	assert.Equal(t, pluginsCore.PhaseRunning, phaseInfo.Phase())
}

// TestGetProperties: the pod plugin advertises zero-value plugin properties.
func TestGetProperties(t *testing.T) {
	expected := k8s.PluginProperties{}
	assert.Equal(t, expected, DefaultPodPlugin.GetProperties())
}
diff --git a/flyteplugins/go/tasks/plugins/k8s/pod/testdata/sidecar_custom b/flyteplugins/go/tasks/plugins/k8s/pod/testdata/sidecar_custom
new file mode 100755
index 0000000000..00b01208a7
--- /dev/null
+++ b/flyteplugins/go/tasks/plugins/k8s/pod/testdata/sidecar_custom
@@ -0,0 +1,54 @@
{
  "podSpec": {
    "restartPolicy": "OnFailure",
    "containers": [{
      "name": "a container",
      "image": "foo",
      "args": ["pyflyte-execute", "--task-module", "tests.flytekit.unit.sdk.tasks.test_sidecar_tasks", "--task-name", "simple_sidecar_task", "--inputs", "{{.input}}", "--output-prefix", "{{.outputPrefix}}"],
      "volumeMounts": [{
        "mountPath": "some/where",
        "name": "volume mount"
      }],
"env": [{ + "name": "FLYTE_INTERNAL_CONFIGURATION_PATH", + "value": "flytekit.config" + }, { + "name": "FLYTE_INTERNAL_PROJECT", + "value": "" + }, { + "name": "foo", + "value": "bar" + }, { + "name": "FLYTE_INTERNAL_DOMAIN", + "value": "" + }, { + "name": "FLYTE_INTERNAL_VERSION", + "value": "" + }] + }, { + "name": "another container" + }], + "volumes": [{ + "volumeSource": { + "emptyDir": { + "sizeLimit": { + "string": "10G" + }, + "medium": "Memory" + } + }, + "name": "dshm" + }], + "tolerations": [{ + "key": "my toleration key", + "value": "my toleration value" + }] + }, + "primaryContainerName": "a container", + "annotations": { + "a1": "a1" + }, + "labels": { + "b1": "b1" + } +} diff --git a/flyteplugins/go/tasks/plugins/k8s/ray/config.go b/flyteplugins/go/tasks/plugins/k8s/ray/config.go new file mode 100644 index 0000000000..082eda0c92 --- /dev/null +++ b/flyteplugins/go/tasks/plugins/k8s/ray/config.go @@ -0,0 +1,108 @@ +package ray + +import ( + "context" + + v1 "k8s.io/api/core/v1" + + pluginsConfig "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/config" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/logs" + pluginmachinery "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/k8s" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/tasklog" + "github.com/flyteorg/flyte/v2/flytestdlib/config" +) + +//go:generate pflags Config --default-var=defaultConfig + +var ( + defaultConfig = Config{ + ShutdownAfterJobFinishes: true, + TTLSecondsAfterFinished: 3600, + ServiceType: "NodePort", + IncludeDashboard: true, + DashboardHost: "0.0.0.0", + EnableUsageStats: false, + ServiceAccount: "default", + Defaults: DefaultConfig{ + HeadNode: NodeConfig{ + StartParameters: map[string]string{ + // Disable usage reporting by default: https://docs.ray.io/en/latest/cluster/usage-stats.html + DisableUsageStatsStartParameter: "true", + }, + IPAddress: "$MY_POD_IP", + }, + WorkerNode: NodeConfig{ + StartParameters: map[string]string{ + 
// Disable usage reporting by default: https://docs.ray.io/en/latest/cluster/usage-stats.html + DisableUsageStatsStartParameter: "true", + }, + IPAddress: "$MY_POD_IP", + }, + }, + } + + configSection = pluginsConfig.MustRegisterSubSectionWithUpdates("ray", &defaultConfig, + func(ctx context.Context, newValue config.Config) { + if newValue == nil { + return + } + + if len(newValue.(*Config).Defaults.HeadNode.IPAddress) == 0 { + newValue.(*Config).Defaults.HeadNode.IPAddress = newValue.(*Config).DeprecatedNodeIPAddress + } + + if len(newValue.(*Config).Defaults.WorkerNode.IPAddress) == 0 { + newValue.(*Config).Defaults.WorkerNode.IPAddress = newValue.(*Config).DeprecatedNodeIPAddress + } + }) +) + +// Config is config for 'ray' plugin +type Config struct { + // ShutdownAfterJobFinishes will determine whether to delete the ray cluster once rayJob succeed or failed + ShutdownAfterJobFinishes bool `json:"shutdownAfterJobFinishes,omitempty"` + + // TTLSecondsAfterFinished is the TTL to clean up RayCluster. + // It's only working when ShutdownAfterJobFinishes set to true. + TTLSecondsAfterFinished int32 `json:"ttlSecondsAfterFinished,omitempty"` + + // Kubernetes Service Type, valid values are 'ClusterIP', 'NodePort' and 'LoadBalancer' + ServiceType string `json:"serviceType,omitempty"` + + // IncludeDashboard is used to start a Ray Dashboard if set to true + IncludeDashboard bool `json:"includeDashboard,omitempty"` + + // DashboardHost the host to bind the dashboard server to, either localhost (127.0.0.1) + // or 0.0.0.0 (available from all interfaces). By default, this is localhost. + DashboardHost string `json:"dashboardHost,omitempty"` + + // DeprecatedNodeIPAddress the IP address of the head node. By default, this is pod ip address. + DeprecatedNodeIPAddress string `json:"nodeIPAddress,omitempty" pflag:"-,DEPRECATED. 
Please use DefaultConfig.[HeadNode|WorkerNode].IPAddress"` + + // Remote Ray Cluster Config + RemoteClusterConfig pluginmachinery.ClusterConfig `json:"remoteClusterConfig" pflag:"Configuration of remote K8s cluster for ray jobs"` + Logs logs.LogConfig `json:"logs" pflag:"-,Log configuration for ray jobs"` + LogsSidecar *v1.Container `json:"logsSidecar" pflag:"-,Sidecar to inject into head pods for capturing ray job logs"` + DashboardURLTemplate *tasklog.TemplateLogPlugin `json:"dashboardURLTemplate" pflag:"-,Template for URL of Ray dashboard running on a head node."` + Defaults DefaultConfig `json:"defaults" pflag:"-,Default configuration for ray jobs"` + EnableUsageStats bool `json:"enableUsageStats" pflag:",Enable usage stats for ray jobs. These stats are submitted to usage-stats.ray.io per https://docs.ray.io/en/latest/cluster/usage-stats.html"` + ServiceAccount string `json:"serviceAccount" pflag:",The k8s service account to run as"` +} + +type DefaultConfig struct { + HeadNode NodeConfig `json:"headNode,omitempty" pflag:"-,Default configuration for head node of ray jobs"` + WorkerNode NodeConfig `json:"workerNode,omitempty" pflag:"-,Default configuration for worker node of ray jobs"` +} + +type NodeConfig struct { + StartParameters map[string]string `json:"startParameters,omitempty" pflag:"-,Start parameters for the node"` + IPAddress string `json:"ipAddress,omitempty" pflag:"-,IP address of the node"` +} + +func GetConfig() *Config { + return configSection.GetConfig().(*Config) +} + +func SetConfig(cfg *Config) error { + return configSection.SetConfig(cfg) +} diff --git a/flyteplugins/go/tasks/plugins/k8s/ray/config_flags.go b/flyteplugins/go/tasks/plugins/k8s/ray/config_flags.go new file mode 100755 index 0000000000..5048869eab --- /dev/null +++ b/flyteplugins/go/tasks/plugins/k8s/ray/config_flags.go @@ -0,0 +1,64 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. 
+ +package ray + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. +func (Config) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (Config) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (Config) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in Config and its nested types. The format of the +// flags is json-name.json-sub-name... etc. 
+func (cfg Config) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("Config", pflag.ExitOnError) + cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "shutdownAfterJobFinishes"), defaultConfig.ShutdownAfterJobFinishes, "") + cmdFlags.Int32(fmt.Sprintf("%v%v", prefix, "ttlSecondsAfterFinished"), defaultConfig.TTLSecondsAfterFinished, "") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "serviceType"), defaultConfig.ServiceType, "") + cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "includeDashboard"), defaultConfig.IncludeDashboard, "") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "dashboardHost"), defaultConfig.DashboardHost, "") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "remoteClusterConfig.name"), defaultConfig.RemoteClusterConfig.Name, "Friendly name of the remote cluster") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "remoteClusterConfig.endpoint"), defaultConfig.RemoteClusterConfig.Endpoint, " Remote K8s cluster endpoint") + cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "remoteClusterConfig.enabled"), defaultConfig.RemoteClusterConfig.Enabled, " Boolean flag to enable or disable") + cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "enableUsageStats"), defaultConfig.EnableUsageStats, "Enable usage stats for ray jobs. These stats are submitted to usage-stats.ray.io per https://docs.ray.io/en/latest/cluster/usage-stats.html") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "serviceAccount"), defaultConfig.ServiceAccount, "The k8s service account to run as") + return cmdFlags +} diff --git a/flyteplugins/go/tasks/plugins/k8s/ray/config_flags_test.go b/flyteplugins/go/tasks/plugins/k8s/ray/config_flags_test.go new file mode 100755 index 0000000000..05871adc51 --- /dev/null +++ b/flyteplugins/go/tasks/plugins/k8s/ray/config_flags_test.go @@ -0,0 +1,242 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. 
+ +package ray + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. +func canGetElementConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. +func jsonUnmarshalerHookConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. 
Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_Config(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_Config(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_Config(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_Config(val, result)) +} + +func testDecodeRaw_Config(t *testing.T, vStringSlice, result interface{}) { + assert.NoError(t, decode_Config(vStringSlice, result)) +} + +func TestConfig_GetPFlagSet(t *testing.T) { + val := Config{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) +} + +func TestConfig_SetFlags(t *testing.T) { + actual := Config{} + cmdFlags := actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_shutdownAfterJobFinishes", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("shutdownAfterJobFinishes", testValue) + if vBool, err := cmdFlags.GetBool("shutdownAfterJobFinishes"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vBool), &actual.ShutdownAfterJobFinishes) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_ttlSecondsAfterFinished", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("ttlSecondsAfterFinished", testValue) + if vInt32, err 
:= cmdFlags.GetInt32("ttlSecondsAfterFinished"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vInt32), &actual.TTLSecondsAfterFinished) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_serviceType", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("serviceType", testValue) + if vString, err := cmdFlags.GetString("serviceType"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.ServiceType) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_includeDashboard", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("includeDashboard", testValue) + if vBool, err := cmdFlags.GetBool("includeDashboard"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vBool), &actual.IncludeDashboard) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_dashboardHost", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("dashboardHost", testValue) + if vString, err := cmdFlags.GetString("dashboardHost"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.DashboardHost) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_remoteClusterConfig.name", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("remoteClusterConfig.name", testValue) + if vString, err := cmdFlags.GetString("remoteClusterConfig.name"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.RemoteClusterConfig.Name) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_remoteClusterConfig.endpoint", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("remoteClusterConfig.endpoint", testValue) + if vString, err := cmdFlags.GetString("remoteClusterConfig.endpoint"); err == nil { + 
testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.RemoteClusterConfig.Endpoint) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_remoteClusterConfig.enabled", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("remoteClusterConfig.enabled", testValue) + if vBool, err := cmdFlags.GetBool("remoteClusterConfig.enabled"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vBool), &actual.RemoteClusterConfig.Enabled) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_enableUsageStats", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("enableUsageStats", testValue) + if vBool, err := cmdFlags.GetBool("enableUsageStats"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vBool), &actual.EnableUsageStats) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_serviceAccount", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("serviceAccount", testValue) + if vString, err := cmdFlags.GetString("serviceAccount"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.ServiceAccount) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flyteplugins/go/tasks/plugins/k8s/ray/config_test.go b/flyteplugins/go/tasks/plugins/k8s/ray/config_test.go new file mode 100644 index 0000000000..fcfe38cec0 --- /dev/null +++ b/flyteplugins/go/tasks/plugins/k8s/ray/config_test.go @@ -0,0 +1,37 @@ +package ray + +import ( + "testing" + + "gotest.tools/assert" + + pluginmachinery "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/k8s" +) + +func TestLoadConfig(t *testing.T) { + rayConfig := GetConfig() + assert.Assert(t, rayConfig != nil) + + t.Run("remote cluster", func(t *testing.T) { + config := GetConfig() + remoteConfig := pluginmachinery.ClusterConfig{ + Enabled: false, + Endpoint: "", + 
Auth: pluginmachinery.Auth{
				TokenPath:  "",
				CaCertPath: "",
			},
		}
		// The zero-value remote cluster config must be disabled with empty
		// endpoint and credentials.
		assert.DeepEqual(t, config.RemoteClusterConfig, remoteConfig)
	})
}

// TestLoadDefaultServiceAccountConfig verifies the plugin's default k8s
// service account is "default".
func TestLoadDefaultServiceAccountConfig(t *testing.T) {
	rayConfig := GetConfig()
	assert.Assert(t, rayConfig != nil)

	t.Run("serviceAccount", func(t *testing.T) {
		config := GetConfig()
		assert.Equal(t, config.ServiceAccount, "default")
	})
}
diff --git a/flyteplugins/go/tasks/plugins/k8s/ray/ray.go b/flyteplugins/go/tasks/plugins/k8s/ray/ray.go
new file mode 100644
index 0000000000..cd0f76f253
--- /dev/null
+++ b/flyteplugins/go/tasks/plugins/k8s/ray/ray.go
@@ -0,0 +1,786 @@
package ray

import (
	"context"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"regexp"
	"strconv"
	"strings"
	"time"

	rayv1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1"
	"github.com/samber/lo"
	"gopkg.in/yaml.v2"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/scheme"
	"sigs.k8s.io/controller-runtime/pkg/client"

	flyteerr "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/errors"
	"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/logs"
	"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery"
	pluginsCore "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core"
	"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/flytek8s"
	"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/flytek8s/config"
	"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/k8s"
	"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/tasklog"
	"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/utils"
	"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/plugins/k8s/pod"
	"github.com/flyteorg/flyte/v2/flytestdlib/logger"
	"github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core"
"github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins"
)

const (
	rayStateMountPath                  = "/tmp/ray"
	defaultRayStateVolName             = "system-ray-state"
	rayTaskType                        = "ray"
	KindRayJob                         = "RayJob"
	IncludeDashboard                   = "include-dashboard"
	NodeIPAddress                      = "node-ip-address"
	DashboardHost                      = "dashboard-host"
	DisableUsageStatsStartParameter    = "disable-usage-stats"
	DisableUsageStatsStartParameterVal = "true"
	RayHeadContainerName               = "ray-head"
)

// logTemplateRegexes holds the compiled template regexes for the
// "rayClusterName" and "rayJobID" log-URI template variables.
var logTemplateRegexes = struct {
	RayClusterName *regexp.Regexp
	RayJobID       *regexp.Regexp
}{
	tasklog.MustCreateRegex("rayClusterName"),
	tasklog.MustCreateRegex("rayJobID"),
}

// Copy it from KubeRay to avoid adding a new dependency to go.mod.
// https://github.com/ray-project/kuberay/blob/1ced2b968eabcfee4dcfa61391d307b60e46a2ed/ray-operator/controllers/ray/common/job.go#L122-L145
var submitterDefaultResourceRequirements = v1.ResourceRequirements{
	Limits: v1.ResourceList{
		v1.ResourceCPU:    resource.MustParse("1"),
		v1.ResourceMemory: resource.MustParse("1Gi"),
	},
	Requests: v1.ResourceList{
		v1.ResourceCPU:    resource.MustParse("500m"),
		v1.ResourceMemory: resource.MustParse("200Mi"),
	},
}

// rayJobResourceHandler builds RayJob custom resources for ray tasks.
type rayJobResourceHandler struct{}

func (rayJobResourceHandler) GetProperties() k8s.PluginProperties {
	// Maximum length of the generated resource name.
	maxLength := 47
	return k8s.PluginProperties{GeneratedNameMaxLength: &maxLength}
}

// BuildResource Creates a new ray job resource
func (rayJobResourceHandler) BuildResource(ctx context.Context, taskCtx pluginsCore.TaskExecutionContext) (client.Object, error) {
	taskTemplate, err := taskCtx.TaskReader().Read(ctx)
	if err != nil {
		return nil, flyteerr.Errorf(flyteerr.BadTaskSpecification, "unable to fetch task specification [%v]", err.Error())
	} else if taskTemplate == nil {
		return nil, flyteerr.Errorf(flyteerr.BadTaskSpecification, "nil task specification")
	}

	// Decode the RayJob proto carried in the task's custom field.
	rayJob := plugins.RayJob{}
	err = utils.UnmarshalStruct(taskTemplate.GetCustom(), &rayJob)
	if err != nil {
		return nil,
flyteerr.Errorf(flyteerr.BadTaskSpecification, "invalid TaskSpecification [%v], Err: [%v]", taskTemplate.GetCustom(), err.Error()) + } + + podSpec, objectMeta, primaryContainerName, err := flytek8s.ToK8sPodSpec(ctx, taskCtx) + if err != nil { + return nil, flyteerr.Errorf(flyteerr.BadTaskSpecification, "Unable to create pod spec: [%v]", err.Error()) + } + + var primaryContainer *v1.Container + var primaryContainerIdx int + for idx, c := range podSpec.Containers { + if c.Name == primaryContainerName { + c := c + primaryContainer = &c + primaryContainerIdx = idx + break + } + } + + if primaryContainer == nil { + return nil, flyteerr.Errorf(flyteerr.BadTaskSpecification, "Unable to get primary container from the pod: [%v]", err.Error()) + } + + cfg := GetConfig() + + headNodeRayStartParams := make(map[string]string) + if rayJob.RayCluster.HeadGroupSpec != nil && rayJob.RayCluster.HeadGroupSpec.RayStartParams != nil { + headNodeRayStartParams = rayJob.RayCluster.HeadGroupSpec.RayStartParams + } else if headNode := cfg.Defaults.HeadNode; len(headNode.StartParameters) > 0 { + headNodeRayStartParams = headNode.StartParameters + } + + if _, exist := headNodeRayStartParams[IncludeDashboard]; !exist { + headNodeRayStartParams[IncludeDashboard] = strconv.FormatBool(GetConfig().IncludeDashboard) + } + + if _, exist := headNodeRayStartParams[NodeIPAddress]; !exist { + headNodeRayStartParams[NodeIPAddress] = cfg.Defaults.HeadNode.IPAddress + } + + if _, exist := headNodeRayStartParams[DashboardHost]; !exist { + headNodeRayStartParams[DashboardHost] = cfg.DashboardHost + } + + if _, exists := headNodeRayStartParams[DisableUsageStatsStartParameter]; !exists && !cfg.EnableUsageStats { + headNodeRayStartParams[DisableUsageStatsStartParameter] = DisableUsageStatsStartParameterVal + } + + podSpec.ServiceAccountName = cfg.ServiceAccount + + rayjob, err := constructRayJob(taskCtx, &rayJob, objectMeta, *podSpec, headNodeRayStartParams, primaryContainerIdx, *primaryContainer) + + return 
}

// constructRayJob assembles the RayJob custom resource: a RayClusterSpec with
// one head group plus one worker group per WorkerGroupSpec in the task's
// RayJob proto, followed by job-level settings (entrypoint, TTL, runtime env,
// submitter pod).
func constructRayJob(taskCtx pluginsCore.TaskExecutionContext, rayJob *plugins.RayJob, objectMeta *metav1.ObjectMeta, taskPodSpec v1.PodSpec, headNodeRayStartParams map[string]string, primaryContainerIdx int, primaryContainer v1.Container) (*rayv1.RayJob, error) {
	enableIngress := true
	cfg := GetConfig()

	// Head group: derive its pod template from a deep copy of the task pod.
	headPodSpec := taskPodSpec.DeepCopy()
	headPodTemplate, err := buildHeadPodTemplate(
		&headPodSpec.Containers[primaryContainerIdx],
		headPodSpec,
		objectMeta,
		taskCtx,
		rayJob.RayCluster.HeadGroupSpec,
	)
	if err != nil {
		return nil, err
	}

	rayClusterSpec := rayv1.RayClusterSpec{
		HeadGroupSpec: rayv1.HeadGroupSpec{
			Template:       headPodTemplate,
			ServiceType:    v1.ServiceType(cfg.ServiceType),
			EnableIngress:  &enableIngress,
			RayStartParams: headNodeRayStartParams,
		},
		WorkerGroupSpecs:        []rayv1.WorkerGroupSpec{},
		EnableInTreeAutoscaling: &rayJob.RayCluster.EnableAutoscaling,
	}

	for _, spec := range rayJob.RayCluster.WorkerGroupSpec {
		// Each worker group also starts from a deep copy of the task pod.
		workerPodSpec := taskPodSpec.DeepCopy()
		workerPodTemplate, err := buildWorkerPodTemplate(
			&workerPodSpec.Containers[primaryContainerIdx],
			workerPodSpec,
			objectMeta,
			taskCtx,
			spec,
		)
		if err != nil {
			return nil, err
		}

		// Worker ray start parameters: task override first, then configured
		// plugin defaults.
		workerNodeRayStartParams := make(map[string]string)
		if spec.RayStartParams != nil {
			workerNodeRayStartParams = spec.RayStartParams
		} else if workerNode := cfg.Defaults.WorkerNode; len(workerNode.StartParameters) > 0 {
			workerNodeRayStartParams = workerNode.StartParameters
		}

		if _, exist := workerNodeRayStartParams[NodeIPAddress]; !exist {
			workerNodeRayStartParams[NodeIPAddress] = cfg.Defaults.WorkerNode.IPAddress
		}

		if _, exists := workerNodeRayStartParams[DisableUsageStatsStartParameter]; !exists && !cfg.EnableUsageStats {
			workerNodeRayStartParams[DisableUsageStatsStartParameter] = DisableUsageStatsStartParameterVal
		}

		// Clamp the replica bounds so that min <= replicas <= max.
		minReplicas := spec.MinReplicas
		if minReplicas > spec.Replicas {
			minReplicas = spec.Replicas
		}
maxReplicas := spec.MaxReplicas
		if maxReplicas < spec.Replicas {
			maxReplicas = spec.Replicas
		}

		workerNodeSpec := rayv1.WorkerGroupSpec{
			GroupName:      spec.GroupName,
			MinReplicas:    &minReplicas,
			MaxReplicas:    &maxReplicas,
			Replicas:       &spec.Replicas,
			RayStartParams: workerNodeRayStartParams,
			Template:       workerPodTemplate,
		}

		rayClusterSpec.WorkerGroupSpecs = append(rayClusterSpec.WorkerGroupSpecs, workerNodeSpec)
	}

	// Prefer the service account from task execution metadata; fall back to
	// the plugin-configured account, and apply it to head and all workers.
	serviceAccountName := flytek8s.GetServiceAccountNameFromTaskExecutionMetadata(taskCtx.TaskExecutionMetadata())
	if len(serviceAccountName) == 0 {
		serviceAccountName = cfg.ServiceAccount
	}

	rayClusterSpec.HeadGroupSpec.Template.Spec.ServiceAccountName = serviceAccountName
	for index := range rayClusterSpec.WorkerGroupSpecs {
		rayClusterSpec.WorkerGroupSpecs[index].Template.Spec.ServiceAccountName = serviceAccountName
	}

	// Task-level shutdown/TTL settings override the plugin config.
	shutdownAfterJobFinishes := cfg.ShutdownAfterJobFinishes
	ttlSecondsAfterFinished := &cfg.TTLSecondsAfterFinished
	if rayJob.ShutdownAfterJobFinishes {
		shutdownAfterJobFinishes = true
		ttlSecondsAfterFinished = &rayJob.TtlSecondsAfterFinished
	}

	submitterPodTemplate := buildSubmitterPodTemplate(&rayClusterSpec)

	// TODO: This is for backward compatibility. Remove this block once runtime_env is removed from ray proto.
var runtimeEnvYaml string
	runtimeEnvYaml = rayJob.RuntimeEnvYaml
	// If runtime_env exists but runtime_env_yaml does not, convert runtime_env to runtime_env_yaml
	if rayJob.RuntimeEnv != "" && rayJob.RuntimeEnvYaml == "" {
		runtimeEnvYaml, err = convertBase64RuntimeEnvToYaml(rayJob.RuntimeEnv)
		if err != nil {
			return nil, err
		}
	}

	jobSpec := rayv1.RayJobSpec{
		RayClusterSpec:           &rayClusterSpec,
		Entrypoint:               strings.Join(primaryContainer.Args, " "),
		ShutdownAfterJobFinishes: shutdownAfterJobFinishes,
		TTLSecondsAfterFinished:  *ttlSecondsAfterFinished,
		RuntimeEnvYAML:           runtimeEnvYaml,
		SubmitterPodTemplate:     &submitterPodTemplate,
	}

	return &rayv1.RayJob{
		TypeMeta: metav1.TypeMeta{
			Kind:       KindRayJob,
			APIVersion: rayv1.SchemeGroupVersion.String(),
		},
		Spec:       jobSpec,
		ObjectMeta: *objectMeta,
	}, nil
}

// convertBase64RuntimeEnvToYaml converts a legacy base64-encoded JSON
// runtime_env string into the YAML form expected by RayJobSpec.RuntimeEnvYAML.
func convertBase64RuntimeEnvToYaml(s string) (string, error) {
	// Decode from base64
	data, err := base64.StdEncoding.DecodeString(s)
	if err != nil {
		return "", err
	}

	// Unmarshal JSON
	var obj map[string]interface{}
	err = json.Unmarshal(data, &obj)
	if err != nil {
		return "", err
	}

	// Convert to YAML
	y, err := yaml.Marshal(&obj)
	if err != nil {
		return "", err
	}

	return string(y), nil
}

// injectLogsSidecar appends the configured logs sidecar container (if any) to
// the pod, sharing the primary container's /tmp/ray state volume read-only.
func injectLogsSidecar(primaryContainer *v1.Container, podSpec *v1.PodSpec) {
	cfg := GetConfig()
	if cfg.LogsSidecar == nil {
		return
	}
	sidecar := cfg.LogsSidecar.DeepCopy()

	// Ray logs integration
	var rayStateVolMount *v1.VolumeMount
	// Look for an existing volume mount on the primary container, mounted at /tmp/ray
	for _, vm := range primaryContainer.VolumeMounts {
		if vm.MountPath == rayStateMountPath {
			vm := vm
			rayStateVolMount = &vm
			break
		}
	}
	// No existing volume mount exists at /tmp/ray.
We create a new volume and volume + // mount and add it to the pod and container specs respectively + if rayStateVolMount == nil { + vol := v1.Volume{ + Name: defaultRayStateVolName, + VolumeSource: v1.VolumeSource{ + EmptyDir: &v1.EmptyDirVolumeSource{}, + }, + } + podSpec.Volumes = append(podSpec.Volumes, vol) + volMount := v1.VolumeMount{ + Name: defaultRayStateVolName, + MountPath: rayStateMountPath, + } + primaryContainer.VolumeMounts = append(primaryContainer.VolumeMounts, volMount) + rayStateVolMount = &volMount + } + // We need to mirror the ray state volume mount into the sidecar as readonly, + // so that we can read the logs written by the head node. + readOnlyRayStateVolMount := *rayStateVolMount.DeepCopy() + readOnlyRayStateVolMount.ReadOnly = true + + // Update volume mounts on sidecar + // If one already exists with the desired mount path, simply replace it. Otherwise, + // add it to sidecar's volume mounts. + foundExistingSidecarVolMount := false + for idx, vm := range sidecar.VolumeMounts { + if vm.MountPath == rayStateMountPath { + foundExistingSidecarVolMount = true + sidecar.VolumeMounts[idx] = readOnlyRayStateVolMount + } + } + if !foundExistingSidecarVolMount { + sidecar.VolumeMounts = append(sidecar.VolumeMounts, readOnlyRayStateVolMount) + } + + // Add sidecar to containers + podSpec.Containers = append(podSpec.Containers, *sidecar) +} + +func buildHeadPodTemplate(primaryContainer *v1.Container, basePodSpec *v1.PodSpec, objectMeta *metav1.ObjectMeta, taskCtx pluginsCore.TaskExecutionContext, spec *plugins.HeadGroupSpec) (v1.PodTemplateSpec, error) { + // Some configs are copy from https://github.com/ray-project/kuberay/blob/b72e6bdcd9b8c77a9dc6b5da8560910f3a0c3ffd/apiserver/pkg/util/cluster.go#L97 + // They should always be the same, so we could hard code here. 
+ primaryContainer.Name = RayHeadContainerName + + envs := []v1.EnvVar{ + { + Name: "MY_POD_IP", + ValueFrom: &v1.EnvVarSource{ + FieldRef: &v1.ObjectFieldSelector{ + FieldPath: "status.podIP", + }, + }, + }, + } + + // Removed 'a0 ..' / 'pyflyte-execute ..' args from the pod spec. + primaryContainer.Args = []string{} + primaryContainer.Env = append(primaryContainer.Env, envs...) + + ports := []v1.ContainerPort{ + { + Name: "redis", + ContainerPort: 6379, + }, + { + Name: "head", + ContainerPort: 10001, + }, + { + Name: "dashboard", + ContainerPort: 8265, + }, + } + + primaryContainer.Ports = append(primaryContainer.Ports, ports...) + + // Inject a sidecar for capturing and exposing Ray job logs + injectLogsSidecar(primaryContainer, basePodSpec) + + basePodSpec, err := mergeCustomPodSpec(basePodSpec, spec.GetK8SPod()) + if err != nil { + return v1.PodTemplateSpec{}, err + } + + basePodSpec = flytek8s.AddTolerationsForExtendedResources(basePodSpec) + + podTemplateSpec := v1.PodTemplateSpec{ + Spec: *basePodSpec, + ObjectMeta: *objectMeta, + } + cfg := config.GetK8sPluginConfig() + podTemplateSpec.SetLabels(utils.UnionMaps(cfg.DefaultLabels, podTemplateSpec.GetLabels(), utils.CopyMap(taskCtx.TaskExecutionMetadata().GetLabels()), spec.GetK8SPod().GetMetadata().GetLabels())) + podTemplateSpec.SetAnnotations(utils.UnionMaps(cfg.DefaultAnnotations, podTemplateSpec.GetAnnotations(), utils.CopyMap(taskCtx.TaskExecutionMetadata().GetAnnotations()), spec.GetK8SPod().GetMetadata().GetAnnotations())) + + return podTemplateSpec, nil +} + +func buildSubmitterPodTemplate(rayClusterSpec *rayv1.RayClusterSpec) v1.PodTemplateSpec { + + submitterPodSpec := rayClusterSpec.HeadGroupSpec.Template.DeepCopy() + + submitterPodSpec.Spec.Containers = []v1.Container{ + { + Name: "ray-job-submitter", + // Use the image of the Ray head to be defensive against version mismatch issues + Image: rayClusterSpec.HeadGroupSpec.Template.Spec.Containers[0].Image, + Resources: 
submitterDefaultResourceRequirements, + }, + } + + return *submitterPodSpec +} + +func buildWorkerPodTemplate(primaryContainer *v1.Container, basePodSpec *v1.PodSpec, objectMetadata *metav1.ObjectMeta, taskCtx pluginsCore.TaskExecutionContext, spec *plugins.WorkerGroupSpec) (v1.PodTemplateSpec, error) { + // Some configs are copy from https://github.com/ray-project/kuberay/blob/b72e6bdcd9b8c77a9dc6b5da8560910f3a0c3ffd/apiserver/pkg/util/cluster.go#L185 + // They should always be the same, so we could hard code here. + + primaryContainer.Name = "ray-worker" + + primaryContainer.Args = []string{} + + envs := []v1.EnvVar{ + { + Name: "RAY_DISABLE_DOCKER_CPU_WARNING", + Value: "1", + }, + { + Name: "TYPE", + Value: "worker", + }, + { + Name: "CPU_REQUEST", + ValueFrom: &v1.EnvVarSource{ + ResourceFieldRef: &v1.ResourceFieldSelector{ + ContainerName: "ray-worker", + Resource: "requests.cpu", + }, + }, + }, + { + Name: "CPU_LIMITS", + ValueFrom: &v1.EnvVarSource{ + ResourceFieldRef: &v1.ResourceFieldSelector{ + ContainerName: "ray-worker", + Resource: "limits.cpu", + }, + }, + }, + { + Name: "MEMORY_REQUESTS", + ValueFrom: &v1.EnvVarSource{ + ResourceFieldRef: &v1.ResourceFieldSelector{ + ContainerName: "ray-worker", + Resource: "requests.cpu", + }, + }, + }, + { + Name: "MEMORY_LIMITS", + ValueFrom: &v1.EnvVarSource{ + ResourceFieldRef: &v1.ResourceFieldSelector{ + ContainerName: "ray-worker", + Resource: "limits.cpu", + }, + }, + }, + { + Name: "MY_POD_NAME", + ValueFrom: &v1.EnvVarSource{ + FieldRef: &v1.ObjectFieldSelector{ + FieldPath: "metadata.name", + }, + }, + }, + { + Name: "MY_POD_IP", + ValueFrom: &v1.EnvVarSource{ + FieldRef: &v1.ObjectFieldSelector{ + FieldPath: "status.podIP", + }, + }, + }, + } + + primaryContainer.Env = append(primaryContainer.Env, envs...) 
+ + primaryContainer.Lifecycle = &v1.Lifecycle{ + PreStop: &v1.LifecycleHandler{ + Exec: &v1.ExecAction{ + Command: []string{ + "/bin/sh", "-c", "ray stop", + }, + }, + }, + } + + ports := []v1.ContainerPort{ + { + Name: "redis", + ContainerPort: 6379, + }, + { + Name: "head", + ContainerPort: 10001, + }, + { + Name: "dashboard", + ContainerPort: 8265, + }, + } + primaryContainer.Ports = append(primaryContainer.Ports, ports...) + + basePodSpec, err := mergeCustomPodSpec(basePodSpec, spec.GetK8SPod()) + if err != nil { + return v1.PodTemplateSpec{}, err + } + + basePodSpec = flytek8s.AddTolerationsForExtendedResources(basePodSpec) + + podTemplateSpec := v1.PodTemplateSpec{ + Spec: *basePodSpec, + ObjectMeta: *objectMetadata, + } + cfg := config.GetK8sPluginConfig() + podTemplateSpec.SetLabels(utils.UnionMaps(cfg.DefaultLabels, podTemplateSpec.GetLabels(), utils.CopyMap(taskCtx.TaskExecutionMetadata().GetLabels()), spec.GetK8SPod().GetMetadata().GetLabels())) + podTemplateSpec.SetAnnotations(utils.UnionMaps(cfg.DefaultAnnotations, podTemplateSpec.GetAnnotations(), utils.CopyMap(taskCtx.TaskExecutionMetadata().GetAnnotations()), spec.GetK8SPod().GetMetadata().GetAnnotations())) + return podTemplateSpec, nil +} + +// Merges a ray head/worker node custom pod specs onto task's generated pod spec +func mergeCustomPodSpec(podSpec *v1.PodSpec, k8sPod *core.K8SPod) (*v1.PodSpec, error) { + if k8sPod == nil { + return podSpec, nil + } + + if k8sPod.PodSpec == nil { + return podSpec, nil + } + + var customPodSpec *v1.PodSpec + + err := utils.UnmarshalStructToObj(k8sPod.PodSpec, &customPodSpec) + if err != nil { + return nil, flyteerr.Errorf(flyteerr.BadTaskSpecification, + "Unable to unmarshal pod spec [%v], Err: [%v]", k8sPod.PodSpec, err.Error()) + } + + podSpec, err = flytek8s.MergeOverlayPodSpecOntoBase(podSpec, customPodSpec) + if err != nil { + return nil, err + } + + return podSpec, nil +} + +func (rayJobResourceHandler) BuildIdentityResource(ctx context.Context, 
taskCtx pluginsCore.TaskExecutionMetadata) (client.Object, error) { + return &rayv1.RayJob{ + TypeMeta: metav1.TypeMeta{ + Kind: KindRayJob, + APIVersion: rayv1.SchemeGroupVersion.String(), + }, + }, nil +} + +func getEventInfoForRayJob(ctx context.Context, logConfig logs.LogConfig, pluginContext k8s.PluginContext, rayJob *rayv1.RayJob) (*pluginsCore.TaskInfo, error) { + logPlugin, err := logs.InitializeLogPlugins(&logConfig) + if err != nil { + return nil, fmt.Errorf("failed to initialize log plugins. Error: %w", err) + } + + var taskLogs []*core.TaskLog + taskExecID := pluginContext.TaskExecutionMetadata().GetTaskExecutionID() + podList := &v1.PodList{} + err = pluginContext.K8sReader().List(ctx, podList) + if err != nil { + return nil, fmt.Errorf("failed to list node execution pods. Error: %w", err) + } + var enableVscode bool + if rayJob.Spec.RayClusterSpec != nil && + rayJob.Spec.RayClusterSpec.HeadGroupSpec.Template.Spec.Containers != nil && + len(rayJob.Spec.RayClusterSpec.HeadGroupSpec.Template.Spec.Containers) > 0 { + enableVscode = flytek8s.IsVscodeEnabled(ctx, rayJob.Spec.RayClusterSpec.HeadGroupSpec.Template.Spec.Containers[0].Env) + } + input := tasklog.Input{ + PodName: fmt.Sprintf("%s-head", rayJob.Status.RayClusterName), + Namespace: rayJob.Namespace, + TaskExecutionID: taskExecID, + ExtraTemplateVars: []tasklog.TemplateVar{}, + EnableVscode: enableVscode, + } + if rayJob.Status.JobId != "" { + input.ExtraTemplateVars = append( + input.ExtraTemplateVars, + tasklog.TemplateVar{ + Regex: logTemplateRegexes.RayJobID, + Value: rayJob.Status.JobId, + }, + ) + } + if rayJob.Status.RayClusterName != "" { + input.ExtraTemplateVars = append( + input.ExtraTemplateVars, + tasklog.TemplateVar{ + Regex: logTemplateRegexes.RayClusterName, + Value: rayJob.Status.RayClusterName, + }, + ) + } + + // TODO: Retrieve the name of head pod from rayJob.status, and add it to task logs + // RayJob CRD does not include the name of the worker or head pod for now + logOutput, 
err := logPlugin.GetTaskLogs(input) + if err != nil { + return nil, fmt.Errorf("failed to generate task logs. Error: %w", err) + } + taskLogs = append(taskLogs, logOutput.TaskLogs...) + + // Handling for Ray Dashboard + dashboardURLTemplate := GetConfig().DashboardURLTemplate + if dashboardURLTemplate != nil && + rayJob.Status.DashboardURL != "" && + rayJob.Status.JobStatus == rayv1.JobStatusRunning { + dashboardURLTemplate.LinkType = core.TaskLog_DASHBOARD.String() + dashboardURLOutput, err := dashboardURLTemplate.GetTaskLogs(input) + if err != nil { + return nil, fmt.Errorf("failed to generate Ray dashboard link. Error: %w", err) + } + taskLogs = append(taskLogs, dashboardURLOutput.TaskLogs...) + } + + return &pluginsCore.TaskInfo{ + Logs: taskLogs, + LogContext: logContextForPods(rayJob.Name, podList.Items), + }, nil +} + +func isRayHeadReady(ctx context.Context, rayJobName string, pluginContext k8s.PluginContext) (bool, error) { + podList := &v1.PodList{} + err := pluginContext.K8sReader().List(ctx, podList) + if err != nil { + return false, fmt.Errorf("failed to list node execution pods. Error: %w", err) + } + pods := lo.Filter(podList.Items, func(pod v1.Pod, _ int) bool { + return pod.Status.Phase != v1.PodPending && strings.HasPrefix(pod.Name, rayJobName) && strings.Contains(pod.Name, "head") && flytek8s.GetPrimaryContainerName(&pod) == RayHeadContainerName + }) + if len(pods) == 0 { + return false, nil + } else if len(pods) == 1 { + return pod.IsPodReady(&pods[0]), nil + } + + // More than one head pod. Should not happen. 
+ logger.Debug(ctx, "Cannot determine Ray head readiness: more than one head pod found") + return true, fmt.Errorf("more than one head pod found for Ray job %s", rayJobName) +} + +func logContextForPods(rayJobName string, pods []v1.Pod) *core.LogContext { + pods = lo.Filter(pods, func(item v1.Pod, _ int) bool { + // Running, Succeeded or Failed is OK + return item.Status.Phase != v1.PodPending + }) + logCtx := &core.LogContext{ + Pods: make([]*core.PodLogContext, len(pods)), + } + for i, pod := range pods { + p := pod + // Ray job has name like `az6dh2bxk6wnxn2xv8l6-n0-0` + // Ray head primary pod has name like `az6dh2bxk6wnxn2xv8l6-n0-0-raycluster-szwgz-head-z59ss` + if strings.HasPrefix(p.Name, rayJobName) && strings.Contains(p.Name, "head") && flytek8s.GetPrimaryContainerName(&p) == RayHeadContainerName { + logCtx.PrimaryPodName = p.Name + } + logCtx.Pods[i] = flytek8s.BuildPodLogContext(&p) + } + return logCtx +} + +func (plugin rayJobResourceHandler) GetTaskPhase(ctx context.Context, pluginContext k8s.PluginContext, resource client.Object) (pluginsCore.PhaseInfo, error) { + rayJob := resource.(*rayv1.RayJob) + info, err := getEventInfoForRayJob(ctx, GetConfig().Logs, pluginContext, rayJob) + if err != nil { + return pluginsCore.PhaseInfoUndefined, err + } + + if len(rayJob.Status.JobDeploymentStatus) == 0 { + return pluginsCore.PhaseInfoQueuedWithTaskInfo(time.Now(), pluginsCore.DefaultPhaseVersion, "Scheduling", info), nil + } + + podName := fmt.Sprintf("%s-head", rayJob.Status.RayClusterName) + phaseInfo, err := flytek8s.DemystifyFailedOrPendingPod(ctx, pluginContext, *info, rayJob.Namespace, podName, RayHeadContainerName) + if err != nil { + logger.Errorf(ctx, "Failed to demystify pod status for ray head node. Error: %v", err) + } + if phaseInfo.Phase().IsFailure() { + // If the ray head node is in a failure state, we can fail fast without checking the RayJob status. 
+ return phaseInfo, nil + } + + // KubeRay creates a Ray cluster first, and then submits a Ray job to the cluster + switch rayJob.Status.JobDeploymentStatus { + case rayv1.JobDeploymentStatusInitializing: + phaseInfo, err = pluginsCore.PhaseInfoInitializing(rayJob.CreationTimestamp.Time, pluginsCore.DefaultPhaseVersion, "cluster is creating", info), nil + case rayv1.JobDeploymentStatusRunning: + phaseInfo, err = pluginsCore.PhaseInfoRunning(pluginsCore.DefaultPhaseVersion, info), nil + case rayv1.JobDeploymentStatusComplete: + phaseInfo, err = pluginsCore.PhaseInfoSuccess(info), nil + case rayv1.JobDeploymentStatusSuspending: + return pluginsCore.PhaseInfoQueuedWithTaskInfo(time.Now(), pluginsCore.DefaultPhaseVersion, "cluster is about to be suspended", info), nil + case rayv1.JobDeploymentStatusSuspended: + return pluginsCore.PhaseInfoQueuedWithTaskInfo(time.Now(), pluginsCore.DefaultPhaseVersion, "cluster is suspended", info), nil + case rayv1.JobDeploymentStatusFailed: + failInfo := fmt.Sprintf("Failed to run Ray job %s with error: [%s] %s", rayJob.Name, rayJob.Status.Reason, rayJob.Status.Message) + phaseInfo, err = pluginsCore.PhaseInfoFailure(flyteerr.TaskFailedWithError, failInfo, info), nil + default: + // We already handle all known deployment status, so this should never happen unless a future version of ray + // introduced a new job status. + phaseInfo, err = pluginsCore.PhaseInfoUndefined, fmt.Errorf("unknown job deployment status: %s", rayJob.Status.JobDeploymentStatus) + } + + if ready, err := isRayHeadReady(ctx, rayJob.Name, pluginContext); err != nil { + logger.Warnf(ctx, "Failed to determine Ray dashboard readiness. 
Error: %v", err) + } else { + for _, tl := range info.Logs { + if tl != nil && tl.LinkType == core.TaskLog_DASHBOARD { + tl.Ready = ready + if !ready || phaseInfo.Phase() < pluginsCore.PhaseRunning { + phaseInfo.WithReason("Ray dashboard is not ready") + } else { + phaseInfo.WithReason("Ray dashboard is ready") + } + } else if tl != nil && tl.LinkType == core.TaskLog_IDE { + tl.Ready = ready + if !ready || phaseInfo.Phase() != pluginsCore.PhaseRunning { + phaseInfo.WithReason("Vscode server is not ready") + } else { + phaseInfo.WithReason("Vscode server is ready") + } + } + } + } + + phaseVersionUpdateErr := k8s.MaybeUpdatePhaseVersionFromPluginContext(&phaseInfo, &pluginContext) + if phaseVersionUpdateErr != nil { + return phaseInfo, phaseVersionUpdateErr + } + + return phaseInfo, err +} + +func init() { + if err := rayv1.AddToScheme(scheme.Scheme); err != nil { + panic(err) + } + + pluginmachinery.PluginRegistry().RegisterK8sPlugin( + k8s.PluginEntry{ + ID: rayTaskType, + RegisteredTaskTypes: []pluginsCore.TaskType{rayTaskType}, + ResourceToWatch: &rayv1.RayJob{}, + Plugin: rayJobResourceHandler{}, + IsDefault: false, + CustomKubeClient: func(ctx context.Context) (pluginsCore.KubeClient, error) { + remoteConfig := GetConfig().RemoteClusterConfig + if !remoteConfig.Enabled { + // use controller-runtime KubeClient + return nil, nil + } + + kubeConfig, err := k8s.KubeClientConfig(remoteConfig.Endpoint, remoteConfig.Auth) + if err != nil { + return nil, err + } + + return k8s.NewDefaultKubeClient(kubeConfig) + }, + }) +} diff --git a/flyteplugins/go/tasks/plugins/k8s/ray/ray_test.go b/flyteplugins/go/tasks/plugins/k8s/ray/ray_test.go new file mode 100644 index 0000000000..402e2ef669 --- /dev/null +++ b/flyteplugins/go/tasks/plugins/k8s/ray/ray_test.go @@ -0,0 +1,1534 @@ +package ray + +import ( + "context" + "encoding/json" + "reflect" + "testing" + "time" + + rayv1 "github.com/ray-project/kuberay/ray-operator/apis/ray/v1" + "github.com/stretchr/testify/assert" + 
"github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/types/known/structpb" + "google.golang.org/protobuf/types/known/timestamppb" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/logs" + pluginsCore "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core/mocks" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/flytek8s" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/flytek8s/config" + pluginIOMocks "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/io/mocks" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/k8s" + k8smocks "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/k8s/mocks" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/tasklog" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/utils" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins" +) + +const ( + testImage = "image://" + serviceAccount = "ray_sa" +) + +var ( + dummyEnvVars = []*core.KeyValuePair{ + {Key: "Env_Var", Value: "Env_Val"}, + } + + testArgs = []string{ + "test-args", + } + + resourceRequirements = &corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("1000m"), + corev1.ResourceMemory: resource.MustParse("1Gi"), + flytek8s.ResourceNvidiaGPU: resource.MustParse("1"), + }, + Requests: corev1.ResourceList{ + corev1.ResourceCPU: resource.MustParse("100m"), + corev1.ResourceMemory: resource.MustParse("512Mi"), + flytek8s.ResourceNvidiaGPU: resource.MustParse("1"), + }, + } + + workerGroupName = 
"worker-group" +) + +func transformRayJobToCustomObj(rayJob *plugins.RayJob) *structpb.Struct { + structObj, err := utils.MarshalObjToStruct(rayJob) + if err != nil { + panic(err) + } + return structObj +} + +func transformPodSpecToTaskTemplateTarget(podSpec *corev1.PodSpec) *core.TaskTemplate_K8SPod { + structObj, err := utils.MarshalObjToStruct(&podSpec) + if err != nil { + panic(err) + } + return &core.TaskTemplate_K8SPod{ + K8SPod: &core.K8SPod{ + PodSpec: structObj, + }, + } +} + +func dummyRayCustomObj() *plugins.RayJob { + return &plugins.RayJob{ + RayCluster: &plugins.RayCluster{ + HeadGroupSpec: &plugins.HeadGroupSpec{RayStartParams: map[string]string{"num-cpus": "1"}}, + WorkerGroupSpec: []*plugins.WorkerGroupSpec{{GroupName: workerGroupName, Replicas: 3, MinReplicas: 3, MaxReplicas: 3}}, + EnableAutoscaling: true, + }, + ShutdownAfterJobFinishes: true, + TtlSecondsAfterFinished: 120, + } +} + +func dummyRayTaskTemplate(id string, rayJob *plugins.RayJob) *core.TaskTemplate { + return &core.TaskTemplate{ + Id: &core.Identifier{Name: id}, + Type: "container", + Target: &core.TaskTemplate_Container{ + Container: &core.Container{ + Image: testImage, + Args: testArgs, + Env: dummyEnvVars, + }, + }, + Custom: transformRayJobToCustomObj(rayJob), + } +} + +func dummyRayTaskContext(taskTemplate *core.TaskTemplate, resources *corev1.ResourceRequirements, extendedResources *core.ExtendedResources, containerImage, serviceAccount string) pluginsCore.TaskExecutionContext { + taskCtx := &mocks.TaskExecutionContext{} + inputReader := &pluginIOMocks.InputReader{} + inputReader.OnGetInputPrefixPath().Return("/input/prefix") + inputReader.OnGetInputPath().Return("/input") + inputReader.OnGetMatch(mock.Anything).Return(&core.LiteralMap{}, nil) + taskCtx.OnInputReader().Return(inputReader) + + outputReader := &pluginIOMocks.OutputWriter{} + outputReader.OnGetOutputPath().Return("/data/outputs.pb") + outputReader.OnGetOutputPrefixPath().Return("/data/") + 
outputReader.OnGetRawOutputPrefix().Return("") + outputReader.OnGetCheckpointPrefix().Return("/checkpoint") + outputReader.OnGetPreviousCheckpointsPrefix().Return("/prev") + taskCtx.OnOutputWriter().Return(outputReader) + + taskReader := &mocks.TaskReader{} + taskReader.OnReadMatch(mock.Anything).Return(taskTemplate, nil) + taskCtx.OnTaskReader().Return(taskReader) + + tID := &mocks.TaskExecutionID{} + tID.OnGetID().Return(core.TaskExecutionIdentifier{ + NodeExecutionId: &core.NodeExecutionIdentifier{ + ExecutionId: &core.WorkflowExecutionIdentifier{ + Name: "my_name", + Project: "my_project", + Domain: "my_domain", + }, + }, + }) + tID.OnGetGeneratedName().Return("some-acceptable-name") + + overrides := &mocks.TaskOverrides{} + overrides.OnGetResources().Return(resources) + overrides.OnGetExtendedResources().Return(extendedResources) + overrides.OnGetContainerImage().Return(containerImage) + overrides.OnGetPodTemplate().Return(nil) + + taskExecutionMetadata := &mocks.TaskExecutionMetadata{} + taskExecutionMetadata.OnGetTaskExecutionID().Return(tID) + taskExecutionMetadata.OnGetNamespace().Return("test-namespace") + taskExecutionMetadata.OnGetAnnotations().Return(map[string]string{"annotation-1": "val1"}) + taskExecutionMetadata.OnGetLabels().Return(map[string]string{"label-1": "val1"}) + taskExecutionMetadata.OnGetOwnerReference().Return(metav1.OwnerReference{ + Kind: "node", + Name: "blah", + }) + taskExecutionMetadata.OnIsInterruptible().Return(true) + taskExecutionMetadata.OnGetOverrides().Return(overrides) + taskExecutionMetadata.OnGetK8sServiceAccount().Return(serviceAccount) + taskExecutionMetadata.OnGetPlatformResources().Return(&corev1.ResourceRequirements{}) + taskExecutionMetadata.OnGetSecurityContext().Return(core.SecurityContext{ + RunAs: &core.Identity{K8SServiceAccount: serviceAccount}, + }) + taskExecutionMetadata.OnGetEnvironmentVariables().Return(nil) + taskExecutionMetadata.OnGetConsoleURL().Return("") + 
taskCtx.OnTaskExecutionMetadata().Return(taskExecutionMetadata) + return taskCtx +} + +func TestBuildResourceRay(t *testing.T) { + rayJobResourceHandler := rayJobResourceHandler{} + taskTemplate := dummyRayTaskTemplate("ray-id", dummyRayCustomObj()) + toleration := []corev1.Toleration{{ + Key: "storage", + Value: "dedicated", + Operator: corev1.TolerationOpExists, + Effect: corev1.TaintEffectNoSchedule, + }} + err := config.SetK8sPluginConfig(&config.K8sPluginConfig{DefaultTolerations: toleration}) + assert.Nil(t, err) + + rayCtx := dummyRayTaskContext(taskTemplate, resourceRequirements, nil, "", serviceAccount) + RayResource, err := rayJobResourceHandler.BuildResource(context.TODO(), rayCtx) + assert.Nil(t, err) + + assert.NotNil(t, RayResource) + ray, ok := RayResource.(*rayv1.RayJob) + assert.True(t, ok) + + assert.Equal(t, *ray.Spec.RayClusterSpec.EnableInTreeAutoscaling, true) + assert.Equal(t, ray.Spec.ShutdownAfterJobFinishes, true) + assert.Equal(t, ray.Spec.TTLSecondsAfterFinished, int32(120)) + + assert.Equal(t, ray.Spec.RayClusterSpec.HeadGroupSpec.Template.Spec.ServiceAccountName, serviceAccount) + assert.Equal(t, ray.Spec.RayClusterSpec.HeadGroupSpec.RayStartParams, + map[string]string{ + "dashboard-host": "0.0.0.0", "disable-usage-stats": "true", "include-dashboard": "true", + "node-ip-address": "$MY_POD_IP", "num-cpus": "1", + }) + assert.Equal(t, ray.Spec.RayClusterSpec.HeadGroupSpec.Template.Annotations, map[string]string{"annotation-1": "val1"}) + assert.Equal(t, ray.Spec.RayClusterSpec.HeadGroupSpec.Template.Labels, map[string]string{"label-1": "val1"}) + assert.Equal(t, ray.Spec.RayClusterSpec.HeadGroupSpec.Template.Spec.Tolerations, toleration) + + workerReplica := int32(3) + assert.Equal(t, *ray.Spec.RayClusterSpec.WorkerGroupSpecs[0].Replicas, workerReplica) + assert.Equal(t, *ray.Spec.RayClusterSpec.WorkerGroupSpecs[0].MinReplicas, workerReplica) + assert.Equal(t, *ray.Spec.RayClusterSpec.WorkerGroupSpecs[0].MaxReplicas, workerReplica) + 
assert.Equal(t, ray.Spec.RayClusterSpec.WorkerGroupSpecs[0].GroupName, workerGroupName) + assert.Equal(t, ray.Spec.RayClusterSpec.WorkerGroupSpecs[0].Template.Spec.ServiceAccountName, serviceAccount) + assert.Equal(t, ray.Spec.RayClusterSpec.WorkerGroupSpecs[0].RayStartParams, map[string]string{"disable-usage-stats": "true", "node-ip-address": "$MY_POD_IP"}) + assert.Equal(t, ray.Spec.RayClusterSpec.WorkerGroupSpecs[0].Template.Annotations, map[string]string{"annotation-1": "val1"}) + assert.Equal(t, ray.Spec.RayClusterSpec.WorkerGroupSpecs[0].Template.Labels, map[string]string{"label-1": "val1"}) + assert.Equal(t, ray.Spec.RayClusterSpec.WorkerGroupSpecs[0].Template.Spec.Tolerations, toleration) + + // Make sure the default service account is being used if SA is not provided in the task context + rayCtx = dummyRayTaskContext(taskTemplate, resourceRequirements, nil, "", "") + RayResource, err = rayJobResourceHandler.BuildResource(context.TODO(), rayCtx) + assert.Nil(t, err) + assert.NotNil(t, RayResource) + ray, ok = RayResource.(*rayv1.RayJob) + assert.True(t, ok) + assert.Equal(t, ray.Spec.RayClusterSpec.HeadGroupSpec.Template.Spec.ServiceAccountName, GetConfig().ServiceAccount) +} + +func TestBuildResourceRayContainerImage(t *testing.T) { + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{})) + + fixtures := []struct { + name string + resources *corev1.ResourceRequirements + containerImageOverride string + }{ + { + "without overrides", + &corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + flytek8s.ResourceNvidiaGPU: resource.MustParse("1"), + }, + }, + "", + }, + { + "with overrides", + &corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + flytek8s.ResourceNvidiaGPU: resource.MustParse("1"), + }, + }, + "container-image-override", + }, + } + + for _, f := range fixtures { + t.Run(f.name, func(t *testing.T) { + taskTemplate := dummyRayTaskTemplate("id", dummyRayCustomObj()) + taskContext := dummyRayTaskContext(taskTemplate, 
f.resources, nil, f.containerImageOverride, serviceAccount) + rayJobResourceHandler := rayJobResourceHandler{} + r, err := rayJobResourceHandler.BuildResource(context.TODO(), taskContext) + assert.Nil(t, err) + assert.NotNil(t, r) + rayJob, ok := r.(*rayv1.RayJob) + assert.True(t, ok) + + var expectedContainerImage string + if len(f.containerImageOverride) > 0 { + expectedContainerImage = f.containerImageOverride + } else { + expectedContainerImage = testImage + } + + // Head node + headNodeSpec := rayJob.Spec.RayClusterSpec.HeadGroupSpec.Template.Spec + assert.Equal(t, expectedContainerImage, headNodeSpec.Containers[0].Image) + + // Worker node + workerNodeSpec := rayJob.Spec.RayClusterSpec.WorkerGroupSpecs[0].Template.Spec + assert.Equal(t, expectedContainerImage, workerNodeSpec.Containers[0].Image) + }) + } +} + +func TestBuildResourceRayExtendedResources(t *testing.T) { + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{ + GpuDeviceNodeLabel: "gpu-node-label", + GpuPartitionSizeNodeLabel: "gpu-partition-size", + GpuResourceName: flytek8s.ResourceNvidiaGPU, + AddTolerationsForExtendedResources: []string{"nvidia.com/gpu"}, + })) + + params := []struct { + name string + resources *corev1.ResourceRequirements + extendedResourcesBase *core.ExtendedResources + extendedResourcesOverride *core.ExtendedResources + expectedNsr []corev1.NodeSelectorTerm + expectedTol []corev1.Toleration + }{ + { + "without overrides", + &corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + flytek8s.ResourceNvidiaGPU: resource.MustParse("1"), + }, + }, + &core.ExtendedResources{ + GpuAccelerator: &core.GPUAccelerator{ + Device: "nvidia-tesla-t4", + }, + }, + nil, + []corev1.NodeSelectorTerm{ + { + MatchExpressions: []corev1.NodeSelectorRequirement{ + { + Key: "gpu-node-label", + Operator: corev1.NodeSelectorOpIn, + Values: []string{"nvidia-tesla-t4"}, + }, + }, + }, + }, + []corev1.Toleration{ + { + Key: "gpu-node-label", + Value: "nvidia-tesla-t4", + Operator: 
corev1.TolerationOpEqual, + Effect: corev1.TaintEffectNoSchedule, + }, + { + Key: "nvidia.com/gpu", + Operator: corev1.TolerationOpExists, + Effect: corev1.TaintEffectNoSchedule, + }, + }, + }, + { + "with overrides", + &corev1.ResourceRequirements{ + Limits: corev1.ResourceList{ + flytek8s.ResourceNvidiaGPU: resource.MustParse("1"), + }, + }, + &core.ExtendedResources{ + GpuAccelerator: &core.GPUAccelerator{ + Device: "nvidia-tesla-t4", + }, + }, + &core.ExtendedResources{ + GpuAccelerator: &core.GPUAccelerator{ + Device: "nvidia-tesla-a100", + PartitionSizeValue: &core.GPUAccelerator_PartitionSize{ + PartitionSize: "1g.5gb", + }, + }, + }, + []corev1.NodeSelectorTerm{ + { + MatchExpressions: []corev1.NodeSelectorRequirement{ + { + Key: "gpu-node-label", + Operator: corev1.NodeSelectorOpIn, + Values: []string{"nvidia-tesla-a100"}, + }, + { + Key: "gpu-partition-size", + Operator: corev1.NodeSelectorOpIn, + Values: []string{"1g.5gb"}, + }, + }, + }, + }, + []corev1.Toleration{ + { + Key: "gpu-node-label", + Value: "nvidia-tesla-a100", + Operator: corev1.TolerationOpEqual, + Effect: corev1.TaintEffectNoSchedule, + }, + { + Key: "gpu-partition-size", + Value: "1g.5gb", + Operator: corev1.TolerationOpEqual, + Effect: corev1.TaintEffectNoSchedule, + }, + { + Key: "nvidia.com/gpu", + Operator: corev1.TolerationOpExists, + Effect: corev1.TaintEffectNoSchedule, + }, + }, + }, + } + + for _, p := range params { + t.Run(p.name, func(t *testing.T) { + taskTemplate := dummyRayTaskTemplate("ray-id", dummyRayCustomObj()) + taskTemplate.ExtendedResources = p.extendedResourcesBase + taskContext := dummyRayTaskContext(taskTemplate, p.resources, p.extendedResourcesOverride, "", serviceAccount) + rayJobResourceHandler := rayJobResourceHandler{} + r, err := rayJobResourceHandler.BuildResource(context.TODO(), taskContext) + assert.Nil(t, err) + assert.NotNil(t, r) + rayJob, ok := r.(*rayv1.RayJob) + assert.True(t, ok) + + // Head node + headNodeSpec := 
rayJob.Spec.RayClusterSpec.HeadGroupSpec.Template.Spec + assert.EqualValues( + t, + p.expectedNsr, + headNodeSpec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms, + ) + assert.EqualValues( + t, + p.expectedTol, + headNodeSpec.Tolerations, + ) + + // Worker node + workerNodeSpec := rayJob.Spec.RayClusterSpec.WorkerGroupSpecs[0].Template.Spec + assert.EqualValues( + t, + p.expectedNsr, + workerNodeSpec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms, + ) + assert.EqualValues( + t, + p.expectedTol, + workerNodeSpec.Tolerations, + ) + }) + } +} + +func TestBuildResourceRayCustomK8SPod(t *testing.T) { + assert.NoError(t, config.SetK8sPluginConfig(&config.K8sPluginConfig{})) + + headResourceEntries := []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "10"}, + {Name: core.Resources_MEMORY, Value: "10Gi"}, + {Name: core.Resources_GPU, Value: "10"}, + } + headResources := &core.Resources{Requests: headResourceEntries, Limits: headResourceEntries} + + expectedHeadResources, err := flytek8s.ToK8sResourceRequirements(headResources) + require.NoError(t, err) + // Add nvidia.com/gpu from task resources since mergeCustomPodSpec only replaces resources + expectedHeadResources.Limits[flytek8s.ResourceNvidiaGPU] = resource.MustParse("1") + expectedHeadResources.Requests[flytek8s.ResourceNvidiaGPU] = resource.MustParse("1") + + workerResourceEntries := []*core.Resources_ResourceEntry{ + {Name: core.Resources_CPU, Value: "20"}, + {Name: core.Resources_MEMORY, Value: "20Gi"}, + {Name: core.Resources_GPU, Value: "20"}, + } + workerResources := &core.Resources{Requests: workerResourceEntries, Limits: workerResourceEntries} + + expectedWorkerResources, err := flytek8s.ToK8sResourceRequirements(workerResources) + require.NoError(t, err) + // Add nvidia.com/gpu from task resources since mergeCustomPodSpec only replaces resources + expectedWorkerResources.Limits[flytek8s.ResourceNvidiaGPU] = 
resource.MustParse("1") + expectedWorkerResources.Requests[flytek8s.ResourceNvidiaGPU] = resource.MustParse("1") + + nvidiaRuntimeClassName := "nvidia-cdi" + + headPodSpecCustomResources := &corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "ray-head", + Resources: *expectedHeadResources, + }, + }, + } + workerPodSpecCustomResources := &corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "ray-worker", + Resources: *expectedWorkerResources, + }, + }, + } + + headPodSpecCustomRuntimeClass := &corev1.PodSpec{ + RuntimeClassName: &nvidiaRuntimeClassName, + } + workerPodSpecCustomRuntimeClass := &corev1.PodSpec{ + RuntimeClassName: &nvidiaRuntimeClassName, + } + + params := []struct { + name string + taskResources *corev1.ResourceRequirements + headK8SPod *core.K8SPod + workerK8SPod *core.K8SPod + expectedSubmitterResources *corev1.ResourceRequirements + expectedHeadResources *corev1.ResourceRequirements + expectedWorkerResources *corev1.ResourceRequirements + expectedSubmitterRuntimeClassName *string + expectedHeadRuntimeClassName *string + expectedWorkerRuntimeClassName *string + }{ + { + name: "task resources", + taskResources: resourceRequirements, + expectedSubmitterResources: &submitterDefaultResourceRequirements, + expectedHeadResources: resourceRequirements, + expectedWorkerResources: resourceRequirements, + }, + { + name: "custom worker and head resources", + taskResources: resourceRequirements, + headK8SPod: &core.K8SPod{ + PodSpec: transformStructToStructPB(t, headPodSpecCustomResources), + }, + workerK8SPod: &core.K8SPod{ + PodSpec: transformStructToStructPB(t, workerPodSpecCustomResources), + }, + expectedSubmitterResources: &submitterDefaultResourceRequirements, + expectedHeadResources: expectedHeadResources, + expectedWorkerResources: expectedWorkerResources, + }, + { + name: "custom runtime class name", + taskResources: resourceRequirements, + expectedSubmitterResources: &submitterDefaultResourceRequirements, + 
expectedHeadResources: resourceRequirements, + expectedWorkerResources: resourceRequirements, + headK8SPod: &core.K8SPod{ + PodSpec: transformStructToStructPB(t, headPodSpecCustomRuntimeClass), + }, + workerK8SPod: &core.K8SPod{ + PodSpec: transformStructToStructPB(t, workerPodSpecCustomRuntimeClass), + }, + expectedHeadRuntimeClassName: &nvidiaRuntimeClassName, + expectedWorkerRuntimeClassName: &nvidiaRuntimeClassName, + }, + } + + for _, p := range params { + t.Run(p.name, func(t *testing.T) { + rayJobInput := dummyRayCustomObj() + + if p.headK8SPod != nil { + rayJobInput.RayCluster.HeadGroupSpec.K8SPod = p.headK8SPod + } + + if p.workerK8SPod != nil { + for _, spec := range rayJobInput.RayCluster.WorkerGroupSpec { + spec.K8SPod = p.workerK8SPod + } + } + + taskTemplate := dummyRayTaskTemplate("ray-id", rayJobInput) + taskContext := dummyRayTaskContext(taskTemplate, p.taskResources, nil, "", serviceAccount) + rayJobResourceHandler := rayJobResourceHandler{} + r, err := rayJobResourceHandler.BuildResource(context.TODO(), taskContext) + assert.Nil(t, err) + assert.NotNil(t, r) + rayJob, ok := r.(*rayv1.RayJob) + assert.True(t, ok) + + submitterPodResources := rayJob.Spec.SubmitterPodTemplate.Spec.Containers[0].Resources + assert.EqualValues(t, + p.expectedSubmitterResources, + &submitterPodResources, + ) + + headPodSpec := rayJob.Spec.RayClusterSpec.HeadGroupSpec.Template.Spec + headPodResources := headPodSpec.Containers[0].Resources + assert.EqualValues(t, + p.expectedHeadResources, + &headPodResources, + ) + + assert.EqualValues(t, p.expectedHeadRuntimeClassName, headPodSpec.RuntimeClassName) + + for _, workerGroupSpec := range rayJob.Spec.RayClusterSpec.WorkerGroupSpecs { + workerPodSpec := workerGroupSpec.Template.Spec + workerPodResources := workerPodSpec.Containers[0].Resources + assert.EqualValues(t, + p.expectedWorkerResources, + &workerPodResources, + ) + assert.EqualValues(t, p.expectedWorkerRuntimeClassName, workerPodSpec.RuntimeClassName) + } + }) + } 
+} + +func TestDefaultStartParameters(t *testing.T) { + rayJobResourceHandler := rayJobResourceHandler{} + rayJob := &plugins.RayJob{ + RayCluster: &plugins.RayCluster{ + HeadGroupSpec: &plugins.HeadGroupSpec{}, + WorkerGroupSpec: []*plugins.WorkerGroupSpec{{GroupName: workerGroupName, Replicas: 3, MinReplicas: 3, MaxReplicas: 3}}, + EnableAutoscaling: true, + }, + ShutdownAfterJobFinishes: true, + TtlSecondsAfterFinished: 120, + } + + taskTemplate := dummyRayTaskTemplate("ray-id", rayJob) + toleration := []corev1.Toleration{{ + Key: "storage", + Value: "dedicated", + Operator: corev1.TolerationOpExists, + Effect: corev1.TaintEffectNoSchedule, + }} + err := config.SetK8sPluginConfig(&config.K8sPluginConfig{DefaultTolerations: toleration}) + assert.Nil(t, err) + + RayResource, err := rayJobResourceHandler.BuildResource(context.TODO(), dummyRayTaskContext(taskTemplate, resourceRequirements, nil, "", serviceAccount)) + assert.Nil(t, err) + + assert.NotNil(t, RayResource) + ray, ok := RayResource.(*rayv1.RayJob) + assert.True(t, ok) + + assert.Equal(t, *ray.Spec.RayClusterSpec.EnableInTreeAutoscaling, true) + assert.Equal(t, ray.Spec.ShutdownAfterJobFinishes, true) + assert.Equal(t, ray.Spec.TTLSecondsAfterFinished, int32(120)) + + assert.Equal(t, ray.Spec.RayClusterSpec.HeadGroupSpec.Template.Spec.ServiceAccountName, serviceAccount) + assert.Equal(t, ray.Spec.RayClusterSpec.HeadGroupSpec.RayStartParams, + map[string]string{ + "dashboard-host": "0.0.0.0", "disable-usage-stats": "true", "include-dashboard": "true", + "node-ip-address": "$MY_POD_IP", + }) + assert.Equal(t, ray.Spec.RayClusterSpec.HeadGroupSpec.Template.Annotations, map[string]string{"annotation-1": "val1"}) + assert.Equal(t, ray.Spec.RayClusterSpec.HeadGroupSpec.Template.Labels, map[string]string{"label-1": "val1"}) + assert.Equal(t, ray.Spec.RayClusterSpec.HeadGroupSpec.Template.Spec.Tolerations, toleration) + + workerReplica := int32(3) + assert.Equal(t, 
*ray.Spec.RayClusterSpec.WorkerGroupSpecs[0].Replicas, workerReplica) + assert.Equal(t, *ray.Spec.RayClusterSpec.WorkerGroupSpecs[0].MinReplicas, workerReplica) + assert.Equal(t, *ray.Spec.RayClusterSpec.WorkerGroupSpecs[0].MaxReplicas, workerReplica) + assert.Equal(t, ray.Spec.RayClusterSpec.WorkerGroupSpecs[0].GroupName, workerGroupName) + assert.Equal(t, ray.Spec.RayClusterSpec.WorkerGroupSpecs[0].Template.Spec.ServiceAccountName, serviceAccount) + assert.Equal(t, ray.Spec.RayClusterSpec.WorkerGroupSpecs[0].RayStartParams, map[string]string{"disable-usage-stats": "true", "node-ip-address": "$MY_POD_IP"}) + assert.Equal(t, ray.Spec.RayClusterSpec.WorkerGroupSpecs[0].Template.Annotations, map[string]string{"annotation-1": "val1"}) + assert.Equal(t, ray.Spec.RayClusterSpec.WorkerGroupSpecs[0].Template.Labels, map[string]string{"label-1": "val1"}) + assert.Equal(t, ray.Spec.RayClusterSpec.WorkerGroupSpecs[0].Template.Spec.Tolerations, toleration) +} + +func TestInjectLogsSidecar(t *testing.T) { + rayJobObj := transformRayJobToCustomObj(dummyRayCustomObj()) + params := []struct { + name string + taskTemplate core.TaskTemplate + // primaryContainerName string + logsSidecarCfg *corev1.Container + expectedVolumes []corev1.Volume + expectedPrimaryContainerVolumeMounts []corev1.VolumeMount + expectedLogsSidecarVolumeMounts []corev1.VolumeMount + }{ + { + "container target", + core.TaskTemplate{ + Id: &core.Identifier{Name: "ray-id"}, + Target: &core.TaskTemplate_Container{ + Container: &core.Container{ + Image: testImage, + Args: testArgs, + }, + }, + Custom: rayJobObj, + }, + &corev1.Container{ + Name: "logs-sidecar", + Image: "test-image", + }, + []corev1.Volume{ + { + Name: "system-ray-state", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + }, + []corev1.VolumeMount{ + { + Name: "system-ray-state", + MountPath: "/tmp/ray", + }, + }, + []corev1.VolumeMount{ + { + Name: "system-ray-state", + MountPath: "/tmp/ray", + ReadOnly: 
true, + }, + }, + }, + { + "container target with no sidecar", + core.TaskTemplate{ + Id: &core.Identifier{Name: "ray-id"}, + Target: &core.TaskTemplate_Container{ + Container: &core.Container{ + Image: testImage, + Args: testArgs, + }, + }, + Custom: rayJobObj, + }, + nil, + nil, + nil, + nil, + }, + { + "pod target", + core.TaskTemplate{ + Id: &core.Identifier{Name: "ray-id"}, + Target: transformPodSpecToTaskTemplateTarget(&corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "main", + Image: "primary-image", + }, + }, + }), + Custom: rayJobObj, + Config: map[string]string{ + flytek8s.PrimaryContainerKey: "main", + }, + }, + &corev1.Container{ + Name: "logs-sidecar", + Image: "test-image", + }, + []corev1.Volume{ + { + Name: "system-ray-state", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + }, + []corev1.VolumeMount{ + { + Name: "system-ray-state", + MountPath: "/tmp/ray", + }, + }, + []corev1.VolumeMount{ + { + Name: "system-ray-state", + MountPath: "/tmp/ray", + ReadOnly: true, + }, + }, + }, + { + "pod target with existing ray state volume", + core.TaskTemplate{ + Id: &core.Identifier{Name: "ray-id"}, + Target: transformPodSpecToTaskTemplateTarget(&corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "main", + Image: "primary-image", + VolumeMounts: []corev1.VolumeMount{ + { + Name: "test-vol", + MountPath: "/tmp/ray", + }, + }, + }, + }, + Volumes: []corev1.Volume{ + { + Name: "test-vol", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + }, + }), + Custom: rayJobObj, + Config: map[string]string{ + flytek8s.PrimaryContainerKey: "main", + }, + }, + &corev1.Container{ + Name: "logs-sidecar", + Image: "test-image", + }, + []corev1.Volume{ + { + Name: "test-vol", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{}, + }, + }, + }, + []corev1.VolumeMount{ + { + Name: "test-vol", + MountPath: "/tmp/ray", + }, + }, + 
[]corev1.VolumeMount{ + { + Name: "test-vol", + MountPath: "/tmp/ray", + ReadOnly: true, + }, + }, + }, + } + + for i := range params { + p := params[i] + t.Run(p.name, func(t *testing.T) { + assert.NoError(t, SetConfig(&Config{ + LogsSidecar: p.logsSidecarCfg, + })) + taskContext := dummyRayTaskContext(&p.taskTemplate, resourceRequirements, nil, "", serviceAccount) + rayJobResourceHandler := rayJobResourceHandler{} + r, err := rayJobResourceHandler.BuildResource(context.TODO(), taskContext) + assert.Nil(t, err) + assert.NotNil(t, r) + rayJob, ok := r.(*rayv1.RayJob) + assert.True(t, ok) + + headPodSpec := rayJob.Spec.RayClusterSpec.HeadGroupSpec.Template.Spec + + // Check volumes + assert.EqualValues(t, p.expectedVolumes, headPodSpec.Volumes) + + // Check containers and respective volume mounts + foundPrimaryContainer := false + foundLogsSidecar := false + for _, cnt := range headPodSpec.Containers { + if cnt.Name == RayHeadContainerName { + foundPrimaryContainer = true + assert.EqualValues( + t, + p.expectedPrimaryContainerVolumeMounts, + cnt.VolumeMounts, + ) + } + if p.logsSidecarCfg != nil && cnt.Name == p.logsSidecarCfg.Name { + foundLogsSidecar = true + assert.EqualValues( + t, + p.expectedLogsSidecarVolumeMounts, + cnt.VolumeMounts, + ) + } + } + assert.Equal(t, true, foundPrimaryContainer) + assert.Equal(t, p.logsSidecarCfg != nil, foundLogsSidecar) + }) + } +} + +func newPluginContext(pluginState k8s.PluginState) *k8smocks.PluginContext { + plg := &k8smocks.PluginContext{} + + taskExecID := &mocks.TaskExecutionID{} + taskExecID.OnGetID().Return(core.TaskExecutionIdentifier{ + TaskId: &core.Identifier{ + ResourceType: core.ResourceType_TASK, + Name: "my-task-name", + Project: "my-task-project", + Domain: "my-task-domain", + Version: "1", + }, + NodeExecutionId: &core.NodeExecutionIdentifier{ + ExecutionId: &core.WorkflowExecutionIdentifier{ + Name: "my-execution-name", + Project: "my-execution-project", + Domain: "my-execution-domain", + }, + }, + 
RetryAttempt: 1, + }) + taskExecID.OnGetUniqueNodeID().Return("unique-node") + taskExecID.OnGetGeneratedName().Return("generated-name") + + tskCtx := &mocks.TaskExecutionMetadata{} + tskCtx.OnGetTaskExecutionID().Return(taskExecID) + plg.OnTaskExecutionMetadata().Return(tskCtx) + + pluginStateReaderMock := mocks.PluginStateReader{} + pluginStateReaderMock.On("Get", mock.AnythingOfType(reflect.TypeOf(&pluginState).String())).Return( + func(v interface{}) uint8 { + *(v.(*k8s.PluginState)) = pluginState + return 0 + }, + func(v interface{}) error { + return nil + }) + + plg.OnPluginStateReader().Return(&pluginStateReaderMock) + + return plg +} + +func init() { + f := defaultConfig + f.Logs = logs.LogConfig{ + IsKubernetesEnabled: true, + } + + if err := SetConfig(&f); err != nil { + panic(err) + } +} + +func TestGetTaskPhase(t *testing.T) { + ctx := context.Background() + rayJobResourceHandler := rayJobResourceHandler{} + pluginCtx := rayPluginContext(k8s.PluginState{}) + + testCases := []struct { + rayJobPhase rayv1.JobDeploymentStatus + expectedCorePhase pluginsCore.Phase + expectedError bool + }{ + {rayv1.JobDeploymentStatusInitializing, pluginsCore.PhaseInitializing, false}, + {rayv1.JobDeploymentStatusRunning, pluginsCore.PhaseRunning, false}, + {rayv1.JobDeploymentStatusComplete, pluginsCore.PhaseSuccess, false}, + {rayv1.JobDeploymentStatusFailed, pluginsCore.PhasePermanentFailure, false}, + {rayv1.JobDeploymentStatusSuspended, pluginsCore.PhaseQueued, false}, + {rayv1.JobDeploymentStatusSuspending, pluginsCore.PhaseQueued, false}, + } + + startTime := time.Date(2024, 0, 0, 0, 0, 0, 0, time.UTC) + endTime := startTime.Add(time.Hour) + podName, contName, initCont := "ray-clust-ray-head", "ray-head", "init" + logCtx := &core.LogContext{ + PrimaryPodName: podName, + Pods: []*core.PodLogContext{ + { + Namespace: "ns", + PodName: podName, + PrimaryContainerName: contName, + Containers: []*core.ContainerContext{ + { + ContainerName: contName, + Process: 
&core.ContainerContext_ProcessContext{ + ContainerStartTime: timestamppb.New(startTime), + ContainerEndTime: timestamppb.New(endTime), + }, + }, + }, + InitContainers: []*core.ContainerContext{ + { + ContainerName: initCont, + Process: &core.ContainerContext_ProcessContext{ + ContainerStartTime: timestamppb.New(startTime), + ContainerEndTime: timestamppb.New(endTime), + }, + }, + }, + }, + }, + } + for _, tc := range testCases { + t.Run("TestGetTaskPhase_"+string(tc.rayJobPhase), func(t *testing.T) { + startTime := metav1.NewTime(time.Now()) + rayObject := &rayv1.RayJob{ + Spec: rayv1.RayJobSpec{ + RayClusterSpec: &rayv1.RayClusterSpec{ + HeadGroupSpec: rayv1.HeadGroupSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "ray-head", + Image: "rayproject/ray:latest", + Env: []corev1.EnvVar{}, + }, + }, + }, + }, + }, + }, + }, + Status: rayv1.RayJobStatus{ + JobDeploymentStatus: tc.rayJobPhase, + RayClusterName: "ray-clust", + StartTime: &startTime, + }, + } + phaseInfo, err := rayJobResourceHandler.GetTaskPhase(ctx, pluginCtx, rayObject) + if tc.expectedError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + assert.Equal(t, tc.expectedCorePhase.String(), phaseInfo.Phase().String()) + assert.Equal(t, logCtx, phaseInfo.Info().LogContext) + } + }) + } +} + +func TestGetTaskPhaseIncreasePhaseVersion(t *testing.T) { + rayJobResourceHandler := rayJobResourceHandler{} + + ctx := context.TODO() + + pluginState := k8s.PluginState{ + Phase: pluginsCore.PhaseInitializing, + PhaseVersion: pluginsCore.DefaultPhaseVersion, + Reason: "task submitted to K8s", + } + pluginCtx := rayPluginContext(pluginState) + + rayObject := &rayv1.RayJob{} + rayObject.Status.JobDeploymentStatus = rayv1.JobDeploymentStatusInitializing + phaseInfo, err := rayJobResourceHandler.GetTaskPhase(ctx, pluginCtx, rayObject) + + assert.NoError(t, err) + assert.Equal(t, phaseInfo.Version(), pluginsCore.DefaultPhaseVersion+1) +} + +func 
TestGetEventInfo_LogTemplates(t *testing.T) { + pluginCtx := rayPluginContext(k8s.PluginState{}) + testCases := []struct { + name string + rayJob rayv1.RayJob + logPlugin tasklog.TemplateLogPlugin + expectedTaskLogs []*core.TaskLog + }{ + { + name: "namespace", + rayJob: rayv1.RayJob{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-namespace", + }, + }, + logPlugin: tasklog.TemplateLogPlugin{ + DisplayName: "namespace", + TemplateURIs: []tasklog.TemplateURI{"http://test/{{ .namespace }}"}, + }, + expectedTaskLogs: []*core.TaskLog{ + { + Name: "namespace", + Uri: "http://test/test-namespace", + Ready: true, + }, + }, + }, + { + name: "task execution ID", + rayJob: rayv1.RayJob{}, + logPlugin: tasklog.TemplateLogPlugin{ + DisplayName: "taskExecID", + TemplateURIs: []tasklog.TemplateURI{ + "http://test/projects/{{ .executionProject }}/domains/{{ .executionDomain }}/executions/{{ .executionName }}/nodeId/{{ .nodeID }}/taskId/{{ .taskID }}/attempt/{{ .taskRetryAttempt }}", + }, + }, + expectedTaskLogs: []*core.TaskLog{ + { + Name: "taskExecID", + Uri: "http://test/projects/my-execution-project/domains/my-execution-domain/executions/my-execution-name/nodeId/unique-node/taskId/my-task-name/attempt/1", + Ready: true, + }, + }, + }, + { + name: "ray cluster name", + rayJob: rayv1.RayJob{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-namespace", + }, + Status: rayv1.RayJobStatus{ + RayClusterName: "ray-cluster", + }, + }, + logPlugin: tasklog.TemplateLogPlugin{ + DisplayName: "ray cluster name", + TemplateURIs: []tasklog.TemplateURI{"http://test/{{ .namespace }}/{{ .rayClusterName }}"}, + }, + expectedTaskLogs: []*core.TaskLog{ + { + Name: "ray cluster name", + Uri: "http://test/test-namespace/ray-cluster", + Ready: true, + }, + }, + }, + { + name: "ray job ID", + rayJob: rayv1.RayJob{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-namespace", + }, + Status: rayv1.RayJobStatus{ + JobId: "ray-job-1", + }, + }, + logPlugin: tasklog.TemplateLogPlugin{ + 
DisplayName: "ray job ID", + TemplateURIs: []tasklog.TemplateURI{"http://test/{{ .namespace }}/{{ .rayJobID }}"}, + }, + expectedTaskLogs: []*core.TaskLog{ + { + Name: "ray job ID", + Uri: "http://test/test-namespace/ray-job-1", + Ready: true, + }, + }, + }, + } + + for i := range testCases { + tc := testCases[i] + t.Run(tc.name, func(t *testing.T) { + ti, err := getEventInfoForRayJob( + context.TODO(), + logs.LogConfig{Templates: []tasklog.TemplateLogPlugin{tc.logPlugin}}, + pluginCtx, + &tc.rayJob, + ) + assert.NoError(t, err) + assert.Equal(t, tc.expectedTaskLogs, ti.Logs) + }) + } +} + +func TestGetEventInfo_LogTemplates_V1(t *testing.T) { + pluginCtx := rayPluginContext(k8s.PluginState{}) + testCases := []struct { + name string + rayJob rayv1.RayJob + logPlugin tasklog.TemplateLogPlugin + expectedTaskLogs []*core.TaskLog + }{ + { + name: "namespace", + rayJob: rayv1.RayJob{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-namespace", + }, + }, + logPlugin: tasklog.TemplateLogPlugin{ + DisplayName: "namespace", + TemplateURIs: []tasklog.TemplateURI{"http://test/{{ .namespace }}"}, + }, + expectedTaskLogs: []*core.TaskLog{ + { + Name: "namespace", + Uri: "http://test/test-namespace", + Ready: true, + }, + }, + }, + { + name: "task execution ID", + rayJob: rayv1.RayJob{}, + logPlugin: tasklog.TemplateLogPlugin{ + DisplayName: "taskExecID", + TemplateURIs: []tasklog.TemplateURI{ + "http://test/projects/{{ .executionProject }}/domains/{{ .executionDomain }}/executions/{{ .executionName }}/nodeId/{{ .nodeID }}/taskId/{{ .taskID }}/attempt/{{ .taskRetryAttempt }}", + }, + }, + expectedTaskLogs: []*core.TaskLog{ + { + Name: "taskExecID", + Uri: "http://test/projects/my-execution-project/domains/my-execution-domain/executions/my-execution-name/nodeId/unique-node/taskId/my-task-name/attempt/1", + Ready: true, + }, + }, + }, + { + name: "ray cluster name", + rayJob: rayv1.RayJob{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-namespace", + }, + Status: 
rayv1.RayJobStatus{ + RayClusterName: "ray-cluster", + }, + }, + logPlugin: tasklog.TemplateLogPlugin{ + DisplayName: "ray cluster name", + TemplateURIs: []tasklog.TemplateURI{"http://test/{{ .namespace }}/{{ .rayClusterName }}"}, + }, + expectedTaskLogs: []*core.TaskLog{ + { + Name: "ray cluster name", + Uri: "http://test/test-namespace/ray-cluster", + Ready: true, + }, + }, + }, + { + name: "ray job ID", + rayJob: rayv1.RayJob{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "test-namespace", + }, + Status: rayv1.RayJobStatus{ + JobId: "ray-job-1", + }, + }, + logPlugin: tasklog.TemplateLogPlugin{ + DisplayName: "ray job ID", + TemplateURIs: []tasklog.TemplateURI{"http://test/{{ .namespace }}/{{ .rayJobID }}"}, + }, + expectedTaskLogs: []*core.TaskLog{ + { + Name: "ray job ID", + Uri: "http://test/test-namespace/ray-job-1", + Ready: true, + }, + }, + }, + } + + for i := range testCases { + tc := testCases[i] + t.Run(tc.name, func(t *testing.T) { + ti, err := getEventInfoForRayJob( + context.TODO(), + logs.LogConfig{Templates: []tasklog.TemplateLogPlugin{tc.logPlugin}}, + pluginCtx, + &tc.rayJob, + ) + assert.NoError(t, err) + assert.Equal(t, tc.expectedTaskLogs, ti.Logs) + }) + } +} + +func TestGetEventInfo_DashboardURL(t *testing.T) { + pluginCtx := rayPluginContext(k8s.PluginState{}) + testCases := []struct { + name string + rayJob rayv1.RayJob + dashboardURLTemplate tasklog.TemplateLogPlugin + expectedTaskLogs []*core.TaskLog + }{ + { + name: "dashboard URL displayed", + rayJob: rayv1.RayJob{ + Status: rayv1.RayJobStatus{ + DashboardURL: "exists", + JobStatus: rayv1.JobStatusRunning, + }, + }, + dashboardURLTemplate: tasklog.TemplateLogPlugin{ + DisplayName: "Ray Dashboard", + TemplateURIs: []tasklog.TemplateURI{"http://test/{{.generatedName}}"}, + }, + expectedTaskLogs: []*core.TaskLog{ + { + Name: "Ray Dashboard", + Uri: "http://test/generated-name", + LinkType: core.TaskLog_DASHBOARD, + Ready: true, + }, + }, + }, + { + name: "dashboard URL is not displayed", 
+ rayJob: rayv1.RayJob{ + Status: rayv1.RayJobStatus{ + JobStatus: rayv1.JobStatusPending, + }, + }, + dashboardURLTemplate: tasklog.TemplateLogPlugin{ + DisplayName: "dummy", + TemplateURIs: []tasklog.TemplateURI{"http://dummy"}, + }, + expectedTaskLogs: nil, + }, + } + + for i := range testCases { + tc := testCases[i] + t.Run(tc.name, func(t *testing.T) { + assert.NoError(t, SetConfig(&Config{DashboardURLTemplate: &tc.dashboardURLTemplate})) + ti, err := getEventInfoForRayJob(context.TODO(), logs.LogConfig{}, pluginCtx, &tc.rayJob) + assert.NoError(t, err) + assert.Equal(t, tc.expectedTaskLogs, ti.Logs) + }) + } +} + +func TestGetEventInfo_DashboardURL_V1(t *testing.T) { + pluginCtx := rayPluginContext(k8s.PluginState{}) + testCases := []struct { + name string + rayJob rayv1.RayJob + dashboardURLTemplate tasklog.TemplateLogPlugin + expectedTaskLogs []*core.TaskLog + }{ + { + name: "dashboard URL displayed", + rayJob: rayv1.RayJob{ + Status: rayv1.RayJobStatus{ + DashboardURL: "exists", + JobStatus: rayv1.JobStatusRunning, + }, + }, + dashboardURLTemplate: tasklog.TemplateLogPlugin{ + DisplayName: "Ray Dashboard", + TemplateURIs: []tasklog.TemplateURI{"http://test/{{.generatedName}}"}, + }, + expectedTaskLogs: []*core.TaskLog{ + { + Name: "Ray Dashboard", + Uri: "http://test/generated-name", + LinkType: core.TaskLog_DASHBOARD, + Ready: true, + }, + }, + }, + { + name: "dashboard URL is not displayed", + rayJob: rayv1.RayJob{ + Status: rayv1.RayJobStatus{ + JobStatus: rayv1.JobStatusPending, + }, + }, + dashboardURLTemplate: tasklog.TemplateLogPlugin{ + DisplayName: "dummy", + TemplateURIs: []tasklog.TemplateURI{"http://dummy"}, + }, + expectedTaskLogs: nil, + }, + } + + for i := range testCases { + tc := testCases[i] + t.Run(tc.name, func(t *testing.T) { + assert.NoError(t, SetConfig(&Config{DashboardURLTemplate: &tc.dashboardURLTemplate})) + ti, err := getEventInfoForRayJob(context.TODO(), logs.LogConfig{}, pluginCtx, &tc.rayJob) + assert.NoError(t, err) + 
assert.Equal(t, tc.expectedTaskLogs, ti.Logs) + }) + } +} + +func TestGetPropertiesRay(t *testing.T) { + rayJobResourceHandler := rayJobResourceHandler{} + maxLength := 47 + expected := k8s.PluginProperties{GeneratedNameMaxLength: &maxLength} + assert.Equal(t, expected, rayJobResourceHandler.GetProperties()) +} + +func rayPluginContext(pluginState k8s.PluginState) *k8smocks.PluginContext { + pluginCtx := newPluginContext(pluginState) + startTime := time.Date(2024, 0, 0, 0, 0, 0, 0, time.UTC) + endTime := startTime.Add(time.Hour) + podName, contName, initCont := "ray-clust-ray-head", "ray-head", "init" + podList := []runtime.Object{ + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "initializing ignored pod"}, + Status: corev1.PodStatus{Phase: corev1.PodPending}, + }, + &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: podName}, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: contName}, + }, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodRunning, + ContainerStatuses: []corev1.ContainerStatus{ + { + Name: contName, + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + StartedAt: metav1.Time{Time: startTime}, + FinishedAt: metav1.Time{Time: endTime}, + }, + }, + }, + }, + InitContainerStatuses: []corev1.ContainerStatus{ + { + Name: initCont, + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + StartedAt: metav1.Time{Time: startTime}, + FinishedAt: metav1.Time{Time: endTime}, + }, + }, + }, + }, + }, + }, + } + reader := fake.NewFakeClient(podList...) 
+ pluginCtx.OnK8sReader().Return(reader) + return pluginCtx +} + +func transformStructToStructPB(t *testing.T, obj interface{}) *structpb.Struct { + data, err := json.Marshal(obj) + assert.Nil(t, err) + podSpecMap := make(map[string]interface{}) + err = json.Unmarshal(data, &podSpecMap) + assert.Nil(t, err) + s, err := structpb.NewStruct(podSpecMap) + assert.Nil(t, err) + return s +} + +func rayPluginContextWithPods(pluginState k8s.PluginState, pods ...runtime.Object) *k8smocks.PluginContext { + pluginCtx := newPluginContext(pluginState) + reader := fake.NewFakeClient(pods...) + pluginCtx.OnK8sReader().Return(reader) + return pluginCtx +} + +func TestGetTaskPhaseWithFailedPod(t *testing.T) { + rayJobResourceHandler := rayJobResourceHandler{} + ctx := context.Background() + + rayJobName := "test-rayjob" + rayClusterName := "test-raycluster" + // Create a failed head pod - name must match pattern used in GetTaskPhase: {RayClusterName}-head + pod := &corev1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: rayClusterName + "-head", + Namespace: "ns", + }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: RayHeadContainerName, + }, + }, + }, + Status: corev1.PodStatus{ + Phase: corev1.PodFailed, + ContainerStatuses: []corev1.ContainerStatus{ + { + Name: RayHeadContainerName, + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + ExitCode: 1, + Reason: "Error", + Message: "Container failed", + }, + }, + }, + }, + }, + } + + pluginCtx := rayPluginContextWithPods(k8s.PluginState{}, pod) + + startTime := metav1.NewTime(time.Now()) + rayObject := &rayv1.RayJob{ + ObjectMeta: metav1.ObjectMeta{ + Name: rayJobName, + Namespace: "ns", + }, + Spec: rayv1.RayJobSpec{ + RayClusterSpec: &rayv1.RayClusterSpec{ + HeadGroupSpec: rayv1.HeadGroupSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: RayHeadContainerName, + Image: "rayproject/ray:latest", + Env: []corev1.EnvVar{}, + }, + 
}, + }, + }, + }, + }, + }, + Status: rayv1.RayJobStatus{ + JobDeploymentStatus: rayv1.JobDeploymentStatusRunning, + RayClusterName: rayClusterName, + StartTime: &startTime, + }, + } + + // Even though RayJob status is running, should return failure due to pod status + phaseInfo, err := rayJobResourceHandler.GetTaskPhase(ctx, pluginCtx, rayObject) + assert.NoError(t, err) + assert.True(t, phaseInfo.Phase().IsFailure()) +} + +func TestGetTaskPhaseContainerNameConstant(t *testing.T) { + rayJobResourceHandler := rayJobResourceHandler{} + ctx := context.Background() + pluginCtx := rayPluginContext(k8s.PluginState{}) + + startTime := metav1.NewTime(time.Now()) + rayObject := &rayv1.RayJob{ + Spec: rayv1.RayJobSpec{ + RayClusterSpec: &rayv1.RayClusterSpec{ + HeadGroupSpec: rayv1.HeadGroupSpec{ + Template: corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: RayHeadContainerName, + Image: "rayproject/ray:latest", + Env: []corev1.EnvVar{}, + }, + }, + }, + }, + }, + }, + }, + Status: rayv1.RayJobStatus{ + JobDeploymentStatus: rayv1.JobDeploymentStatusComplete, + RayClusterName: "ray-clust", + StartTime: &startTime, + }, + } + + phaseInfo, err := rayJobResourceHandler.GetTaskPhase(ctx, pluginCtx, rayObject) + assert.NoError(t, err) + assert.NotNil(t, phaseInfo.Info()) + assert.NotNil(t, phaseInfo.Info().LogContext) + + // Verify the constant is used for head container names + assert.Equal(t, 1, len(phaseInfo.Info().LogContext.Pods)) + headPodLogContext := phaseInfo.Info().LogContext.Pods[0] + assert.Equal(t, RayHeadContainerName, headPodLogContext.PrimaryContainerName) + assert.Equal(t, 1, len(headPodLogContext.Containers)) + assert.Equal(t, RayHeadContainerName, headPodLogContext.Containers[0].ContainerName) +} diff --git a/flyteplugins/go/tasks/plugins/k8s/ray/testdata/config.yaml b/flyteplugins/go/tasks/plugins/k8s/ray/testdata/config.yaml new file mode 100644 index 0000000000..cc5ddb5c33 --- /dev/null +++ 
b/flyteplugins/go/tasks/plugins/k8s/ray/testdata/config.yaml @@ -0,0 +1,7 @@ +plugins: + ray: + remoteClusterConfig: + endpoint: 127.0.0.1 + auth: + tokenPath: /path/token + caCertPath: /path/cert \ No newline at end of file diff --git a/flyteplugins/go/tasks/plugins/k8s/spark/config.go b/flyteplugins/go/tasks/plugins/k8s/spark/config.go new file mode 100644 index 0000000000..65525fff21 --- /dev/null +++ b/flyteplugins/go/tasks/plugins/k8s/spark/config.go @@ -0,0 +1,51 @@ +package spark + +import ( + pluginsConfig "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/config" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/logs" +) + +//go:generate pflags Config --default-var=defaultConfig + +var ( + defaultConfig = &Config{ + LogConfig: LogConfig{ + Mixed: logs.LogConfig{ + IsKubernetesEnabled: true, + KubernetesTemplateURI: "http://localhost:30082/#!/log/{{ .namespace }}/{{ .podName }}/pod?namespace={{ .namespace }}", + }, + }, + } + + sparkConfigSection = pluginsConfig.MustRegisterSubSection("spark", defaultConfig) +) + +// Spark-specific configs +type Config struct { + DefaultSparkConfig map[string]string `json:"spark-config-default" pflag:"-,Key value pairs of default spark configuration that should be applied to every SparkJob"` + SparkHistoryServerURL string `json:"spark-history-server-url" pflag:",URL for SparkHistory Server that each job will publish the execution history to."` + Features []Feature `json:"features" pflag:"-,List of optional features supported."` + LogConfig LogConfig `json:"logs" pflag:",Config for log links for spark applications."` +} + +type LogConfig struct { + Mixed logs.LogConfig `json:"mixed" pflag:",Defines the log config that's not split into user/system."` + User logs.LogConfig `json:"user" pflag:",Defines the log config for user logs."` + System logs.LogConfig `json:"system" pflag:",Defines the log config for system logs."` + AllUser logs.LogConfig `json:"all-user" pflag:",All user logs across driver and executors."` +} + +// 
Optional feature with name and corresponding spark-config to use. +type Feature struct { + Name string `json:"name"` + SparkConfig map[string]string `json:"spark-config"` +} + +func GetSparkConfig() *Config { + return sparkConfigSection.GetConfig().(*Config) +} + +// This method should be used for unit testing only +func setSparkConfig(cfg *Config) error { + return sparkConfigSection.SetConfig(cfg) +} diff --git a/flyteplugins/go/tasks/plugins/k8s/spark/config_flags.go b/flyteplugins/go/tasks/plugins/k8s/spark/config_flags.go new file mode 100755 index 0000000000..d5d6945f71 --- /dev/null +++ b/flyteplugins/go/tasks/plugins/k8s/spark/config_flags.go @@ -0,0 +1,99 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package spark + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. +func (Config) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (Config) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (Config) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in Config and its nested types. The format of the +// flags is json-name.json-sub-name... etc. 
+func (cfg Config) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("Config", pflag.ExitOnError) + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "spark-history-server-url"), defaultConfig.SparkHistoryServerURL, "URL for SparkHistory Server that each job will publish the execution history to.") + cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "logs.mixed.cloudwatch-enabled"), defaultConfig.LogConfig.Mixed.IsCloudwatchEnabled, "Enable Cloudwatch Logging") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.mixed.cloudwatch-region"), defaultConfig.LogConfig.Mixed.CloudwatchRegion, "AWS region in which Cloudwatch logs are stored.") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.mixed.cloudwatch-log-group"), defaultConfig.LogConfig.Mixed.CloudwatchLogGroup, "Log group to which streams are associated.") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.mixed.cloudwatch-template-uri"), defaultConfig.LogConfig.Mixed.CloudwatchTemplateURI, "Template Uri to use when building cloudwatch log links") + cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "logs.mixed.kubernetes-enabled"), defaultConfig.LogConfig.Mixed.IsKubernetesEnabled, "Enable Kubernetes Logging") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.mixed.kubernetes-url"), defaultConfig.LogConfig.Mixed.KubernetesURL, "Console URL for Kubernetes logs") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.mixed.kubernetes-template-uri"), defaultConfig.LogConfig.Mixed.KubernetesTemplateURI, "Template Uri to use when building kubernetes log links") + cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "logs.mixed.stackdriver-enabled"), defaultConfig.LogConfig.Mixed.IsStackDriverEnabled, "Enable Log-links to stackdriver") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.mixed.gcp-project"), defaultConfig.LogConfig.Mixed.GCPProjectName, "Name of the project in GCP") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.mixed.stackdriver-logresourcename"), 
defaultConfig.LogConfig.Mixed.StackdriverLogResourceName, "Name of the logresource in stackdriver") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.mixed.stackdriver-template-uri"), defaultConfig.LogConfig.Mixed.StackDriverTemplateURI, "Template Uri to use when building stackdriver log links") + cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "logs.user.cloudwatch-enabled"), defaultConfig.LogConfig.User.IsCloudwatchEnabled, "Enable Cloudwatch Logging") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.user.cloudwatch-region"), defaultConfig.LogConfig.User.CloudwatchRegion, "AWS region in which Cloudwatch logs are stored.") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.user.cloudwatch-log-group"), defaultConfig.LogConfig.User.CloudwatchLogGroup, "Log group to which streams are associated.") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.user.cloudwatch-template-uri"), defaultConfig.LogConfig.User.CloudwatchTemplateURI, "Template Uri to use when building cloudwatch log links") + cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "logs.user.kubernetes-enabled"), defaultConfig.LogConfig.User.IsKubernetesEnabled, "Enable Kubernetes Logging") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.user.kubernetes-url"), defaultConfig.LogConfig.User.KubernetesURL, "Console URL for Kubernetes logs") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.user.kubernetes-template-uri"), defaultConfig.LogConfig.User.KubernetesTemplateURI, "Template Uri to use when building kubernetes log links") + cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "logs.user.stackdriver-enabled"), defaultConfig.LogConfig.User.IsStackDriverEnabled, "Enable Log-links to stackdriver") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.user.gcp-project"), defaultConfig.LogConfig.User.GCPProjectName, "Name of the project in GCP") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.user.stackdriver-logresourcename"), defaultConfig.LogConfig.User.StackdriverLogResourceName, "Name of the logresource in 
stackdriver") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.user.stackdriver-template-uri"), defaultConfig.LogConfig.User.StackDriverTemplateURI, "Template Uri to use when building stackdriver log links") + cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "logs.system.cloudwatch-enabled"), defaultConfig.LogConfig.System.IsCloudwatchEnabled, "Enable Cloudwatch Logging") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.system.cloudwatch-region"), defaultConfig.LogConfig.System.CloudwatchRegion, "AWS region in which Cloudwatch logs are stored.") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.system.cloudwatch-log-group"), defaultConfig.LogConfig.System.CloudwatchLogGroup, "Log group to which streams are associated.") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.system.cloudwatch-template-uri"), defaultConfig.LogConfig.System.CloudwatchTemplateURI, "Template Uri to use when building cloudwatch log links") + cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "logs.system.kubernetes-enabled"), defaultConfig.LogConfig.System.IsKubernetesEnabled, "Enable Kubernetes Logging") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.system.kubernetes-url"), defaultConfig.LogConfig.System.KubernetesURL, "Console URL for Kubernetes logs") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.system.kubernetes-template-uri"), defaultConfig.LogConfig.System.KubernetesTemplateURI, "Template Uri to use when building kubernetes log links") + cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "logs.system.stackdriver-enabled"), defaultConfig.LogConfig.System.IsStackDriverEnabled, "Enable Log-links to stackdriver") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.system.gcp-project"), defaultConfig.LogConfig.System.GCPProjectName, "Name of the project in GCP") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.system.stackdriver-logresourcename"), defaultConfig.LogConfig.System.StackdriverLogResourceName, "Name of the logresource in stackdriver") + cmdFlags.String(fmt.Sprintf("%v%v", 
prefix, "logs.system.stackdriver-template-uri"), defaultConfig.LogConfig.System.StackDriverTemplateURI, "Template Uri to use when building stackdriver log links") + cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "logs.all-user.cloudwatch-enabled"), defaultConfig.LogConfig.AllUser.IsCloudwatchEnabled, "Enable Cloudwatch Logging") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.all-user.cloudwatch-region"), defaultConfig.LogConfig.AllUser.CloudwatchRegion, "AWS region in which Cloudwatch logs are stored.") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.all-user.cloudwatch-log-group"), defaultConfig.LogConfig.AllUser.CloudwatchLogGroup, "Log group to which streams are associated.") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.all-user.cloudwatch-template-uri"), defaultConfig.LogConfig.AllUser.CloudwatchTemplateURI, "Template Uri to use when building cloudwatch log links") + cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "logs.all-user.kubernetes-enabled"), defaultConfig.LogConfig.AllUser.IsKubernetesEnabled, "Enable Kubernetes Logging") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.all-user.kubernetes-url"), defaultConfig.LogConfig.AllUser.KubernetesURL, "Console URL for Kubernetes logs") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.all-user.kubernetes-template-uri"), defaultConfig.LogConfig.AllUser.KubernetesTemplateURI, "Template Uri to use when building kubernetes log links") + cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "logs.all-user.stackdriver-enabled"), defaultConfig.LogConfig.AllUser.IsStackDriverEnabled, "Enable Log-links to stackdriver") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.all-user.gcp-project"), defaultConfig.LogConfig.AllUser.GCPProjectName, "Name of the project in GCP") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "logs.all-user.stackdriver-logresourcename"), defaultConfig.LogConfig.AllUser.StackdriverLogResourceName, "Name of the logresource in stackdriver") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, 
"logs.all-user.stackdriver-template-uri"), defaultConfig.LogConfig.AllUser.StackDriverTemplateURI, "Template Uri to use when building stackdriver log links") + return cmdFlags +} diff --git a/flyteplugins/go/tasks/plugins/k8s/spark/config_flags_test.go b/flyteplugins/go/tasks/plugins/k8s/spark/config_flags_test.go new file mode 100755 index 0000000000..ea8659a48a --- /dev/null +++ b/flyteplugins/go/tasks/plugins/k8s/spark/config_flags_test.go @@ -0,0 +1,732 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package spark + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. +func canGetElementConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. +func jsonUnmarshalerHookConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. 
Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_Config(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_Config(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_Config(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_Config(val, result)) +} + +func testDecodeRaw_Config(t *testing.T, vStringSlice, result interface{}) { + assert.NoError(t, decode_Config(vStringSlice, result)) +} + +func TestConfig_GetPFlagSet(t *testing.T) { + val := Config{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) +} + +func TestConfig_SetFlags(t *testing.T) { + actual := Config{} + cmdFlags := actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_spark-history-server-url", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("spark-history-server-url", testValue) + if vString, err := cmdFlags.GetString("spark-history-server-url"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.SparkHistoryServerURL) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.mixed.cloudwatch-enabled", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.mixed.cloudwatch-enabled", testValue) + 
if vBool, err := cmdFlags.GetBool("logs.mixed.cloudwatch-enabled"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vBool), &actual.LogConfig.Mixed.IsCloudwatchEnabled) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.mixed.cloudwatch-region", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.mixed.cloudwatch-region", testValue) + if vString, err := cmdFlags.GetString("logs.mixed.cloudwatch-region"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.LogConfig.Mixed.CloudwatchRegion) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.mixed.cloudwatch-log-group", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.mixed.cloudwatch-log-group", testValue) + if vString, err := cmdFlags.GetString("logs.mixed.cloudwatch-log-group"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.LogConfig.Mixed.CloudwatchLogGroup) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.mixed.cloudwatch-template-uri", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.mixed.cloudwatch-template-uri", testValue) + if vString, err := cmdFlags.GetString("logs.mixed.cloudwatch-template-uri"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.LogConfig.Mixed.CloudwatchTemplateURI) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.mixed.kubernetes-enabled", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.mixed.kubernetes-enabled", testValue) + if vBool, err := cmdFlags.GetBool("logs.mixed.kubernetes-enabled"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vBool), &actual.LogConfig.Mixed.IsKubernetesEnabled) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + 
t.Run("Test_logs.mixed.kubernetes-url", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.mixed.kubernetes-url", testValue) + if vString, err := cmdFlags.GetString("logs.mixed.kubernetes-url"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.LogConfig.Mixed.KubernetesURL) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.mixed.kubernetes-template-uri", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.mixed.kubernetes-template-uri", testValue) + if vString, err := cmdFlags.GetString("logs.mixed.kubernetes-template-uri"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.LogConfig.Mixed.KubernetesTemplateURI) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.mixed.stackdriver-enabled", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.mixed.stackdriver-enabled", testValue) + if vBool, err := cmdFlags.GetBool("logs.mixed.stackdriver-enabled"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vBool), &actual.LogConfig.Mixed.IsStackDriverEnabled) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.mixed.gcp-project", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.mixed.gcp-project", testValue) + if vString, err := cmdFlags.GetString("logs.mixed.gcp-project"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.LogConfig.Mixed.GCPProjectName) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.mixed.stackdriver-logresourcename", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.mixed.stackdriver-logresourcename", testValue) + if vString, err := 
cmdFlags.GetString("logs.mixed.stackdriver-logresourcename"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.LogConfig.Mixed.StackdriverLogResourceName) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.mixed.stackdriver-template-uri", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.mixed.stackdriver-template-uri", testValue) + if vString, err := cmdFlags.GetString("logs.mixed.stackdriver-template-uri"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.LogConfig.Mixed.StackDriverTemplateURI) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.user.cloudwatch-enabled", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.user.cloudwatch-enabled", testValue) + if vBool, err := cmdFlags.GetBool("logs.user.cloudwatch-enabled"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vBool), &actual.LogConfig.User.IsCloudwatchEnabled) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.user.cloudwatch-region", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.user.cloudwatch-region", testValue) + if vString, err := cmdFlags.GetString("logs.user.cloudwatch-region"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.LogConfig.User.CloudwatchRegion) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.user.cloudwatch-log-group", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.user.cloudwatch-log-group", testValue) + if vString, err := cmdFlags.GetString("logs.user.cloudwatch-log-group"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.LogConfig.User.CloudwatchLogGroup) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + 
t.Run("Test_logs.user.cloudwatch-template-uri", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.user.cloudwatch-template-uri", testValue) + if vString, err := cmdFlags.GetString("logs.user.cloudwatch-template-uri"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.LogConfig.User.CloudwatchTemplateURI) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.user.kubernetes-enabled", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.user.kubernetes-enabled", testValue) + if vBool, err := cmdFlags.GetBool("logs.user.kubernetes-enabled"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vBool), &actual.LogConfig.User.IsKubernetesEnabled) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.user.kubernetes-url", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.user.kubernetes-url", testValue) + if vString, err := cmdFlags.GetString("logs.user.kubernetes-url"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.LogConfig.User.KubernetesURL) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.user.kubernetes-template-uri", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.user.kubernetes-template-uri", testValue) + if vString, err := cmdFlags.GetString("logs.user.kubernetes-template-uri"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.LogConfig.User.KubernetesTemplateURI) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.user.stackdriver-enabled", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.user.stackdriver-enabled", testValue) + if vBool, err := cmdFlags.GetBool("logs.user.stackdriver-enabled"); 
err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vBool), &actual.LogConfig.User.IsStackDriverEnabled) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.user.gcp-project", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.user.gcp-project", testValue) + if vString, err := cmdFlags.GetString("logs.user.gcp-project"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.LogConfig.User.GCPProjectName) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.user.stackdriver-logresourcename", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.user.stackdriver-logresourcename", testValue) + if vString, err := cmdFlags.GetString("logs.user.stackdriver-logresourcename"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.LogConfig.User.StackdriverLogResourceName) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.user.stackdriver-template-uri", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.user.stackdriver-template-uri", testValue) + if vString, err := cmdFlags.GetString("logs.user.stackdriver-template-uri"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.LogConfig.User.StackDriverTemplateURI) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.system.cloudwatch-enabled", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.system.cloudwatch-enabled", testValue) + if vBool, err := cmdFlags.GetBool("logs.system.cloudwatch-enabled"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vBool), &actual.LogConfig.System.IsCloudwatchEnabled) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.system.cloudwatch-region", func(t *testing.T) 
{ + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.system.cloudwatch-region", testValue) + if vString, err := cmdFlags.GetString("logs.system.cloudwatch-region"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.LogConfig.System.CloudwatchRegion) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.system.cloudwatch-log-group", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.system.cloudwatch-log-group", testValue) + if vString, err := cmdFlags.GetString("logs.system.cloudwatch-log-group"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.LogConfig.System.CloudwatchLogGroup) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.system.cloudwatch-template-uri", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.system.cloudwatch-template-uri", testValue) + if vString, err := cmdFlags.GetString("logs.system.cloudwatch-template-uri"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.LogConfig.System.CloudwatchTemplateURI) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.system.kubernetes-enabled", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.system.kubernetes-enabled", testValue) + if vBool, err := cmdFlags.GetBool("logs.system.kubernetes-enabled"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vBool), &actual.LogConfig.System.IsKubernetesEnabled) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.system.kubernetes-url", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.system.kubernetes-url", testValue) + if vString, err := cmdFlags.GetString("logs.system.kubernetes-url"); err == nil { + 
testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.LogConfig.System.KubernetesURL) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.system.kubernetes-template-uri", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.system.kubernetes-template-uri", testValue) + if vString, err := cmdFlags.GetString("logs.system.kubernetes-template-uri"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.LogConfig.System.KubernetesTemplateURI) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.system.stackdriver-enabled", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.system.stackdriver-enabled", testValue) + if vBool, err := cmdFlags.GetBool("logs.system.stackdriver-enabled"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vBool), &actual.LogConfig.System.IsStackDriverEnabled) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.system.gcp-project", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.system.gcp-project", testValue) + if vString, err := cmdFlags.GetString("logs.system.gcp-project"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.LogConfig.System.GCPProjectName) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.system.stackdriver-logresourcename", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.system.stackdriver-logresourcename", testValue) + if vString, err := cmdFlags.GetString("logs.system.stackdriver-logresourcename"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.LogConfig.System.StackdriverLogResourceName) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.system.stackdriver-template-uri", func(t 
*testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.system.stackdriver-template-uri", testValue) + if vString, err := cmdFlags.GetString("logs.system.stackdriver-template-uri"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.LogConfig.System.StackDriverTemplateURI) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.all-user.cloudwatch-enabled", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.all-user.cloudwatch-enabled", testValue) + if vBool, err := cmdFlags.GetBool("logs.all-user.cloudwatch-enabled"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vBool), &actual.LogConfig.AllUser.IsCloudwatchEnabled) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.all-user.cloudwatch-region", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.all-user.cloudwatch-region", testValue) + if vString, err := cmdFlags.GetString("logs.all-user.cloudwatch-region"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.LogConfig.AllUser.CloudwatchRegion) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.all-user.cloudwatch-log-group", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.all-user.cloudwatch-log-group", testValue) + if vString, err := cmdFlags.GetString("logs.all-user.cloudwatch-log-group"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.LogConfig.AllUser.CloudwatchLogGroup) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.all-user.cloudwatch-template-uri", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.all-user.cloudwatch-template-uri", testValue) + if vString, err := 
cmdFlags.GetString("logs.all-user.cloudwatch-template-uri"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.LogConfig.AllUser.CloudwatchTemplateURI) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.all-user.kubernetes-enabled", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.all-user.kubernetes-enabled", testValue) + if vBool, err := cmdFlags.GetBool("logs.all-user.kubernetes-enabled"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vBool), &actual.LogConfig.AllUser.IsKubernetesEnabled) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.all-user.kubernetes-url", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.all-user.kubernetes-url", testValue) + if vString, err := cmdFlags.GetString("logs.all-user.kubernetes-url"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.LogConfig.AllUser.KubernetesURL) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.all-user.kubernetes-template-uri", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.all-user.kubernetes-template-uri", testValue) + if vString, err := cmdFlags.GetString("logs.all-user.kubernetes-template-uri"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.LogConfig.AllUser.KubernetesTemplateURI) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.all-user.stackdriver-enabled", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.all-user.stackdriver-enabled", testValue) + if vBool, err := cmdFlags.GetBool("logs.all-user.stackdriver-enabled"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vBool), &actual.LogConfig.AllUser.IsStackDriverEnabled) + + } else { + assert.FailNow(t, 
err.Error()) + } + }) + }) + t.Run("Test_logs.all-user.gcp-project", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.all-user.gcp-project", testValue) + if vString, err := cmdFlags.GetString("logs.all-user.gcp-project"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.LogConfig.AllUser.GCPProjectName) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.all-user.stackdriver-logresourcename", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.all-user.stackdriver-logresourcename", testValue) + if vString, err := cmdFlags.GetString("logs.all-user.stackdriver-logresourcename"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.LogConfig.AllUser.StackdriverLogResourceName) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_logs.all-user.stackdriver-template-uri", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("logs.all-user.stackdriver-template-uri", testValue) + if vString, err := cmdFlags.GetString("logs.all-user.stackdriver-template-uri"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.LogConfig.AllUser.StackDriverTemplateURI) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flyteplugins/go/tasks/plugins/k8s/spark/spark.go b/flyteplugins/go/tasks/plugins/k8s/spark/spark.go new file mode 100644 index 0000000000..6021a5d56b --- /dev/null +++ b/flyteplugins/go/tasks/plugins/k8s/spark/spark.go @@ -0,0 +1,621 @@ +package spark + +import ( + "context" + "fmt" + "regexp" + "strconv" + "strings" + "time" + + sparkOp "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/apis/sparkoperator.k8s.io/v1beta2" + sparkOpConfig "github.com/GoogleCloudPlatform/spark-on-k8s-operator/pkg/config" + v1 "k8s.io/api/core/v1" + metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/scheme"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/errors"
	"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/logs"
	"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery"
	pluginsCore "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core"
	"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/flytek8s"
	"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/flytek8s/config"
	"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/k8s"
	"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/tasklog"
	"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/utils"
	"github.com/flyteorg/flyte/v2/flytestdlib/logger"
	"github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core"
	"github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins"
)

// Kind of the Kubernetes custom resource created for a spark task.
const KindSparkApplication = "SparkApplication"

// Identifiers for the log links surfaced for a spark application.
const sparkDriverUI = "sparkDriverUI"
const sparkHistoryUI = "sparkHistoryUI"

const defaultDriverPrimaryContainerName = "spark-kubernetes-driver"

// featureRegex recognizes spark-conf keys of the form
// "spark.flyteorg.<feature>.enabled" or "spark.flyte.<feature>.enabled".
// Fix: the structural dots were previously unescaped, so "." matched ANY
// character and keys such as "sparkXflyteorgXfooXenabled" were misclassified
// as feature toggles. Escaping the dots narrows the match to the intended
// key shape only; every previously well-formed key still matches.
var featureRegex = regexp.MustCompile(`^spark\.((flyteorg)|(flyte))\.(.+)\.enabled$`)

var sparkTaskType = "spark"

// sparkResourceHandler builds SparkApplication resources for spark task executions.
type sparkResourceHandler struct {
}

// validateSparkJob checks the minimal invariants of a decoded SparkJob spec:
// the spec is non-nil and at least one of MainApplicationFile / MainClass is set.
func validateSparkJob(sparkJob *plugins.SparkJob) error {
	if sparkJob == nil {
		return fmt.Errorf("empty sparkJob")
	}

	if len(sparkJob.MainApplicationFile) == 0 && len(sparkJob.MainClass) == 0 {
		return fmt.Errorf("either MainApplicationFile or MainClass must be set")
	}

	return nil
}

// GetProperties reports this plugin's machinery properties (defaults — no overrides).
func (sparkResourceHandler) GetProperties() k8s.PluginProperties {
	return k8s.PluginProperties{}
}

// Creates a new Job that will execute the main container as well as any generated types the result from the execution.
+func (sparkResourceHandler) BuildResource(ctx context.Context, taskCtx pluginsCore.TaskExecutionContext) (client.Object, error) { + taskTemplate, err := taskCtx.TaskReader().Read(ctx) + if err != nil { + return nil, errors.Errorf(errors.BadTaskSpecification, "unable to fetch task specification [%v]", err.Error()) + } else if taskTemplate == nil { + return nil, errors.Errorf(errors.BadTaskSpecification, "nil task specification") + } + + sparkJob := plugins.SparkJob{} + err = utils.UnmarshalStruct(taskTemplate.GetCustom(), &sparkJob) + if err != nil { + return nil, errors.Wrapf(errors.BadTaskSpecification, err, "invalid TaskSpecification [%v], failed to unmarshal", taskTemplate.GetCustom()) + } + + if err = validateSparkJob(&sparkJob); err != nil { + return nil, errors.Wrapf(errors.BadTaskSpecification, err, "invalid TaskSpecification [%v].", taskTemplate.GetCustom()) + } + + sparkConfig := getSparkConfig(taskCtx, &sparkJob) + driverSpec, err := createDriverSpec(ctx, taskCtx, sparkConfig, &sparkJob) + if err != nil { + return nil, err + } + executorSpec, err := createExecutorSpec(ctx, taskCtx, sparkConfig, &sparkJob) + if err != nil { + return nil, err + } + app := createSparkApplication(&sparkJob, sparkConfig, driverSpec, executorSpec) + return app, nil +} + +func getSparkConfig(taskCtx pluginsCore.TaskExecutionContext, sparkJob *plugins.SparkJob) map[string]string { + // Start with default config values. + sparkConfig := make(map[string]string) + for k, v := range GetSparkConfig().DefaultSparkConfig { + sparkConfig[k] = v + } + + if sparkJob.GetExecutorPath() != "" { + sparkConfig["spark.pyspark.python"] = sparkJob.GetExecutorPath() + sparkConfig["spark.pyspark.driver.python"] = sparkJob.GetExecutorPath() + } + + for k, v := range sparkJob.GetSparkConf() { + // Add optional features if present. + if featureRegex.MatchString(k) { + addConfig(sparkConfig, k, v) + } else { + sparkConfig[k] = v + } + } + + // Set pod limits. 
+ if len(sparkConfig[sparkOpConfig.SparkDriverCoreLimitKey]) == 0 { + // spark.kubernetes.driver.request.cores takes precedence over spark.driver.cores + if len(sparkConfig[sparkOpConfig.SparkDriverCoreRequestKey]) != 0 { + sparkConfig[sparkOpConfig.SparkDriverCoreLimitKey] = sparkConfig[sparkOpConfig.SparkDriverCoreRequestKey] + } else if len(sparkConfig["spark.driver.cores"]) != 0 { + sparkConfig[sparkOpConfig.SparkDriverCoreLimitKey] = sparkConfig["spark.driver.cores"] + } + } + + if len(sparkConfig[sparkOpConfig.SparkExecutorCoreLimitKey]) == 0 { + // spark.kubernetes.executor.request.cores takes precedence over spark.executor.cores + if len(sparkConfig[sparkOpConfig.SparkExecutorCoreRequestKey]) != 0 { + sparkConfig[sparkOpConfig.SparkExecutorCoreLimitKey] = sparkConfig[sparkOpConfig.SparkExecutorCoreRequestKey] + } else if len(sparkConfig["spark.executor.cores"]) != 0 { + sparkConfig[sparkOpConfig.SparkExecutorCoreLimitKey] = sparkConfig["spark.executor.cores"] + } + } + + sparkConfig["spark.kubernetes.executor.podNamePrefix"] = taskCtx.TaskExecutionMetadata().GetTaskExecutionID().GetGeneratedName() + sparkConfig["spark.kubernetes.driverEnv.FLYTE_START_TIME"] = strconv.FormatInt(time.Now().UnixNano()/1000000, 10) + + return sparkConfig +} + +func serviceAccountName(metadata pluginsCore.TaskExecutionMetadata) string { + name := flytek8s.GetServiceAccountNameFromTaskExecutionMetadata(metadata) + if len(name) == 0 { + name = sparkTaskType + } + return name +} + +func createSparkPodSpec(taskCtx pluginsCore.TaskExecutionContext, podSpec *v1.PodSpec, container *v1.Container) *sparkOp.SparkPodSpec { + annotations := utils.UnionMaps(config.GetK8sPluginConfig().DefaultAnnotations, utils.CopyMap(taskCtx.TaskExecutionMetadata().GetAnnotations())) + labels := utils.UnionMaps(config.GetK8sPluginConfig().DefaultLabels, utils.CopyMap(taskCtx.TaskExecutionMetadata().GetLabels())) + + sparkEnv := make([]v1.EnvVar, 0) + for _, envVar := range container.Env { + sparkEnv = 
append(sparkEnv, *envVar.DeepCopy()) + } + sparkEnv = append(sparkEnv, v1.EnvVar{Name: "FLYTE_MAX_ATTEMPTS", Value: strconv.Itoa(int(taskCtx.TaskExecutionMetadata().GetMaxAttempts()))}) + + spec := sparkOp.SparkPodSpec{ + Affinity: podSpec.Affinity, + Annotations: annotations, + Labels: labels, + Env: sparkEnv, + Image: &container.Image, + SecurityContenxt: podSpec.SecurityContext.DeepCopy(), + DNSConfig: podSpec.DNSConfig.DeepCopy(), + Tolerations: podSpec.Tolerations, + SchedulerName: &podSpec.SchedulerName, + NodeSelector: podSpec.NodeSelector, + HostNetwork: &podSpec.HostNetwork, + } + return &spec +} + +type driverSpec struct { + sparkSpec *sparkOp.DriverSpec +} + +func createDriverSpec(ctx context.Context, taskCtx pluginsCore.TaskExecutionContext, sparkConfig map[string]string, sparkJob *plugins.SparkJob) (*driverSpec, error) { + // Spark driver pods should always run as non-interruptible + nonInterruptibleTaskCtx := flytek8s.NewPluginTaskExecutionContext(taskCtx, flytek8s.WithInterruptible(false)) + podSpec, _, primaryContainerName, err := flytek8s.ToK8sPodSpec(ctx, nonInterruptibleTaskCtx) + if err != nil { + return nil, err + } + + driverPod := sparkJob.GetDriverPod() + if driverPod != nil { + if driverPod.GetPodSpec() != nil { + var customPodSpec *v1.PodSpec + + err = utils.UnmarshalStructToObj(driverPod.GetPodSpec(), &customPodSpec) + if err != nil { + return nil, errors.Errorf(errors.BadTaskSpecification, + "Unable to unmarshal driver pod spec [%v], Err: [%v]", driverPod.GetPodSpec(), err.Error()) + } + + podSpec, err = flytek8s.MergeOverlayPodSpecOntoBase(podSpec, customPodSpec) + if err != nil { + return nil, err + } + } + + if driverPod.GetPrimaryContainerName() != "" { + primaryContainerName = driverPod.GetPrimaryContainerName() + } + } + + primaryContainer, err := flytek8s.GetContainer(podSpec, primaryContainerName) + if err != nil { + return nil, err + } + sparkPodSpec := createSparkPodSpec(nonInterruptibleTaskCtx, podSpec, primaryContainer) + 
serviceAccountName := serviceAccountName(nonInterruptibleTaskCtx.TaskExecutionMetadata()) + spec := driverSpec{ + &sparkOp.DriverSpec{ + SparkPodSpec: *sparkPodSpec, + ServiceAccount: &serviceAccountName, + }, + } + if cores, err := strconv.ParseInt(sparkConfig["spark.driver.cores"], 10, 32); err == nil { + spec.sparkSpec.Cores = intPtr(int32(cores)) + } + spec.sparkSpec.Memory = strPtr(sparkConfig["spark.driver.memory"]) + return &spec, nil +} + +type executorSpec struct { + container *v1.Container + sparkSpec *sparkOp.ExecutorSpec + serviceAccountName string +} + +func createExecutorSpec(ctx context.Context, taskCtx pluginsCore.TaskExecutionContext, sparkConfig map[string]string, sparkJob *plugins.SparkJob) (*executorSpec, error) { + podSpec, _, primaryContainerName, err := flytek8s.ToK8sPodSpec(ctx, taskCtx) + if err != nil { + return nil, err + } + + executorPod := sparkJob.GetExecutorPod() + if executorPod != nil { + if executorPod.GetPodSpec() != nil { + var customPodSpec *v1.PodSpec + + err = utils.UnmarshalStructToObj(executorPod.GetPodSpec(), &customPodSpec) + if err != nil { + return nil, errors.Errorf(errors.BadTaskSpecification, + "Unable to unmarshal executor pod spec [%v], Err: [%v]", executorPod.GetPodSpec(), err.Error()) + } + + podSpec, err = flytek8s.MergeOverlayPodSpecOntoBase(podSpec, customPodSpec) + if err != nil { + return nil, err + } + } + if executorPod.GetPrimaryContainerName() != "" { + primaryContainerName = executorPod.GetPrimaryContainerName() + } + } + + primaryContainer, err := flytek8s.GetContainer(podSpec, primaryContainerName) + if err != nil { + return nil, err + } + sparkPodSpec := createSparkPodSpec(taskCtx, podSpec, primaryContainer) + serviceAccountName := serviceAccountName(taskCtx.TaskExecutionMetadata()) + spec := executorSpec{ + primaryContainer, + &sparkOp.ExecutorSpec{ + SparkPodSpec: *sparkPodSpec, + }, + serviceAccountName, + } + if execCores, err := strconv.ParseInt(sparkConfig["spark.executor.cores"], 10, 32); err 
== nil { + spec.sparkSpec.Cores = intPtr(int32(execCores)) + } + if execCount, err := strconv.ParseInt(sparkConfig["spark.executor.instances"], 10, 32); err == nil { + spec.sparkSpec.Instances = intPtr(int32(execCount)) + } + spec.sparkSpec.Memory = strPtr(sparkConfig["spark.executor.memory"]) + return &spec, nil +} + +func createSparkApplication(sparkJob *plugins.SparkJob, sparkConfig map[string]string, driverSpec *driverSpec, + executorSpec *executorSpec) *sparkOp.SparkApplication { + // Hack: Retry submit failures in-case of resource limits hit. + submissionFailureRetries := int32(14) + + app := &sparkOp.SparkApplication{ + TypeMeta: metav1.TypeMeta{ + Kind: KindSparkApplication, + APIVersion: sparkOp.SchemeGroupVersion.String(), + }, + Spec: sparkOp.SparkApplicationSpec{ + ServiceAccount: &executorSpec.serviceAccountName, + Type: getApplicationType(sparkJob.GetApplicationType()), + Image: &executorSpec.container.Image, + Arguments: executorSpec.container.Args, + Driver: *driverSpec.sparkSpec, + Executor: *executorSpec.sparkSpec, + SparkConf: sparkConfig, + HadoopConf: sparkJob.GetHadoopConf(), + // SubmissionFailures handled here. Task Failures handled at Propeller/Job level. 
+ RestartPolicy: sparkOp.RestartPolicy{ + Type: sparkOp.OnFailure, + OnSubmissionFailureRetries: &submissionFailureRetries, + }, + }, + } + + if val, ok := sparkConfig["spark.batchScheduler"]; ok { + app.Spec.BatchScheduler = &val + } + + if sparkJob.MainApplicationFile != "" { + app.Spec.MainApplicationFile = &sparkJob.MainApplicationFile + } + if sparkJob.MainClass != "" { + app.Spec.MainClass = &sparkJob.MainClass + } + return app +} + +func addConfig(sparkConfig map[string]string, key string, value string) { + + if strings.ToLower(strings.TrimSpace(value)) != "true" { + sparkConfig[key] = value + return + } + + matches := featureRegex.FindAllStringSubmatch(key, -1) + if len(matches) == 0 || len(matches[0]) == 0 { + sparkConfig[key] = value + return + } + featureName := matches[0][len(matches[0])-1] + + // Use the first matching feature in-case of duplicates. + for _, feature := range GetSparkConfig().Features { + if feature.Name == featureName { + for k, v := range feature.SparkConfig { + sparkConfig[k] = v + } + return + } + } + sparkConfig[key] = value +} + +// Convert SparkJob ApplicationType to Operator CRD ApplicationType +func getApplicationType(applicationType plugins.SparkApplication_Type) sparkOp.SparkApplicationType { + switch applicationType { + case plugins.SparkApplication_PYTHON: + return sparkOp.PythonApplicationType + case plugins.SparkApplication_JAVA: + return sparkOp.JavaApplicationType + case plugins.SparkApplication_SCALA: + return sparkOp.ScalaApplicationType + case plugins.SparkApplication_R: + return sparkOp.RApplicationType + } + return sparkOp.PythonApplicationType +} + +func (sparkResourceHandler) BuildIdentityResource(ctx context.Context, taskCtx pluginsCore.TaskExecutionMetadata) (client.Object, error) { + return &sparkOp.SparkApplication{ + TypeMeta: metav1.TypeMeta{ + Kind: KindSparkApplication, + APIVersion: sparkOp.SchemeGroupVersion.String(), + }, + }, nil +} + +func getEventInfoForSpark(ctx context.Context, pluginContext 
// NOTE(review): this span continues the getEventInfoForSpark signature begun
// on the previous source line. The function assembles the TaskInfo reported
// for a SparkApplication: task log links from the Mixed/User/System/AllUser
// log plugin configs (in that order — tests assert the exact ordering), a
// LogContext describing driver and executor pods, and custom-info links for
// the Spark driver / history UIs.
k8s.PluginContext, sj *sparkOp.SparkApplication) (*pluginsCore.TaskInfo, error) {

	sparkConfig := GetSparkConfig()
	taskLogs := make([]*core.TaskLog, 0, 3)
	var logCtx *core.LogContext
	taskExecID := pluginContext.TaskExecutionMetadata().GetTaskExecutionID()

	// Driver-pod logs via the "Mixed" log plugin config — only once the
	// driver pod actually exists.
	if sj.Status.DriverInfo.PodName != "" {
		p, err := logs.InitializeLogPlugins(&sparkConfig.LogConfig.Mixed)
		if err != nil {
			return nil, err
		}

		if p != nil {
			o, err := p.GetTaskLogs(tasklog.Input{
				PodName:         sj.Status.DriverInfo.PodName,
				Namespace:       sj.Namespace,
				TaskExecutionID: taskExecID,
				EnableVscode:    flytek8s.IsVscodeEnabled(ctx, sj.Spec.Driver.Env),
			})

			if err != nil {
				return nil, err
			}

			taskLogs = append(taskLogs, o.TaskLogs...)
		}
	}

	// "User" logs also target the driver pod. Unlike the Mixed block above,
	// this runs even when DriverInfo.PodName is still empty.
	p, err := logs.InitializeLogPlugins(&sparkConfig.LogConfig.User)
	if err != nil {
		return nil, err
	}

	if p != nil {
		o, err := p.GetTaskLogs(tasklog.Input{
			PodName:         sj.Status.DriverInfo.PodName,
			Namespace:       sj.Namespace,
			TaskExecutionID: taskExecID,
		})

		if err != nil {
			return nil, err
		}

		taskLogs = append(taskLogs, o.TaskLogs...)
	}

	// Build the log context: driver pod first (primary), then every executor
	// pod that has progressed past Pending/Unknown.
	logCtx = &core.LogContext{
		PrimaryPodName: sj.Status.DriverInfo.PodName,
	}
	logCtx.Pods = append(logCtx.Pods, &core.PodLogContext{
		Namespace:            sj.Namespace,
		PodName:              sj.Status.DriverInfo.PodName,
		PrimaryContainerName: defaultDriverPrimaryContainerName,
		Containers: []*core.ContainerContext{
			{ContainerName: defaultDriverPrimaryContainerName},
		},
	})

	for executorPodName, executorState := range sj.Status.ExecutorState {
		if executorState != sparkOp.ExecutorPendingState && executorState != sparkOp.ExecutorUnknownState {
			logCtx.Pods = append(logCtx.Pods, &core.PodLogContext{
				Namespace:            sj.Namespace,
				PodName:              executorPodName,
				PrimaryContainerName: "spark-kubernetes-executor",
				Containers: []*core.ContainerContext{
					{ContainerName: "spark-kubernetes-executor"},
				},
			})
		}
	}

	// "System" logs are keyed by the SparkApplication name, not the driver
	// pod name.
	p, err = logs.InitializeLogPlugins(&sparkConfig.LogConfig.System)
	if err != nil {
		return nil, err
	}

	if p != nil {
		o, err := p.GetTaskLogs(tasklog.Input{
			PodName:         sj.Name,
			Namespace:       sj.Namespace,
			TaskExecutionID: taskExecID,
		})

		if err != nil {
			return nil, err
		}

		taskLogs = append(taskLogs, o.TaskLogs...)
	}

	// "AllUser" logs, also keyed by the application name.
	p, err = logs.InitializeLogPlugins(&sparkConfig.LogConfig.AllUser)
	if err != nil {
		return nil, err
	}

	if p != nil {
		o, err := p.GetTaskLogs(tasklog.Input{
			PodName:         sj.Name,
			Namespace:       sj.Namespace,
			TaskExecutionID: taskExecID,
		})

		if err != nil {
			return nil, err
		}

		// "All user" logs are shown already in the queuing and initializing phase.
		for _, log := range o.TaskLogs {
			log.ShowWhilePending = true
		}

		taskLogs = append(taskLogs, o.TaskLogs...)
	}

	customInfoMap := make(map[string]string)

	// Spark UI.
	// Terminal states link the history server (when configured); a running
	// app links the live driver UI ingress instead.
	if sj.Status.AppState.State == sparkOp.FailedState || sj.Status.AppState.State == sparkOp.CompletedState {
		if sj.Status.SparkApplicationID != "" && GetSparkConfig().SparkHistoryServerURL != "" {
			customInfoMap[sparkHistoryUI] = fmt.Sprintf("%s/history/%s", GetSparkConfig().SparkHistoryServerURL, sj.Status.SparkApplicationID)
			// Custom doesn't work unless the UI has a custom plugin to parse this, hence add to Logs as well.
			taskLogs = append(taskLogs, &core.TaskLog{
				Uri:           customInfoMap[sparkHistoryUI],
				Name:          "Spark History UI",
				Ready:         true,
				MessageFormat: core.TaskLog_JSON,
				LinkType:      core.TaskLog_DASHBOARD,
			})
		}
	} else if sj.Status.AppState.State == sparkOp.RunningState && sj.Status.DriverInfo.WebUIIngressAddress != "" {
		// Older versions of spark-operator does not append http:// but newer versions do.
		uri := sj.Status.DriverInfo.WebUIIngressAddress
		if !strings.HasPrefix(uri, "https://") && !strings.HasPrefix(uri, "http://") {
			uri = fmt.Sprintf("https://%s", uri)
		}
		customInfoMap[sparkDriverUI] = uri

		// Custom doesn't work unless the UI has a custom plugin to parse this, hence add to Logs as well.
		taskLogs = append(taskLogs, &core.TaskLog{
			Uri:           customInfoMap[sparkDriverUI],
			Name:          "Spark Driver UI",
			Ready:         true,
			MessageFormat: core.TaskLog_JSON,
			LinkType:      core.TaskLog_DASHBOARD,
		})
	}

	customInfo, err := utils.MarshalObjToStruct(customInfoMap)
	if err != nil {
		return nil, err
	}

	return &pluginsCore.TaskInfo{
		Logs:       taskLogs,
		LogContext: logCtx,
		CustomInfo: customInfo,
	}, nil
}

// GetTaskPhase maps the SparkApplication status (plus driver-pod diagnostics)
// to a Flyte phase; the body continues on the following source lines.
func (sparkResourceHandler) GetTaskPhase(ctx context.Context, pluginContext k8s.PluginContext, resource client.Object) (pluginsCore.PhaseInfo, error) {

	app := resource.(*sparkOp.SparkApplication)
	info, err := getEventInfoForSpark(ctx, pluginContext, app)
	if err != nil {
		return pluginsCore.PhaseInfoUndefined, err
	}

	// Demystification failures are logged, not returned — the SparkJob state
	// below still yields a usable phase.
	phaseInfo, err := flytek8s.DemystifyFailedOrPendingPod(ctx, pluginContext, *info, app.Namespace, app.Status.DriverInfo.PodName, defaultDriverPrimaryContainerName)
	if err != nil {
		logger.Errorf(ctx, "Failed to demystify pod status for spark driver. Error: %v", err)
	}
	if phaseInfo.Phase().IsFailure() {
		// If the spark driver pod is in a failure state, we can fail fast without checking the SparkJob status.
		return phaseInfo, nil
	}
	occurredAt := time.Now()
	// Map the operator's application state to a Flyte phase. Submission
	// failures and job failures are both retryable; any unrecognized state
	// is treated as still running.
	switch app.Status.AppState.State {
	case sparkOp.NewState:
		phaseInfo = pluginsCore.PhaseInfoQueuedWithTaskInfo(occurredAt, pluginsCore.DefaultPhaseVersion, "job queued", info)
	case sparkOp.SubmittedState, sparkOp.PendingSubmissionState:
		phaseInfo = pluginsCore.PhaseInfoInitializing(occurredAt, pluginsCore.DefaultPhaseVersion, "job submitted", info)
	case sparkOp.FailedSubmissionState:
		reason := fmt.Sprintf("Spark Job  Submission Failed with Error: %s", app.Status.AppState.ErrorMessage)
		phaseInfo = pluginsCore.PhaseInfoRetryableFailure(errors.DownstreamSystemError, reason, info)
	case sparkOp.FailedState:
		reason := fmt.Sprintf("Spark Job Failed with Error: %s", app.Status.AppState.ErrorMessage)
		phaseInfo = pluginsCore.PhaseInfoRetryableFailure(errors.DownstreamSystemError, reason, info)
	case sparkOp.CompletedState:
		phaseInfo = pluginsCore.PhaseInfoSuccess(info)
	default:
		phaseInfo = pluginsCore.PhaseInfoRunning(pluginsCore.DefaultPhaseVersion, info)
	}

	for _, tl := range info.Logs {
		// TODO: Add readiness probe for spark driver pod. Need to upgrade spark-operator client version.
// strPtr returns a pointer to a copy of str, or nil when str is empty, so
// that unset string config values are omitted from the serialized CRD rather
// than emitted as "".
func strPtr(str string) *string {
	if len(str) == 0 {
		return nil
	}
	out := str
	return &out
}
corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/logs" + pluginsCore "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core/mocks" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/flytek8s" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/flytek8s/config" + pluginIOMocks "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/io/mocks" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/k8s" + k8smocks "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/k8s/mocks" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/utils" + stdlibUtils "github.com/flyteorg/flyte/v2/flytestdlib/utils" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins" +) + +const sparkMainClass = "MainClass" +const sparkApplicationFile = "local:///spark_app.py" +const testImage = "image://" +const sparkUIAddress = "https://spark-ui.flyte" + +var ( + dummySparkConf = map[string]string{ + "spark.driver.memory": "200M", + "spark.driver.cores": "1", + "spark.executor.cores": "2", + "spark.executor.instances": "3", + "spark.executor.memory": "500M", + "spark.flyte.feature1.enabled": "true", + "spark.flyteorg.feature2.enabled": "true", + "spark.flyteorg.feature3.enabled": "true", + "spark.batchScheduler": "volcano", + } + + dummyEnvVars = []*core.KeyValuePair{ + {Key: "Env_Var", Value: "Env_Val"}, + } + + dummyEnvVarsWithSecretRef = []corev1.EnvVar{ + {Name: "Env_Var", Value: "Env_Val"}, + {Name: "SECRET", ValueFrom: &corev1.EnvVarSource{ + SecretKeyRef: &corev1.SecretKeySelector{ + Key: "key", + LocalObjectReference: corev1.LocalObjectReference{ + Name: "secret-name", + }, + 
}, + }}, + } + + testArgs = []string{ + "execute-spark-task", + } +) + +func TestGetApplicationType(t *testing.T) { + assert.Equal(t, getApplicationType(plugins.SparkApplication_PYTHON), sj.PythonApplicationType) + assert.Equal(t, getApplicationType(plugins.SparkApplication_R), sj.RApplicationType) + assert.Equal(t, getApplicationType(plugins.SparkApplication_JAVA), sj.JavaApplicationType) + assert.Equal(t, getApplicationType(plugins.SparkApplication_SCALA), sj.ScalaApplicationType) +} + +func TestGetEventInfo(t *testing.T) { + assert.NoError(t, setSparkConfig(&Config{ + LogConfig: LogConfig{ + User: logs.LogConfig{ + IsCloudwatchEnabled: true, + CloudwatchTemplateURI: "https://console.aws.amazon.com/cloudwatch/home?region=us-east-1#logStream:group=/kubernetes/flyte;prefix=var.log.containers.{{ .podName }};streamFilter=typeLogStreamPrefix", + IsKubernetesEnabled: true, + KubernetesURL: "k8s.com", + }, + System: logs.LogConfig{ + IsCloudwatchEnabled: true, + CloudwatchTemplateURI: "https://console.aws.amazon.com/cloudwatch/home?region=us-east-1#logStream:group=/kubernetes/flyte;prefix=system_log.var.log.containers.{{ .podName }};streamFilter=typeLogStreamPrefix", + }, + AllUser: logs.LogConfig{ + IsCloudwatchEnabled: true, + CloudwatchTemplateURI: "https://console.aws.amazon.com/cloudwatch/home?region=us-east-1#logStream:group=/kubernetes/flyte;prefix=var.log.containers.{{ .podName }};streamFilter=typeLogStreamPrefix", + }, + Mixed: logs.LogConfig{ + IsKubernetesEnabled: true, + KubernetesURL: "k8s.com", + }, + }, + })) + pluginContext := dummySparkPluginContext(dummySparkTaskTemplateContainer("blah-1", dummySparkConf), k8s.PluginState{}) + info, err := getEventInfoForSpark(context.TODO(), pluginContext, dummySparkApplication(sj.RunningState)) + assert.NoError(t, err) + assert.Len(t, info.Logs, 6) + assert.Equal(t, "https://spark-ui.flyte", info.CustomInfo.Fields[sparkDriverUI].GetStringValue()) + generatedLinks := make([]string, 0, len(info.Logs)) + for _, l := 
range info.Logs { + generatedLinks = append(generatedLinks, l.Uri) + } + + expectedLinks := []string{ + "k8s.com/#!/log/spark-namespace/spark-pod/pod?namespace=spark-namespace", + "k8s.com/#!/log/spark-namespace/spark-pod/pod?namespace=spark-namespace", + "https://console.aws.amazon.com/cloudwatch/home?region=us-east-1#logStream:group=/kubernetes/flyte;prefix=var.log.containers.spark-pod;streamFilter=typeLogStreamPrefix", + "https://console.aws.amazon.com/cloudwatch/home?region=us-east-1#logStream:group=/kubernetes/flyte;prefix=system_log.var.log.containers.spark-app-name;streamFilter=typeLogStreamPrefix", + "https://console.aws.amazon.com/cloudwatch/home?region=us-east-1#logStream:group=/kubernetes/flyte;prefix=var.log.containers.spark-app-name;streamFilter=typeLogStreamPrefix", + "https://spark-ui.flyte", + } + + assert.Equal(t, expectedLinks, generatedLinks) + + info, err = getEventInfoForSpark(context.TODO(), pluginContext, dummySparkApplication(sj.SubmittedState)) + generatedLinks = make([]string, 0, len(info.Logs)) + for _, l := range info.Logs { + generatedLinks = append(generatedLinks, l.Uri) + } + assert.NoError(t, err) + assert.Len(t, info.Logs, 5) + assert.Equal(t, expectedLinks[:5], generatedLinks) // No Spark Driver UI for Submitted state + assert.True(t, info.Logs[4].ShowWhilePending) // All User Logs should be shown while pending + generatedLinks = make([]string, 0, len(info.Logs)) + for _, l := range info.Logs { + generatedLinks = append(generatedLinks, l.Uri) + } + assert.NoError(t, err) + assert.Len(t, info.Logs, 5) + assert.Equal(t, expectedLinks[:5], generatedLinks) // No Spark Driver UI for Submitted state + assert.True(t, info.Logs[4].ShowWhilePending) // All User Logs should be shown while pending + + assert.NoError(t, setSparkConfig(&Config{ + SparkHistoryServerURL: "spark-history.flyte", + LogConfig: LogConfig{ + User: logs.LogConfig{ + IsCloudwatchEnabled: true, + CloudwatchTemplateURI: 
"https://console.aws.amazon.com/cloudwatch/home?region=us-east-1#logStream:group=/kubernetes/flyte;prefix=var.log.containers.{{ .podName }};streamFilter=typeLogStreamPrefix", + IsKubernetesEnabled: true, + KubernetesURL: "k8s.com", + }, + System: logs.LogConfig{ + IsCloudwatchEnabled: true, + CloudwatchTemplateURI: "https://console.aws.amazon.com/cloudwatch/home?region=us-east-1#logStream:group=/kubernetes/flyte;prefix=system_log.var.log.containers.{{ .podName }};streamFilter=typeLogStreamPrefix", + }, + AllUser: logs.LogConfig{ + IsCloudwatchEnabled: true, + CloudwatchTemplateURI: "https://console.aws.amazon.com/cloudwatch/home?region=us-east-1#logStream:group=/kubernetes/flyte;prefix=var.log.containers.{{ .podName }};streamFilter=typeLogStreamPrefix", + }, + }, + })) + + info, err = getEventInfoForSpark(context.TODO(), pluginContext, dummySparkApplication(sj.FailedState)) + assert.NoError(t, err) + assert.Len(t, info.Logs, 5) + assert.Equal(t, "spark-history.flyte/history/app-id", info.CustomInfo.Fields[sparkHistoryUI].GetStringValue()) + generatedLinks = make([]string, 0, len(info.Logs)) + for _, l := range info.Logs { + generatedLinks = append(generatedLinks, l.Uri) + } + + expectedLinks = []string{ + "k8s.com/#!/log/spark-namespace/spark-pod/pod?namespace=spark-namespace", + "https://console.aws.amazon.com/cloudwatch/home?region=us-east-1#logStream:group=/kubernetes/flyte;prefix=var.log.containers.spark-pod;streamFilter=typeLogStreamPrefix", + "https://console.aws.amazon.com/cloudwatch/home?region=us-east-1#logStream:group=/kubernetes/flyte;prefix=system_log.var.log.containers.spark-app-name;streamFilter=typeLogStreamPrefix", + "https://console.aws.amazon.com/cloudwatch/home?region=us-east-1#logStream:group=/kubernetes/flyte;prefix=var.log.containers.spark-app-name;streamFilter=typeLogStreamPrefix", + "spark-history.flyte/history/app-id", + } + + assert.Equal(t, expectedLinks, generatedLinks) +} + +func TestGetTaskPhase(t *testing.T) { + sparkResourceHandler 
:= sparkResourceHandler{} + expectedLogCtx := &core.LogContext{ + PrimaryPodName: "spark-pod", + Pods: []*core.PodLogContext{ + { + Namespace: "spark-namespace", + PodName: "spark-pod", + PrimaryContainerName: "spark-kubernetes-driver", + Containers: []*core.ContainerContext{ + { + ContainerName: "spark-kubernetes-driver", + }, + }, + }, + { + Namespace: "spark-namespace", + PodName: "exec-pod-2", + PrimaryContainerName: "spark-kubernetes-executor", + Containers: []*core.ContainerContext{ + { + ContainerName: "spark-kubernetes-executor", + }, + }, + }, + }, + } + + ctx := context.TODO() + pluginCtx := dummySparkPluginContext(dummySparkTaskTemplateContainer("", dummySparkConf), k8s.PluginState{}) + taskPhase, err := sparkResourceHandler.GetTaskPhase(ctx, pluginCtx, dummySparkApplication(sj.NewState)) + assert.NoError(t, err) + assert.Equal(t, taskPhase.Phase(), pluginsCore.PhaseQueued) + assert.NotNil(t, taskPhase.Info()) + assert.NotNil(t, taskPhase.Info().LogContext) + assert.Nil(t, err) + + taskPhase, err = sparkResourceHandler.GetTaskPhase(ctx, pluginCtx, dummySparkApplication(sj.SubmittedState)) + assert.NoError(t, err) + assert.Equal(t, taskPhase.Phase(), pluginsCore.PhaseInitializing) + assert.NotNil(t, taskPhase.Info()) + assert.NotNil(t, taskPhase.Info().LogContext) + assert.Nil(t, err) + + taskPhase, err = sparkResourceHandler.GetTaskPhase(ctx, pluginCtx, dummySparkApplication(sj.RunningState)) + assert.NoError(t, err) + assert.Equal(t, taskPhase.Phase(), pluginsCore.PhaseRunning) + assert.NotNil(t, taskPhase.Info()) + assert.Equal(t, expectedLogCtx, taskPhase.Info().LogContext) + assert.Nil(t, err) + + taskPhase, err = sparkResourceHandler.GetTaskPhase(ctx, pluginCtx, dummySparkApplication(sj.CompletedState)) + assert.NoError(t, err) + assert.Equal(t, taskPhase.Phase(), pluginsCore.PhaseSuccess) + assert.NotNil(t, taskPhase.Info()) + assert.Equal(t, expectedLogCtx, taskPhase.Info().LogContext) + assert.Nil(t, err) + + taskPhase, err = 
sparkResourceHandler.GetTaskPhase(ctx, pluginCtx, dummySparkApplication(sj.InvalidatingState)) + assert.NoError(t, err) + assert.Equal(t, taskPhase.Phase(), pluginsCore.PhaseRunning) + assert.NotNil(t, taskPhase.Info()) + assert.Equal(t, expectedLogCtx, taskPhase.Info().LogContext) + assert.Nil(t, err) + + taskPhase, err = sparkResourceHandler.GetTaskPhase(ctx, pluginCtx, dummySparkApplication(sj.FailingState)) + assert.NoError(t, err) + assert.Equal(t, taskPhase.Phase(), pluginsCore.PhaseRunning) + assert.NotNil(t, taskPhase.Info()) + assert.Equal(t, expectedLogCtx, taskPhase.Info().LogContext) + assert.Nil(t, err) + + taskPhase, err = sparkResourceHandler.GetTaskPhase(ctx, pluginCtx, dummySparkApplication(sj.PendingRerunState)) + assert.NoError(t, err) + assert.Equal(t, taskPhase.Phase(), pluginsCore.PhaseRunning) + assert.NotNil(t, taskPhase.Info()) + assert.Equal(t, expectedLogCtx, taskPhase.Info().LogContext) + assert.Nil(t, err) + + taskPhase, err = sparkResourceHandler.GetTaskPhase(ctx, pluginCtx, dummySparkApplication(sj.SucceedingState)) + assert.NoError(t, err) + assert.Equal(t, taskPhase.Phase(), pluginsCore.PhaseRunning) + assert.NotNil(t, taskPhase.Info()) + assert.Equal(t, expectedLogCtx, taskPhase.Info().LogContext) + assert.Nil(t, err) + + taskPhase, err = sparkResourceHandler.GetTaskPhase(ctx, pluginCtx, dummySparkApplication(sj.FailedSubmissionState)) + assert.NoError(t, err) + assert.Equal(t, taskPhase.Phase(), pluginsCore.PhaseRetryableFailure) + assert.NotNil(t, taskPhase.Info()) + assert.Equal(t, expectedLogCtx, taskPhase.Info().LogContext) + assert.Nil(t, err) + + taskPhase, err = sparkResourceHandler.GetTaskPhase(ctx, pluginCtx, dummySparkApplication(sj.FailedState)) + assert.NoError(t, err) + assert.Equal(t, taskPhase.Phase(), pluginsCore.PhaseRetryableFailure) + assert.NotNil(t, taskPhase.Info()) + assert.Equal(t, expectedLogCtx, taskPhase.Info().LogContext) + assert.Nil(t, err) +} + +func TestGetTaskPhaseIncreasePhaseVersion(t *testing.T) 
{ + sparkResourceHandler := sparkResourceHandler{} + ctx := context.TODO() + + pluginState := k8s.PluginState{ + Phase: pluginsCore.PhaseInitializing, + PhaseVersion: pluginsCore.DefaultPhaseVersion, + Reason: "task submitted to K8s", + } + + pluginCtx := dummySparkPluginContext(dummySparkTaskTemplateContainer("", dummySparkConf), pluginState) + taskPhase, err := sparkResourceHandler.GetTaskPhase(ctx, pluginCtx, dummySparkApplication(sj.SubmittedState)) + + assert.NoError(t, err) + assert.Equal(t, taskPhase.Version(), pluginsCore.DefaultPhaseVersion+1) +} + +func dummySparkApplication(state sj.ApplicationStateType) *sj.SparkApplication { + + return &sj.SparkApplication{ + ObjectMeta: v1.ObjectMeta{ + Name: "spark-app-name", + Namespace: "spark-namespace", + }, + Status: sj.SparkApplicationStatus{ + SparkApplicationID: "app-id", + AppState: sj.ApplicationState{ + State: state, + }, + DriverInfo: sj.DriverInfo{ + PodName: "spark-pod", + WebUIIngressAddress: sparkUIAddress, + }, + ExecutionAttempts: 1, + ExecutorState: map[string]sparkOp.ExecutorState{ + "exec-pod-1": sparkOp.ExecutorPendingState, + "exec-pod-2": sparkOp.ExecutorRunningState, + }, + }, + } +} + +func dummySparkCustomObj(sparkConf map[string]string) *plugins.SparkJob { + sparkJob := plugins.SparkJob{} + + sparkJob.MainClass = sparkMainClass + sparkJob.MainApplicationFile = sparkApplicationFile + sparkJob.SparkConf = sparkConf + sparkJob.ApplicationType = plugins.SparkApplication_PYTHON + return &sparkJob +} + +func dummyPodSpec() *corev1.PodSpec { + return &corev1.PodSpec{ + InitContainers: []corev1.Container{ + { + Name: "init", + Image: testImage, + Args: testArgs, + }, + }, + Containers: []corev1.Container{ + { + Name: "primary", + Image: testImage, + Args: testArgs, + Env: dummyEnvVarsWithSecretRef, + }, + { + Name: "secondary", + Image: testImage, + Args: testArgs, + Env: flytek8s.ToK8sEnvVar(dummyEnvVars), + }, + }, + } +} + +func dummySparkTaskTemplateContainer(id string, sparkConf 
map[string]string) *core.TaskTemplate { + sparkJob := dummySparkCustomObj(sparkConf) + sparkJobJSON, err := utils.MarshalToString(sparkJob) + if err != nil { + panic(err) + } + + structObj := structpb.Struct{} + + err = stdlibUtils.UnmarshalStringToPb(sparkJobJSON, &structObj) + if err != nil { + panic(err) + } + + return &core.TaskTemplate{ + Id: &core.Identifier{Name: id}, + Type: "container", + Target: &core.TaskTemplate_Container{ + Container: &core.Container{ + Image: testImage, + Args: testArgs, + Env: dummyEnvVars, + }, + }, + Custom: &structObj, + } +} + +func dummySparkTaskTemplatePod(id string, sparkConf map[string]string, podSpec *corev1.PodSpec) *core.TaskTemplate { + sparkJob := dummySparkCustomObj(sparkConf) + sparkJobJSON, err := utils.MarshalToString(sparkJob) + if err != nil { + panic(err) + } + + structObj := structpb.Struct{} + + err = stdlibUtils.UnmarshalStringToPb(sparkJobJSON, &structObj) + if err != nil { + panic(err) + } + + podSpecPb, err := utils.MarshalObjToStruct(podSpec) + if err != nil { + panic(err) + } + + return &core.TaskTemplate{ + Id: &core.Identifier{Name: id}, + Type: "k8s_pod", + Target: &core.TaskTemplate_K8SPod{ + K8SPod: &core.K8SPod{ + PodSpec: podSpecPb, + }, + }, + Config: map[string]string{ + flytek8s.PrimaryContainerKey: "primary", + }, + Custom: &structObj, + } +} + +func dummySparkTaskContext(taskTemplate *core.TaskTemplate, interruptible bool) pluginsCore.TaskExecutionContext { + taskCtx := &mocks.TaskExecutionContext{} + inputReader := &pluginIOMocks.InputReader{} + inputReader.OnGetInputPrefixPath().Return("/input/prefix") + inputReader.OnGetInputPath().Return("/input") + inputReader.OnGetMatch(mock.Anything).Return(&core.LiteralMap{}, nil) + taskCtx.OnInputReader().Return(inputReader) + + outputReader := &pluginIOMocks.OutputWriter{} + outputReader.OnGetOutputPath().Return("/data/outputs.pb") + outputReader.OnGetOutputPrefixPath().Return("/data/") + outputReader.OnGetRawOutputPrefix().Return("") + 
outputReader.OnGetCheckpointPrefix().Return("/checkpoint") + outputReader.OnGetPreviousCheckpointsPrefix().Return("/prev") + + taskCtx.On("OutputWriter").Return(outputReader) + + taskReader := &mocks.TaskReader{} + taskReader.OnReadMatch(mock.Anything).Return(taskTemplate, nil) + taskCtx.OnTaskReader().Return(taskReader) + + tID := &mocks.TaskExecutionID{} + tID.OnGetID().Return(core.TaskExecutionIdentifier{ + NodeExecutionId: &core.NodeExecutionIdentifier{ + ExecutionId: &core.WorkflowExecutionIdentifier{ + Name: "my_name", + Project: "my_project", + Domain: "my_domain", + }, + }, + }) + tID.On("GetGeneratedName").Return("some-acceptable-name") + tID.On("GetUniqueNodeID").Return("an-unique-id") + + overrides := &mocks.TaskOverrides{} + overrides.On("GetResources").Return(&corev1.ResourceRequirements{}) + // No support for GPUs, and consequently, ExtendedResources on Spark plugin. + overrides.On("GetExtendedResources").Return(nil) + overrides.On("GetPodTemplate").Return(nil) + overrides.OnGetContainerImage().Return("") + + taskExecutionMetadata := &mocks.TaskExecutionMetadata{} + taskExecutionMetadata.On("GetTaskExecutionID").Return(tID) + taskExecutionMetadata.On("GetNamespace").Return("test-namespace") + taskExecutionMetadata.On("GetAnnotations").Return(map[string]string{"annotation-1": "val1"}) + taskExecutionMetadata.On("GetLabels").Return(map[string]string{"label-1": "val1"}) + taskExecutionMetadata.On("GetOwnerReference").Return(v1.OwnerReference{ + Kind: "node", + Name: "blah", + }) + taskExecutionMetadata.On("GetSecurityContext").Return(core.SecurityContext{ + RunAs: &core.Identity{K8SServiceAccount: "new-val"}, + }) + taskExecutionMetadata.On("IsInterruptible").Return(interruptible) + taskExecutionMetadata.On("GetMaxAttempts").Return(uint32(1)) + taskExecutionMetadata.On("GetEnvironmentVariables").Return(nil) + taskExecutionMetadata.On("GetPlatformResources").Return(nil) + taskExecutionMetadata.On("GetOverrides").Return(overrides) + 
taskExecutionMetadata.On("GetK8sServiceAccount").Return("new-val") + taskExecutionMetadata.On("GetConsoleURL").Return("") + taskCtx.On("TaskExecutionMetadata").Return(taskExecutionMetadata) + pluginStateReaderMock := mocks.PluginStateReader{} + pluginStateReaderMock.On("Get", mock.AnythingOfType(reflect.TypeOf(&k8s.PluginState{}).String())).Return( + func(v interface{}) uint8 { + *(v.(*k8s.PluginState)) = k8s.PluginState{} + return 0 + }, + func(v interface{}) error { + return nil + }) + + taskCtx.OnPluginStateReader().Return(&pluginStateReaderMock) + return taskCtx +} + +func dummySparkPluginContext(taskTemplate *core.TaskTemplate, pluginState k8s.PluginState) k8s.PluginContext { + return dummySparkPluginContextWithPods(taskTemplate, pluginState) +} + +func dummySparkPluginContextWithPods(taskTemplate *core.TaskTemplate, pluginState k8s.PluginState, pods ...client.Object) k8s.PluginContext { + pCtx := &k8smocks.PluginContext{} + inputReader := &pluginIOMocks.InputReader{} + inputReader.OnGetInputPrefixPath().Return("/input/prefix") + inputReader.OnGetInputPath().Return("/input") + inputReader.OnGetMatch(mock.Anything).Return(&core.LiteralMap{}, nil) + pCtx.OnInputReader().Return(inputReader) + + outputReader := &pluginIOMocks.OutputWriter{} + outputReader.OnGetOutputPath().Return("/data/outputs.pb") + outputReader.OnGetOutputPrefixPath().Return("/data/") + outputReader.OnGetRawOutputPrefix().Return("") + outputReader.OnGetCheckpointPrefix().Return("/checkpoint") + outputReader.OnGetPreviousCheckpointsPrefix().Return("/prev") + + pCtx.On("OutputWriter").Return(outputReader) + + taskReader := &mocks.TaskReader{} + taskReader.OnReadMatch(mock.Anything).Return(taskTemplate, nil) + pCtx.OnTaskReader().Return(taskReader) + + tID := &mocks.TaskExecutionID{} + tID.OnGetID().Return(core.TaskExecutionIdentifier{ + NodeExecutionId: &core.NodeExecutionIdentifier{ + ExecutionId: &core.WorkflowExecutionIdentifier{ + Name: "my_name", + Project: "my_project", + Domain: 
"my_domain", + }, + }, + }) + tID.On("GetGeneratedName").Return("some-acceptable-name") + tID.On("GetUniqueNodeID").Return("an-unique-id") + + overrides := &mocks.TaskOverrides{} + overrides.On("GetResources").Return(&corev1.ResourceRequirements{}) + // No support for GPUs, and consequently, ExtendedResources on Spark plugin. + overrides.On("GetExtendedResources").Return(nil) + overrides.OnGetContainerImage().Return("") + + taskExecutionMetadata := &mocks.TaskExecutionMetadata{} + taskExecutionMetadata.On("GetTaskExecutionID").Return(tID) + taskExecutionMetadata.On("GetNamespace").Return("test-namespace") + taskExecutionMetadata.On("GetAnnotations").Return(map[string]string{"annotation-1": "val1"}) + taskExecutionMetadata.On("GetLabels").Return(map[string]string{"label-1": "val1"}) + taskExecutionMetadata.On("GetOwnerReference").Return(v1.OwnerReference{ + Kind: "node", + Name: "blah", + }) + taskExecutionMetadata.On("GetSecurityContext").Return(core.SecurityContext{ + RunAs: &core.Identity{K8SServiceAccount: "new-val"}, + }) + taskExecutionMetadata.On("IsInterruptible").Return(false) + taskExecutionMetadata.On("GetMaxAttempts").Return(uint32(1)) + taskExecutionMetadata.On("GetEnvironmentVariables").Return(nil) + taskExecutionMetadata.On("GetPlatformResources").Return(nil) + taskExecutionMetadata.On("GetOverrides").Return(overrides) + taskExecutionMetadata.On("GetK8sServiceAccount").Return("new-val") + taskExecutionMetadata.On("GetConsoleURL").Return("") + pCtx.OnTaskExecutionMetadata().Return(taskExecutionMetadata) + + pluginStateReaderMock := mocks.PluginStateReader{} + pluginStateReaderMock.On("Get", mock.AnythingOfType(reflect.TypeOf(&pluginState).String())).Return( + func(v interface{}) uint8 { + *(v.(*k8s.PluginState)) = pluginState + return 0 + }, + func(v interface{}) error { + return nil + }) + + // Add K8sReader mock for pods + objs := make([]client.Object, len(pods)) + copy(objs, pods) + reader := fake.NewClientBuilder().WithObjects(objs...).Build() + 
pCtx.OnK8sReader().Return(reader) + + pCtx.OnPluginStateReader().Return(&pluginStateReaderMock) + return pCtx +} + +func defaultPluginConfig() *config.K8sPluginConfig { + // Set Interruptible Config + runAsUser := int64(1000) + dnsOptVal1 := "1" + dnsOptVal2 := "1" + dnsOptVal3 := "3" + + // Set scheduler + schedulerName := "custom-scheduler" + + // Node selectors + defaultNodeSelector := map[string]string{ + "x/default": "true", + } + interruptibleNodeSelector := map[string]string{ + "x/interruptible": "true", + } + + defaultPodHostNetwork := true + + // Default env vars passed explicitly and default env vars derived from environment + defaultEnvVars := make(map[string]string) + defaultEnvVars["foo"] = "bar" + + defaultEnvVarsFromEnv := make(map[string]string) + targetKeyFromEnv := "TEST_VAR_FROM_ENV_KEY" + targetValueFromEnv := "TEST_VAR_FROM_ENV_VALUE" + os.Setenv(targetKeyFromEnv, targetValueFromEnv) + defer os.Unsetenv(targetKeyFromEnv) + defaultEnvVarsFromEnv["fooEnv"] = targetKeyFromEnv + + // Default affinity/anti-affinity + defaultAffinity := &corev1.Affinity{ + NodeAffinity: &corev1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ + NodeSelectorTerms: []corev1.NodeSelectorTerm{ + { + MatchExpressions: []corev1.NodeSelectorRequirement{ + { + Key: "x/default", + Operator: corev1.NodeSelectorOpIn, + Values: []string{"true"}, + }, + }, + }, + }, + }, + }, + } + + // Interruptible/non-interruptible nodeselector requirement + interruptibleNodeSelectorRequirement := &corev1.NodeSelectorRequirement{ + Key: "x/interruptible", + Operator: corev1.NodeSelectorOpIn, + Values: []string{"true"}, + } + + nonInterruptibleNodeSelectorRequirement := &corev1.NodeSelectorRequirement{ + Key: "x/non-interruptible", + Operator: corev1.NodeSelectorOpIn, + Values: []string{"true"}, + } + + config := &config.K8sPluginConfig{ + DefaultAffinity: defaultAffinity, + DefaultPodSecurityContext: &corev1.PodSecurityContext{ + RunAsUser: &runAsUser, + }, 
+ DefaultPodDNSConfig: &corev1.PodDNSConfig{ + Nameservers: []string{"8.8.8.8", "8.8.4.4"}, + Options: []corev1.PodDNSConfigOption{ + { + Name: "ndots", + Value: &dnsOptVal1, + }, + { + Name: "single-request-reopen", + }, + { + Name: "timeout", + Value: &dnsOptVal2, + }, + { + Name: "attempts", + Value: &dnsOptVal3, + }, + }, + Searches: []string{"ns1.svc.cluster-domain.example", "my.dns.search.suffix"}, + }, + DefaultTolerations: []corev1.Toleration{ + { + Key: "x/flyte", + Value: "default", + Operator: "Equal", + Effect: "NoSchedule", + }, + }, + DefaultNodeSelector: defaultNodeSelector, + InterruptibleNodeSelector: interruptibleNodeSelector, + InterruptibleTolerations: []corev1.Toleration{ + { + Key: "x/flyte", + Value: "interruptible", + Operator: "Equal", + Effect: "NoSchedule", + }, + }, + InterruptibleNodeSelectorRequirement: interruptibleNodeSelectorRequirement, + NonInterruptibleNodeSelectorRequirement: nonInterruptibleNodeSelectorRequirement, + SchedulerName: schedulerName, + EnableHostNetworkingPod: &defaultPodHostNetwork, + DefaultEnvVars: defaultEnvVars, + DefaultEnvVarsFromEnv: defaultEnvVarsFromEnv, + } + return config +} + +func findEnvVarByName(envVars []corev1.EnvVar, name string) *corev1.EnvVar { + for _, envVar := range envVars { + if envVar.Name == name { + return &envVar + } + } + return nil +} + +func TestBuildResourceContainer(t *testing.T) { + sparkResourceHandler := sparkResourceHandler{} + + // Case1: Valid Spark Task-Template + taskTemplate := dummySparkTaskTemplateContainer("blah-1", dummySparkConf) + + // Set spark custom feature config. 
+ assert.NoError(t, setSparkConfig(&Config{ + Features: []Feature{ + { + Name: "feature1", + SparkConfig: map[string]string{"spark.hadoop.feature1": "true"}, + }, + { + Name: "feature2", + SparkConfig: map[string]string{"spark.hadoop.feature2": "true"}, + }, + }, + })) + + defaultConfig := defaultPluginConfig() + assert.NoError(t, config.SetK8sPluginConfig(defaultConfig)) + resource, err := sparkResourceHandler.BuildResource(context.TODO(), dummySparkTaskContext(taskTemplate, true)) + assert.Nil(t, err) + + assert.NotNil(t, resource) + sparkApp, ok := resource.(*sj.SparkApplication) + assert.True(t, ok) + assert.Equal(t, sparkMainClass, *sparkApp.Spec.MainClass) + assert.Equal(t, sparkApplicationFile, *sparkApp.Spec.MainApplicationFile) + assert.Equal(t, sj.PythonApplicationType, sparkApp.Spec.Type) + assert.Equal(t, testArgs, sparkApp.Spec.Arguments) + assert.Equal(t, testImage, *sparkApp.Spec.Image) + assert.NotNil(t, sparkApp.Spec.Driver.SparkPodSpec.SecurityContenxt) + assert.Equal(t, *sparkApp.Spec.Driver.SparkPodSpec.SecurityContenxt.RunAsUser, *defaultConfig.DefaultPodSecurityContext.RunAsUser) + assert.NotNil(t, sparkApp.Spec.Driver.DNSConfig) + assert.Equal(t, []string{"8.8.8.8", "8.8.4.4"}, sparkApp.Spec.Driver.DNSConfig.Nameservers) + assert.ElementsMatch(t, defaultConfig.DefaultPodDNSConfig.Options, sparkApp.Spec.Driver.DNSConfig.Options) + assert.Equal(t, []string{"ns1.svc.cluster-domain.example", "my.dns.search.suffix"}, sparkApp.Spec.Driver.DNSConfig.Searches) + assert.NotNil(t, sparkApp.Spec.Executor.SparkPodSpec.SecurityContenxt) + assert.Equal(t, *sparkApp.Spec.Executor.SparkPodSpec.SecurityContenxt.RunAsUser, *defaultConfig.DefaultPodSecurityContext.RunAsUser) + assert.NotNil(t, sparkApp.Spec.Executor.DNSConfig) + assert.NotNil(t, sparkApp.Spec.Executor.DNSConfig) + assert.ElementsMatch(t, defaultConfig.DefaultPodDNSConfig.Options, sparkApp.Spec.Executor.DNSConfig.Options) + assert.Equal(t, []string{"ns1.svc.cluster-domain.example", 
"my.dns.search.suffix"}, sparkApp.Spec.Executor.DNSConfig.Searches) + + // Validate Driver/Executor Spec. + driverCores, _ := strconv.ParseInt(dummySparkConf["spark.driver.cores"], 10, 32) + execCores, _ := strconv.ParseInt(dummySparkConf["spark.executor.cores"], 10, 32) + execInstances, _ := strconv.ParseInt(dummySparkConf["spark.executor.instances"], 10, 32) + + assert.Equal(t, "new-val", *sparkApp.Spec.ServiceAccount) + assert.Equal(t, int32(driverCores), *sparkApp.Spec.Driver.Cores) + assert.Equal(t, int32(execCores), *sparkApp.Spec.Executor.Cores) + assert.Equal(t, int32(execInstances), *sparkApp.Spec.Executor.Instances) + assert.Equal(t, dummySparkConf["spark.driver.memory"], *sparkApp.Spec.Driver.Memory) + assert.Equal(t, dummySparkConf["spark.executor.memory"], *sparkApp.Spec.Executor.Memory) + assert.Equal(t, dummySparkConf["spark.batchScheduler"], *sparkApp.Spec.BatchScheduler) + assert.Equal(t, defaultConfig.SchedulerName, *sparkApp.Spec.Executor.SchedulerName) + assert.Equal(t, defaultConfig.SchedulerName, *sparkApp.Spec.Driver.SchedulerName) + assert.Equal(t, *defaultConfig.EnableHostNetworkingPod, *sparkApp.Spec.Executor.HostNetwork) + assert.Equal(t, *defaultConfig.EnableHostNetworkingPod, *sparkApp.Spec.Driver.HostNetwork) + + // Validate + // * Default tolerations set for both Driver and Executor. + // * Interruptible tolerations and node selector set for Executor but not Driver. + // * Default node selector set for both Driver and Executor. + // * Interruptible node selector requirements set for Executor Affinity, non-interruptible for Driver Affinity.
+ assert.Equal(t, 1, len(sparkApp.Spec.Driver.Tolerations)) + assert.Equal(t, 1, len(sparkApp.Spec.Driver.NodeSelector)) + assert.Equal(t, defaultConfig.DefaultNodeSelector, sparkApp.Spec.Driver.NodeSelector) + tolDriverDefault := sparkApp.Spec.Driver.Tolerations[0] + assert.Equal(t, tolDriverDefault.Key, "x/flyte") + assert.Equal(t, tolDriverDefault.Value, "default") + assert.Equal(t, tolDriverDefault.Operator, corev1.TolerationOperator("Equal")) + assert.Equal(t, tolDriverDefault.Effect, corev1.TaintEffect("NoSchedule")) + + assert.Equal(t, 2, len(sparkApp.Spec.Executor.Tolerations)) + assert.Equal(t, 2, len(sparkApp.Spec.Executor.NodeSelector)) + assert.Equal(t, map[string]string{ + "x/default": "true", + "x/interruptible": "true", + }, sparkApp.Spec.Executor.NodeSelector) + + tolExecInterrupt := sparkApp.Spec.Executor.Tolerations[0] + assert.Equal(t, tolExecInterrupt.Key, "x/flyte") + assert.Equal(t, tolExecInterrupt.Value, "interruptible") + assert.Equal(t, tolExecInterrupt.Operator, corev1.TolerationOperator("Equal")) + assert.Equal(t, tolExecInterrupt.Effect, corev1.TaintEffect("NoSchedule")) + + tolExecDefault := sparkApp.Spec.Executor.Tolerations[1] + assert.Equal(t, tolExecDefault.Key, "x/flyte") + assert.Equal(t, tolExecDefault.Value, "default") + assert.Equal(t, tolExecDefault.Operator, corev1.TolerationOperator("Equal")) + assert.Equal(t, tolExecDefault.Effect, corev1.TaintEffect("NoSchedule")) + + for confKey, confVal := range dummySparkConf { + exists := false + + if featureRegex.MatchString(confKey) && confKey != "spark.flyteorg.feature3.enabled" { + match := featureRegex.FindAllStringSubmatch(confKey, -1) + feature := match[0][len(match[0])-1] + assert.True(t, feature == "feature1" || feature == "feature2") + for k, v := range sparkApp.Spec.SparkConf { + key := "spark.hadoop." 
+ feature + if k == key { + assert.Equal(t, v, "true") + exists = true + } + } + } else { + for k, v := range sparkApp.Spec.SparkConf { + + if k == confKey { + assert.Equal(t, v, confVal) + exists = true + } + } + } + assert.True(t, exists) + } + + assert.Equal(t, dummySparkConf["spark.driver.cores"], sparkApp.Spec.SparkConf["spark.kubernetes.driver.limit.cores"]) + assert.Equal(t, dummySparkConf["spark.executor.cores"], sparkApp.Spec.SparkConf["spark.kubernetes.executor.limit.cores"]) + assert.Greater(t, len(sparkApp.Spec.SparkConf["spark.kubernetes.driverEnv.FLYTE_START_TIME"]), 1) + assert.Equal(t, dummySparkConf["spark.flyteorg.feature3.enabled"], sparkApp.Spec.SparkConf["spark.flyteorg.feature3.enabled"]) + + assert.Equal(t, len(findEnvVarByName(sparkApp.Spec.Driver.Env, "FLYTE_MAX_ATTEMPTS").Value), 1) + assert.Equal(t, defaultConfig.DefaultEnvVars["foo"], findEnvVarByName(sparkApp.Spec.Driver.Env, "foo").Value) + assert.Equal(t, defaultConfig.DefaultEnvVars["foo"], findEnvVarByName(sparkApp.Spec.Executor.Env, "foo").Value) + assert.Equal(t, defaultConfig.DefaultEnvVars["fooEnv"], findEnvVarByName(sparkApp.Spec.Driver.Env, "fooEnv").Value) + assert.Equal(t, defaultConfig.DefaultEnvVars["fooEnv"], findEnvVarByName(sparkApp.Spec.Executor.Env, "fooEnv").Value) + + assert.Equal(t, &corev1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ + NodeSelectorTerms: []corev1.NodeSelectorTerm{ + { + MatchExpressions: []corev1.NodeSelectorRequirement{ + defaultConfig.DefaultAffinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0], + *defaultConfig.NonInterruptibleNodeSelectorRequirement, + }, + }, + }, + }, + }, sparkApp.Spec.Driver.Affinity.NodeAffinity) + + assert.Equal(t, &corev1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ + NodeSelectorTerms: []corev1.NodeSelectorTerm{ + { + MatchExpressions: []corev1.NodeSelectorRequirement{ + 
defaultConfig.DefaultAffinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0], + *defaultConfig.InterruptibleNodeSelectorRequirement, + }, + }, + }, + }, + }, sparkApp.Spec.Executor.Affinity.NodeAffinity) + + // Case 2: Driver/Executor request cores set. + dummyConfWithRequest := make(map[string]string) + + for k, v := range dummySparkConf { + dummyConfWithRequest[k] = v + } + + dummyConfWithRequest["spark.kubernetes.driver.request.cores"] = "3" + dummyConfWithRequest["spark.kubernetes.executor.request.cores"] = "4" + + taskTemplate = dummySparkTaskTemplateContainer("blah-1", dummyConfWithRequest) + resource, err = sparkResourceHandler.BuildResource(context.TODO(), dummySparkTaskContext(taskTemplate, false)) + assert.Nil(t, err) + assert.NotNil(t, resource) + sparkApp, ok = resource.(*sj.SparkApplication) + assert.True(t, ok) + + assert.Equal(t, dummyConfWithRequest["spark.kubernetes.driver.request.cores"], sparkApp.Spec.SparkConf["spark.kubernetes.driver.limit.cores"]) + assert.Equal(t, dummyConfWithRequest["spark.kubernetes.executor.request.cores"], sparkApp.Spec.SparkConf["spark.kubernetes.executor.limit.cores"]) + + // Case 3: Interruptible False + resource, err = sparkResourceHandler.BuildResource(context.TODO(), dummySparkTaskContext(taskTemplate, false)) + assert.Nil(t, err) + assert.NotNil(t, resource) + sparkApp, ok = resource.(*sj.SparkApplication) + assert.True(t, ok) + + // Validate Interruptible Toleration and NodeSelector not set for both Driver and Executors. + // Validate that the default Toleration and NodeSelector are set for both Driver and Executors. 
+ assert.Equal(t, 1, len(sparkApp.Spec.Driver.Tolerations)) + assert.Equal(t, 1, len(sparkApp.Spec.Driver.NodeSelector)) + assert.Equal(t, defaultConfig.DefaultNodeSelector, sparkApp.Spec.Driver.NodeSelector) + assert.Equal(t, 1, len(sparkApp.Spec.Executor.Tolerations)) + assert.Equal(t, 1, len(sparkApp.Spec.Executor.NodeSelector)) + assert.Equal(t, defaultConfig.DefaultNodeSelector, sparkApp.Spec.Executor.NodeSelector) + assert.Equal(t, sparkApp.Spec.Executor.Tolerations[0].Key, "x/flyte") + assert.Equal(t, sparkApp.Spec.Executor.Tolerations[0].Value, "default") + assert.Equal(t, sparkApp.Spec.Driver.Tolerations[0].Key, "x/flyte") + assert.Equal(t, sparkApp.Spec.Driver.Tolerations[0].Value, "default") + + // Validate correct affinity and nodeselector requirements are set for both Driver and Executors. + assert.Equal(t, &corev1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ + NodeSelectorTerms: []corev1.NodeSelectorTerm{ + { + MatchExpressions: []corev1.NodeSelectorRequirement{ + defaultConfig.DefaultAffinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0], + *defaultConfig.NonInterruptibleNodeSelectorRequirement, + }, + }, + }, + }, + }, sparkApp.Spec.Driver.Affinity.NodeAffinity) + + assert.Equal(t, &corev1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ + NodeSelectorTerms: []corev1.NodeSelectorTerm{ + { + MatchExpressions: []corev1.NodeSelectorRequirement{ + defaultConfig.DefaultAffinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0], + *defaultConfig.NonInterruptibleNodeSelectorRequirement, + }, + }, + }, + }, + }, sparkApp.Spec.Executor.Affinity.NodeAffinity) + + // Case 4: Invalid Spark Task-Template + taskTemplate.Custom = nil + resource, err = sparkResourceHandler.BuildResource(context.TODO(), dummySparkTaskContext(taskTemplate, false)) + assert.NotNil(t, err) + assert.Nil(t, 
resource) +} + +func TestBuildResourcePodTemplate(t *testing.T) { + defaultConfig := defaultPluginConfig() + assert.NoError(t, config.SetK8sPluginConfig(defaultConfig)) + extraToleration := corev1.Toleration{ + Key: "x/flyte", + Value: "extra", + Operator: "Equal", + } + podSpec := dummyPodSpec() + podSpec.Tolerations = append(podSpec.Tolerations, extraToleration) + podSpec.NodeSelector = map[string]string{"x/custom": "foo"} + taskTemplate := dummySparkTaskTemplatePod("blah-1", dummySparkConf, podSpec) + taskTemplate.GetK8SPod() + sparkResourceHandler := sparkResourceHandler{} + + taskCtx := dummySparkTaskContext(taskTemplate, true) + resource, err := sparkResourceHandler.BuildResource(context.TODO(), taskCtx) + + assert.Nil(t, err) + assert.NotNil(t, resource) + sparkApp, ok := resource.(*sj.SparkApplication) + assert.True(t, ok) + + // Application + assert.Equal(t, v1.TypeMeta{ + Kind: KindSparkApplication, + APIVersion: sparkOp.SchemeGroupVersion.String(), + }, sparkApp.TypeMeta) + + // Application spec + assert.Equal(t, flytek8s.GetServiceAccountNameFromTaskExecutionMetadata(taskCtx.TaskExecutionMetadata()), *sparkApp.Spec.ServiceAccount) + assert.Equal(t, sparkOp.PythonApplicationType, sparkApp.Spec.Type) + assert.Equal(t, testImage, *sparkApp.Spec.Image) + assert.Equal(t, testArgs, sparkApp.Spec.Arguments) + assert.Equal(t, sparkOp.RestartPolicy{ + Type: sparkOp.OnFailure, + OnSubmissionFailureRetries: intPtr(int32(14)), + }, sparkApp.Spec.RestartPolicy) + assert.Equal(t, sparkMainClass, *sparkApp.Spec.MainClass) + assert.Equal(t, sparkApplicationFile, *sparkApp.Spec.MainApplicationFile) + + // Driver + assert.Equal(t, utils.UnionMaps(defaultConfig.DefaultAnnotations, map[string]string{"annotation-1": "val1"}), sparkApp.Spec.Driver.Annotations) + assert.Equal(t, utils.UnionMaps(defaultConfig.DefaultLabels, map[string]string{"label-1": "val1"}), sparkApp.Spec.Driver.Labels) + assert.Equal(t, len(findEnvVarByName(sparkApp.Spec.Driver.Env, 
"FLYTE_MAX_ATTEMPTS").Value), 1) + assert.Equal(t, defaultConfig.DefaultEnvVars["foo"], findEnvVarByName(sparkApp.Spec.Driver.Env, "foo").Value) + assert.Equal(t, defaultConfig.DefaultEnvVars["fooEnv"], findEnvVarByName(sparkApp.Spec.Driver.Env, "fooEnv").Value) + assert.Equal(t, findEnvVarByName(dummyEnvVarsWithSecretRef, "SECRET"), findEnvVarByName(sparkApp.Spec.Driver.Env, "SECRET")) + assert.Equal(t, 10, len(sparkApp.Spec.Driver.Env)) + assert.Equal(t, testImage, *sparkApp.Spec.Driver.Image) + assert.Equal(t, flytek8s.GetServiceAccountNameFromTaskExecutionMetadata(taskCtx.TaskExecutionMetadata()), *sparkApp.Spec.Driver.ServiceAccount) + assert.Equal(t, defaultConfig.DefaultPodSecurityContext, sparkApp.Spec.Driver.SecurityContenxt) + assert.Equal(t, defaultConfig.DefaultPodDNSConfig, sparkApp.Spec.Driver.DNSConfig) + assert.Equal(t, defaultConfig.EnableHostNetworkingPod, sparkApp.Spec.Driver.HostNetwork) + assert.Equal(t, defaultConfig.SchedulerName, *sparkApp.Spec.Driver.SchedulerName) + assert.Equal(t, []corev1.Toleration{ + defaultConfig.DefaultTolerations[0], + extraToleration, + }, sparkApp.Spec.Driver.Tolerations) + assert.Equal(t, map[string]string{ + "x/default": "true", + "x/custom": "foo", + }, sparkApp.Spec.Driver.NodeSelector) + assert.Equal(t, &corev1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ + NodeSelectorTerms: []corev1.NodeSelectorTerm{ + { + MatchExpressions: []corev1.NodeSelectorRequirement{ + defaultConfig.DefaultAffinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0], + *defaultConfig.NonInterruptibleNodeSelectorRequirement, + }, + }, + }, + }, + }, sparkApp.Spec.Driver.Affinity.NodeAffinity) + cores, _ := strconv.ParseInt(dummySparkConf["spark.driver.cores"], 10, 32) + assert.Equal(t, intPtr(int32(cores)), sparkApp.Spec.Driver.Cores) + assert.Equal(t, dummySparkConf["spark.driver.memory"], *sparkApp.Spec.Driver.Memory) + + // Executor + 
assert.Equal(t, utils.UnionMaps(defaultConfig.DefaultAnnotations, map[string]string{"annotation-1": "val1"}), sparkApp.Spec.Executor.Annotations) + assert.Equal(t, utils.UnionMaps(defaultConfig.DefaultLabels, map[string]string{"label-1": "val1"}), sparkApp.Spec.Executor.Labels) + assert.Equal(t, defaultConfig.DefaultEnvVars["foo"], findEnvVarByName(sparkApp.Spec.Executor.Env, "foo").Value) + assert.Equal(t, defaultConfig.DefaultEnvVars["fooEnv"], findEnvVarByName(sparkApp.Spec.Executor.Env, "fooEnv").Value) + assert.Equal(t, findEnvVarByName(dummyEnvVarsWithSecretRef, "SECRET"), findEnvVarByName(sparkApp.Spec.Executor.Env, "SECRET")) + assert.Equal(t, 10, len(sparkApp.Spec.Executor.Env)) + assert.Equal(t, testImage, *sparkApp.Spec.Executor.Image) + assert.Equal(t, defaultConfig.DefaultPodSecurityContext, sparkApp.Spec.Executor.SecurityContenxt) + assert.Equal(t, defaultConfig.DefaultPodDNSConfig, sparkApp.Spec.Executor.DNSConfig) + assert.Equal(t, defaultConfig.EnableHostNetworkingPod, sparkApp.Spec.Executor.HostNetwork) + assert.Equal(t, defaultConfig.SchedulerName, *sparkApp.Spec.Executor.SchedulerName) + assert.ElementsMatch(t, []corev1.Toleration{ + defaultConfig.DefaultTolerations[0], + extraToleration, + defaultConfig.InterruptibleTolerations[0], + }, sparkApp.Spec.Executor.Tolerations) + assert.Equal(t, map[string]string{ + "x/default": "true", + "x/custom": "foo", + "x/interruptible": "true", + }, sparkApp.Spec.Executor.NodeSelector) + assert.Equal(t, &corev1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{ + NodeSelectorTerms: []corev1.NodeSelectorTerm{ + { + MatchExpressions: []corev1.NodeSelectorRequirement{ + defaultConfig.DefaultAffinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0], + *defaultConfig.InterruptibleNodeSelectorRequirement, + }, + }, + }, + }, + }, sparkApp.Spec.Executor.Affinity.NodeAffinity) + cores, _ = 
strconv.ParseInt(dummySparkConf["spark.executor.cores"], 10, 32) + instances, _ := strconv.ParseInt(dummySparkConf["spark.executor.instances"], 10, 32) + assert.Equal(t, intPtr(int32(instances)), sparkApp.Spec.Executor.Instances) + assert.Equal(t, intPtr(int32(cores)), sparkApp.Spec.Executor.Cores) + assert.Equal(t, dummySparkConf["spark.executor.memory"], *sparkApp.Spec.Executor.Memory) +} + +func TestGetPropertiesSpark(t *testing.T) { + sparkResourceHandler := sparkResourceHandler{} + expected := k8s.PluginProperties{} + assert.Equal(t, expected, sparkResourceHandler.GetProperties()) +} + +func TestGetTaskPhaseWithNamespaceInLogContext(t *testing.T) { + sparkResourceHandler := sparkResourceHandler{} + ctx := context.TODO() + + pluginCtx := dummySparkPluginContext(dummySparkTaskTemplateContainer("", dummySparkConf), k8s.PluginState{}) + taskPhase, err := sparkResourceHandler.GetTaskPhase(ctx, pluginCtx, dummySparkApplication(sj.RunningState)) + assert.NoError(t, err) + assert.NotNil(t, taskPhase.Info()) + assert.NotNil(t, taskPhase.Info().LogContext) + assert.Equal(t, 2, len(taskPhase.Info().LogContext.Pods)) + + // Verify namespace is set in the driver pod log context + driverPodLogContext := taskPhase.Info().LogContext.Pods[0] + assert.Equal(t, "spark-namespace", driverPodLogContext.Namespace) + assert.Equal(t, "spark-pod", driverPodLogContext.PodName) + assert.Equal(t, defaultDriverPrimaryContainerName, driverPodLogContext.PrimaryContainerName) +} + +func TestGetTaskPhaseWithFailedPod(t *testing.T) { + sparkResourceHandler := sparkResourceHandler{} + ctx := context.TODO() + + // Create a failed driver pod + pod := &corev1.Pod{ + ObjectMeta: v1.ObjectMeta{ + Name: "spark-pod", + Namespace: "spark-namespace", + }, + Status: corev1.PodStatus{ + Phase: corev1.PodFailed, + ContainerStatuses: []corev1.ContainerStatus{ + { + Name: defaultDriverPrimaryContainerName, + State: corev1.ContainerState{ + Terminated: &corev1.ContainerStateTerminated{ + ExitCode: 1, + Reason: 
"Error", + Message: "Container failed", + }, + }, + }, + }, + }, + } + + pluginCtx := dummySparkPluginContextWithPods(dummySparkTaskTemplateContainer("", dummySparkConf), k8s.PluginState{}, pod) + + // Even though SparkApplication status is running, should return failure due to pod status + taskPhase, err := sparkResourceHandler.GetTaskPhase(ctx, pluginCtx, dummySparkApplication(sj.RunningState)) + assert.NoError(t, err) + assert.True(t, taskPhase.Phase().IsFailure()) +} + +func TestGetTaskPhaseWithPendingPodInvalidImage(t *testing.T) { + sparkResourceHandler := sparkResourceHandler{} + ctx := context.TODO() + + // Create a pending driver pod with InvalidImageName - this should fail immediately + pod := &corev1.Pod{ + ObjectMeta: v1.ObjectMeta{ + Name: "spark-pod", + Namespace: "spark-namespace", + }, + Status: corev1.PodStatus{ + Phase: corev1.PodPending, + Conditions: []corev1.PodCondition{ + { + Type: corev1.PodReady, + Status: corev1.ConditionFalse, + LastTransitionTime: v1.Time{Time: time.Now()}, + Reason: "ContainersNotReady", + Message: "containers with unready status: [spark-kubernetes-driver]", + }, + }, + ContainerStatuses: []corev1.ContainerStatus{ + { + Name: defaultDriverPrimaryContainerName, + Ready: false, + State: corev1.ContainerState{ + Waiting: &corev1.ContainerStateWaiting{ + Reason: "InvalidImageName", + Message: "Invalid image name", + }, + }, + }, + }, + }, + } + + pluginCtx := dummySparkPluginContextWithPods(dummySparkTaskTemplateContainer("", dummySparkConf), k8s.PluginState{}, pod) + + taskPhase, err := sparkResourceHandler.GetTaskPhase(ctx, pluginCtx, dummySparkApplication(sj.SubmittedState)) + assert.NoError(t, err) + // Should detect the InvalidImageName and return a failure phase + assert.True(t, taskPhase.Phase().IsFailure()) +} + +func TestGetTaskPhaseContainerNameConstant(t *testing.T) { + sparkResourceHandler := sparkResourceHandler{} + ctx := context.TODO() + + pluginCtx := 
dummySparkPluginContext(dummySparkTaskTemplateContainer("", dummySparkConf), k8s.PluginState{}) + + taskPhase, err := sparkResourceHandler.GetTaskPhase(ctx, pluginCtx, dummySparkApplication(sj.CompletedState)) + assert.NoError(t, err) + assert.NotNil(t, taskPhase.Info()) + assert.NotNil(t, taskPhase.Info().LogContext) + + // Verify the constant is used for driver container names + driverPodLogContext := taskPhase.Info().LogContext.Pods[0] + assert.Equal(t, defaultDriverPrimaryContainerName, driverPodLogContext.PrimaryContainerName) + assert.Equal(t, 1, len(driverPodLogContext.Containers)) + assert.Equal(t, defaultDriverPrimaryContainerName, driverPodLogContext.Containers[0].ContainerName) +} diff --git a/flyteplugins/go/tasks/plugins/testing/config.go b/flyteplugins/go/tasks/plugins/testing/config.go new file mode 100644 index 0000000000..7aebb85d5b --- /dev/null +++ b/flyteplugins/go/tasks/plugins/testing/config.go @@ -0,0 +1,23 @@ +package testing + +import ( + "time" + + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/config" + flytestdconfig "github.com/flyteorg/flyte/v2/flytestdlib/config" +) + +//go:generate pflags Config --default-var=defaultConfig + +var ( + defaultConfig = Config{ + SleepDuration: flytestdconfig.Duration{Duration: 0 * time.Second}, + } + + ConfigSection = config.MustRegisterSubSection(echoTaskType, &defaultConfig) +) + +type Config struct { + // SleepDuration indicates the amount of time before transitioning to success + SleepDuration flytestdconfig.Duration `json:"sleep-duration" pflag:",Indicates the amount of time before transitioning to success"` +} diff --git a/flyteplugins/go/tasks/plugins/testing/config_flags.go b/flyteplugins/go/tasks/plugins/testing/config_flags.go new file mode 100755 index 0000000000..f4b2e60c7a --- /dev/null +++ b/flyteplugins/go/tasks/plugins/testing/config_flags.go @@ -0,0 +1,55 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. 
+ +package testing + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. +func (Config) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (Config) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (Config) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in Config and its nested types. The format of the +// flags is json-name.json-sub-name... etc. +func (cfg Config) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("Config", pflag.ExitOnError) + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "sleep-duration"), defaultConfig.SleepDuration.String(), "Indicates the amount of time before transitioning to success") + return cmdFlags +} diff --git a/flyteplugins/go/tasks/plugins/testing/config_flags_test.go b/flyteplugins/go/tasks/plugins/testing/config_flags_test.go new file mode 100755 index 0000000000..023e8986e0 --- /dev/null +++ b/flyteplugins/go/tasks/plugins/testing/config_flags_test.go @@ -0,0 +1,116 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. 
+ +package testing + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. +func canGetElementConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. +func jsonUnmarshalerHookConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. 
Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_Config(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_Config(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_Config(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_Config(val, result)) +} + +func testDecodeRaw_Config(t *testing.T, vStringSlice, result interface{}) { + assert.NoError(t, decode_Config(vStringSlice, result)) +} + +func TestConfig_GetPFlagSet(t *testing.T) { + val := Config{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) +} + +func TestConfig_SetFlags(t *testing.T) { + actual := Config{} + cmdFlags := actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_sleep-duration", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := defaultConfig.SleepDuration.String() + + cmdFlags.Set("sleep-duration", testValue) + if vString, err := cmdFlags.GetString("sleep-duration"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.SleepDuration) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flyteplugins/go/tasks/plugins/testing/echo.go b/flyteplugins/go/tasks/plugins/testing/echo.go new file mode 100644 index 0000000000..52a2c09e72 --- /dev/null +++ 
b/flyteplugins/go/tasks/plugins/testing/echo.go @@ -0,0 +1,191 @@ +package testing + +import ( + "context" + "fmt" + "sync" + "time" + + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/ioutils" + + "github.com/flyteorg/flyte/v2/flytestdlib/storage" + idlcore "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +const ( + echoTaskType = "echo" +) + +type EchoPlugin struct { + enqueueOwner core.EnqueueOwner + taskStartTimes map[string]time.Time + sync.Mutex +} + +func (e *EchoPlugin) GetID() string { + return echoTaskType +} + +func (e *EchoPlugin) GetProperties() core.PluginProperties { + return core.PluginProperties{} +} + +// Enqueue the task to be re-evaluated after SleepDuration. +// If the task is already enqueued, return the start time of the task. +func (e *EchoPlugin) addTask(ctx context.Context, tCtx core.TaskExecutionContext) time.Time { + e.Lock() + defer e.Unlock() + var startTime time.Time + var exists bool + taskExecutionID := tCtx.TaskExecutionMetadata().GetTaskExecutionID().GetGeneratedName() + if startTime, exists = e.taskStartTimes[taskExecutionID]; !exists { + startTime = time.Now() + e.taskStartTimes[taskExecutionID] = startTime + + // start timer to enqueue owner once task sleep duration has elapsed + go func() { + echoConfig := ConfigSection.GetConfig().(*Config) + time.Sleep(echoConfig.SleepDuration.Duration) + // TODO @pvditt fix + //labels := map[string]string{ + // k8s.WorkflowID: tCtx.TaskExecutionMetadata().GetOwnerID().String(), + //} + //if err := e.enqueueOwner(labels); err != nil { + // logger.Warnf(ctx, "failed to enqueue owner [%s]: %v", tCtx.TaskExecutionMetadata().GetOwnerID(), err) + //} + }() + } + return startTime +} + +// Remove the task from the taskStartTimes map. 
+func (e *EchoPlugin) removeTask(taskExecutionID string) { + e.Lock() + defer e.Unlock() + delete(e.taskStartTimes, taskExecutionID) +} + +func (e *EchoPlugin) Handle(ctx context.Context, tCtx core.TaskExecutionContext) (core.Transition, error) { + echoConfig := ConfigSection.GetConfig().(*Config) + + if echoConfig.SleepDuration.Duration == time.Duration(0) { + return copyInputsToOutputs(ctx, tCtx) + } + + startTime := e.addTask(ctx, tCtx) + + if time.Since(startTime) >= echoConfig.SleepDuration.Duration { + return copyInputsToOutputs(ctx, tCtx) + } + + return core.DoTransition(core.PhaseInfoRunning(core.DefaultPhaseVersion, nil)), nil +} + +func (e *EchoPlugin) Abort(ctx context.Context, tCtx core.TaskExecutionContext) error { + return nil +} + +func (e *EchoPlugin) Finalize(ctx context.Context, tCtx core.TaskExecutionContext) error { + taskExecutionID := tCtx.TaskExecutionMetadata().GetTaskExecutionID().GetGeneratedName() + e.removeTask(taskExecutionID) + return nil +} + +// copyInputsToOutputs copies the input literals to the output location. 
+func copyInputsToOutputs(ctx context.Context, tCtx core.TaskExecutionContext) (core.Transition, error) { + inputToOutputVariableMappings, err := compileInputToOutputVariableMappings(ctx, tCtx) + if err != nil { + return core.UnknownTransition, err + } + + if len(inputToOutputVariableMappings) > 0 { + inputLiterals, err := tCtx.InputReader().Get(ctx) + if err != nil { + return core.UnknownTransition, err + } + + outputLiterals := make(map[string]*idlcore.Literal, len(inputToOutputVariableMappings)) + for inputVariableName, outputVariableName := range inputToOutputVariableMappings { + outputLiterals[outputVariableName] = inputLiterals.Literals[inputVariableName] + } + + outputLiteralMap := &idlcore.LiteralMap{ + Literals: outputLiterals, + } + + outputFile := tCtx.OutputWriter().GetOutputPath() + if err := tCtx.DataStore().WriteProtobuf(ctx, outputFile, storage.Options{}, outputLiteralMap); err != nil { + return core.UnknownTransition, err + } + + or := ioutils.NewRemoteFileOutputReader(ctx, tCtx.DataStore(), tCtx.OutputWriter(), 0) + if err = tCtx.OutputWriter().Put(ctx, or); err != nil { + return core.UnknownTransition, err + } + } + return core.DoTransition(core.PhaseInfoSuccess(nil)), nil +} + +func compileInputToOutputVariableMappings(ctx context.Context, tCtx core.TaskExecutionContext) (map[string]string, error) { + // validate outputs are castable from inputs otherwise error as this plugin is not applicable + taskTemplate, err := tCtx.TaskReader().Read(ctx) + if err != nil { + return nil, fmt.Errorf("failed to read TaskTemplate: [%w]", err) + } + + var inputs, outputs map[string]*idlcore.Variable + if taskTemplate.Interface != nil { + if taskTemplate.Interface.Inputs != nil { + inputs = taskTemplate.Interface.Inputs.Variables + } + if taskTemplate.Interface.Outputs != nil { + outputs = taskTemplate.Interface.Outputs.Variables + } + } + + if len(inputs) != len(outputs) { + return nil, fmt.Errorf("the number of input [%d] and output [%d] variables does not 
match", len(inputs), len(outputs)) + } else if len(inputs) > 1 { + return nil, fmt.Errorf("this plugin does not currently support more than one input variable") + } + + inputToOutputVariableMappings := make(map[string]string) + outputVariableNameUsed := make(map[string]struct{}) + for inputVariableName := range inputs { + firstCastableOutputName := "" + for outputVariableName := range outputs { + // TODO - need to check if types are castable to support multiple values + if _, ok := outputVariableNameUsed[outputVariableName]; !ok { + firstCastableOutputName = outputVariableName + break + } + } + + if len(firstCastableOutputName) == 0 { + return nil, fmt.Errorf("no castable output variable found for input variable [%s]", inputVariableName) + } + + outputVariableNameUsed[firstCastableOutputName] = struct{}{} + inputToOutputVariableMappings[inputVariableName] = firstCastableOutputName + } + + return inputToOutputVariableMappings, nil +} + +func init() { + pluginmachinery.PluginRegistry().RegisterCorePlugin( + core.PluginEntry{ + ID: echoTaskType, + RegisteredTaskTypes: []core.TaskType{echoTaskType}, + LoadPlugin: func(ctx context.Context, iCtx core.SetupContext) (core.Plugin, error) { + return &EchoPlugin{ + enqueueOwner: iCtx.EnqueueOwner(), + taskStartTimes: make(map[string]time.Time), + }, nil + }, + IsDefault: false, + }, + ) +} diff --git a/flyteplugins/go/tasks/plugins/webapi/athena/config.go b/flyteplugins/go/tasks/plugins/webapi/athena/config.go new file mode 100644 index 0000000000..ac55b3f58e --- /dev/null +++ b/flyteplugins/go/tasks/plugins/webapi/athena/config.go @@ -0,0 +1,62 @@ +package athena + +import ( + "time" + + pluginsConfig "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/config" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/webapi" + "github.com/flyteorg/flyte/v2/flytestdlib/config" +) + +//go:generate pflags Config --default-var=defaultConfig + 
+var ( + defaultConfig = Config{ + WebAPI: webapi.PluginConfig{ + ResourceQuotas: map[core.ResourceNamespace]int{ + "default": 1000, + }, + ReadRateLimiter: webapi.RateLimiterConfig{ + Burst: 100, + QPS: 10, + }, + WriteRateLimiter: webapi.RateLimiterConfig{ + Burst: 100, + QPS: 10, + }, + Caching: webapi.CachingConfig{ + Size: 500000, + ResyncInterval: config.Duration{Duration: 30 * time.Second}, + Workers: 10, + MaxSystemFailures: 5, + }, + ResourceMeta: nil, + }, + + ResourceConstraints: core.ResourceConstraintsSpec{ + ProjectScopeResourceConstraint: &core.ResourceConstraint{ + Value: 100, + }, + NamespaceScopeResourceConstraint: &core.ResourceConstraint{ + Value: 50, + }, + }, + + DefaultWorkGroup: "primary", + DefaultCatalog: "AwsDataCatalog", + } + + configSection = pluginsConfig.MustRegisterSubSection("athena", &defaultConfig) +) + +type Config struct { + WebAPI webapi.PluginConfig `json:"webApi" pflag:",Defines config for the base WebAPI plugin."` + ResourceConstraints core.ResourceConstraintsSpec `json:"resourceConstraints" pflag:"-,Defines resource constraints on how many executions to be created per project/overall at any given time."` + DefaultWorkGroup string `json:"defaultWorkGroup" pflag:",Defines the default workgroup to use when running on Athena unless overwritten by the task."` + DefaultCatalog string `json:"defaultCatalog" pflag:",Defines the default catalog to use when running on Athena unless overwritten by the task."` +} + +func GetConfig() *Config { + return configSection.GetConfig().(*Config) +} diff --git a/flyteplugins/go/tasks/plugins/webapi/athena/config_flags.go b/flyteplugins/go/tasks/plugins/webapi/athena/config_flags.go new file mode 100755 index 0000000000..8f35508943 --- /dev/null +++ b/flyteplugins/go/tasks/plugins/webapi/athena/config_flags.go @@ -0,0 +1,64 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. 
+ +package athena + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. +func (Config) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (Config) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (Config) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in Config and its nested types. The format of the +// flags is json-name.json-sub-name... etc. 
+func (cfg Config) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("Config", pflag.ExitOnError) + cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "webApi.readRateLimiter.qps"), defaultConfig.WebAPI.ReadRateLimiter.QPS, "Defines the max rate of calls per second.") + cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "webApi.readRateLimiter.burst"), defaultConfig.WebAPI.ReadRateLimiter.Burst, "Defines the maximum burst size.") + cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "webApi.writeRateLimiter.qps"), defaultConfig.WebAPI.WriteRateLimiter.QPS, "Defines the max rate of calls per second.") + cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "webApi.writeRateLimiter.burst"), defaultConfig.WebAPI.WriteRateLimiter.Burst, "Defines the maximum burst size.") + cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "webApi.caching.size"), defaultConfig.WebAPI.Caching.Size, "Defines the maximum number of items to cache.") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "webApi.caching.resyncInterval"), defaultConfig.WebAPI.Caching.ResyncInterval.String(), "Defines the sync interval.") + cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "webApi.caching.workers"), defaultConfig.WebAPI.Caching.Workers, "Defines the number of workers to start up to process items.") + cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "webApi.caching.maxSystemFailures"), defaultConfig.WebAPI.Caching.MaxSystemFailures, "Defines the number of failures to fetch a task before failing the task.") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "defaultWorkGroup"), defaultConfig.DefaultWorkGroup, "Defines the default workgroup to use when running on Athena unless overwritten by the task.") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "defaultCatalog"), defaultConfig.DefaultCatalog, "Defines the default catalog to use when running on Athena unless overwritten by the task.") + return cmdFlags +} diff --git a/flyteplugins/go/tasks/plugins/webapi/athena/config_flags_test.go b/flyteplugins/go/tasks/plugins/webapi/athena/config_flags_test.go new 
file mode 100755 index 0000000000..ea7475faf1 --- /dev/null +++ b/flyteplugins/go/tasks/plugins/webapi/athena/config_flags_test.go @@ -0,0 +1,242 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package athena + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. +func canGetElementConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. +func jsonUnmarshalerHookConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. 
Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_Config(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_Config(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_Config(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_Config(val, result)) +} + +func testDecodeRaw_Config(t *testing.T, vStringSlice, result interface{}) { + assert.NoError(t, decode_Config(vStringSlice, result)) +} + +func TestConfig_GetPFlagSet(t *testing.T) { + val := Config{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) +} + +func TestConfig_SetFlags(t *testing.T) { + actual := Config{} + cmdFlags := actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_webApi.readRateLimiter.qps", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("webApi.readRateLimiter.qps", testValue) + if vInt, err := cmdFlags.GetInt("webApi.readRateLimiter.qps"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vInt), &actual.WebAPI.ReadRateLimiter.QPS) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_webApi.readRateLimiter.burst", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("webApi.readRateLimiter.burst", testValue) + 
if vInt, err := cmdFlags.GetInt("webApi.readRateLimiter.burst"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vInt), &actual.WebAPI.ReadRateLimiter.Burst) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_webApi.writeRateLimiter.qps", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("webApi.writeRateLimiter.qps", testValue) + if vInt, err := cmdFlags.GetInt("webApi.writeRateLimiter.qps"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vInt), &actual.WebAPI.WriteRateLimiter.QPS) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_webApi.writeRateLimiter.burst", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("webApi.writeRateLimiter.burst", testValue) + if vInt, err := cmdFlags.GetInt("webApi.writeRateLimiter.burst"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vInt), &actual.WebAPI.WriteRateLimiter.Burst) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_webApi.caching.size", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("webApi.caching.size", testValue) + if vInt, err := cmdFlags.GetInt("webApi.caching.size"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vInt), &actual.WebAPI.Caching.Size) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_webApi.caching.resyncInterval", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := defaultConfig.WebAPI.Caching.ResyncInterval.String() + + cmdFlags.Set("webApi.caching.resyncInterval", testValue) + if vString, err := cmdFlags.GetString("webApi.caching.resyncInterval"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.WebAPI.Caching.ResyncInterval) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_webApi.caching.workers", func(t *testing.T) { + + 
t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("webApi.caching.workers", testValue) + if vInt, err := cmdFlags.GetInt("webApi.caching.workers"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vInt), &actual.WebAPI.Caching.Workers) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_webApi.caching.maxSystemFailures", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("webApi.caching.maxSystemFailures", testValue) + if vInt, err := cmdFlags.GetInt("webApi.caching.maxSystemFailures"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vInt), &actual.WebAPI.Caching.MaxSystemFailures) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_defaultWorkGroup", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("defaultWorkGroup", testValue) + if vString, err := cmdFlags.GetString("defaultWorkGroup"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.DefaultWorkGroup) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_defaultCatalog", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("defaultCatalog", testValue) + if vString, err := cmdFlags.GetString("defaultCatalog"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.DefaultCatalog) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flyteplugins/go/tasks/plugins/webapi/athena/plugin.go b/flyteplugins/go/tasks/plugins/webapi/athena/plugin.go new file mode 100644 index 0000000000..333ce46076 --- /dev/null +++ b/flyteplugins/go/tasks/plugins/webapi/athena/plugin.go @@ -0,0 +1,229 @@ +package athena + +import ( + "context" + "fmt" + "strings" + "time" + + awsSdk "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/athena" + athenaTypes 
"github.com/aws/aws-sdk-go-v2/service/athena/types" + + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/aws" + errors2 "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/errors" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/webapi" + "github.com/flyteorg/flyte/v2/flytestdlib/errors" + "github.com/flyteorg/flyte/v2/flytestdlib/logger" + "github.com/flyteorg/flyte/v2/flytestdlib/promutils" + idlCore "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +const ( + ErrRemoteSystem errors.ErrorCode = "RemoteSystem" + ErrUser errors.ErrorCode = "User" + ErrSystem errors.ErrorCode = "System" +) + +type Plugin struct { + metricScope promutils.Scope + client *athena.Client + cfg *Config + awsConfig awsSdk.Config +} + +type ResourceWrapper struct { + Status *athenaTypes.QueryExecutionStatus + ResultsConfiguration *athenaTypes.ResultConfiguration +} + +func (p Plugin) GetConfig() webapi.PluginConfig { + return GetConfig().WebAPI +} + +func (p Plugin) ResourceRequirements(_ context.Context, _ webapi.TaskExecutionContextReader) ( + namespace core.ResourceNamespace, constraints core.ResourceConstraintsSpec, err error) { + + // Resource requirements are assumed to be the same. 
+ return "default", p.cfg.ResourceConstraints, nil +} + +func (p Plugin) Create(ctx context.Context, tCtx webapi.TaskExecutionContextReader) (resourceMeta webapi.ResourceMeta, + resource webapi.Resource, err error) { + + queryInfo, err := extractQueryInfo(ctx, tCtx) + if err != nil { + return nil, nil, err + } + + if len(queryInfo.Workgroup) == 0 { + queryInfo.Workgroup = p.cfg.DefaultWorkGroup + } + + if len(queryInfo.Catalog) == 0 { + queryInfo.Catalog = p.cfg.DefaultCatalog + } + + if len(queryInfo.Database) == 0 { + return nil, nil, errors.Errorf(errors2.BadTaskSpecification, "Database must not be empty.") + } + + // https://docs.aws.amazon.com/athena/latest/APIReference/API_StartQueryExecution.html dictates that the length + // must be within the range [32, 128]. + clientRequestToken, err := tCtx.TaskExecutionMetadata().GetTaskExecutionID().GetGeneratedNameWith(32, 128) + if err != nil { + return nil, nil, errors.Wrapf(errors2.BadTaskSpecification, err, + "Generated Name [%v] couldn't be converted to a ClientRequestToken", + tCtx.TaskExecutionMetadata().GetTaskExecutionID().GetGeneratedName()) + } + + resp, err := p.client.StartQueryExecution(ctx, &athena.StartQueryExecutionInput{ + ClientRequestToken: awsSdk.String(clientRequestToken), + QueryExecutionContext: &athenaTypes.QueryExecutionContext{ + Database: awsSdk.String(queryInfo.Database), + Catalog: awsSdk.String(queryInfo.Catalog), + }, + ResultConfiguration: &athenaTypes.ResultConfiguration{ + // Workgroup settings can override the output location setting. 
+ OutputLocation: awsSdk.String(tCtx.OutputWriter().GetRawOutputPrefix().String()), + }, + QueryString: awsSdk.String(queryInfo.QueryString), + WorkGroup: awsSdk.String(queryInfo.Workgroup), + }) + + if err != nil { + return "", "", err + } + + if resp.QueryExecutionId == nil { + return "", "", errors.Errorf(ErrRemoteSystem, "Service created an empty query id") + } + + return *resp.QueryExecutionId, nil, nil +} + +func (p Plugin) Get(ctx context.Context, tCtx webapi.GetContext) (latest webapi.Resource, err error) { + exec := tCtx.ResourceMeta().(string) + resp, err := p.client.GetQueryExecution(ctx, &athena.GetQueryExecutionInput{ + QueryExecutionId: awsSdk.String(exec), + }) + if err != nil { + return nil, err + } + + // Only cache fields we want to keep in memory instead of the potentially huge execution closure. + return ResourceWrapper{ + Status: resp.QueryExecution.Status, + ResultsConfiguration: resp.QueryExecution.ResultConfiguration, + }, nil +} + +func (p Plugin) Delete(ctx context.Context, tCtx webapi.DeleteContext) error { + resp, err := p.client.StopQueryExecution(ctx, &athena.StopQueryExecutionInput{ + QueryExecutionId: awsSdk.String(tCtx.ResourceMeta().(string)), + }) + if err != nil { + return err + } + + logger.Infof(ctx, "Deleted query execution [%v]", resp) + + return nil +} + +func (p Plugin) Status(ctx context.Context, tCtx webapi.StatusContext) (phase core.PhaseInfo, err error) { + execID := tCtx.ResourceMeta().(string) + exec := tCtx.Resource().(ResourceWrapper) + if exec.Status == nil { + return core.PhaseInfoUndefined, errors.Errorf(ErrSystem, "No Status field set.") + } + + switch exec.Status.State { + case athenaTypes.QueryExecutionStateQueued: + fallthrough + case athenaTypes.QueryExecutionStateRunning: + return core.PhaseInfoRunning(1, createTaskInfo(execID, p.awsConfig)), nil + case athenaTypes.QueryExecutionStateCancelled: + reason := "Remote execution was aborted." 
+ if reasonPtr := exec.Status.StateChangeReason; reasonPtr != nil { + reason = *reasonPtr + } + + return core.PhaseInfoRetryableFailure("ABORTED", reason, createTaskInfo(execID, p.awsConfig)), nil + case athenaTypes.QueryExecutionStateFailed: + reason := "Remote execution failed" + if reasonPtr := exec.Status.StateChangeReason; reasonPtr != nil { + reason = *reasonPtr + } + + return core.PhaseInfoRetryableFailure("FAILED", reason, createTaskInfo(execID, p.awsConfig)), nil + case athenaTypes.QueryExecutionStateSucceeded: + if outputLocation := exec.ResultsConfiguration.OutputLocation; outputLocation != nil { + // If WorkGroup settings overrode the client settings, the location submitted in the request might have been + // ignored. + err = writeOutput(ctx, tCtx, *outputLocation) + if err != nil { + logger.Warnf(ctx, "Failed to write output, uri [%s], err %s", *outputLocation, err.Error()) + return core.PhaseInfoUndefined, err + } + } + + return core.PhaseInfoSuccess(createTaskInfo(execID, p.awsConfig)), nil + } + + return core.PhaseInfoUndefined, errors.Errorf(ErrSystem, "Unknown execution phase [%v].", exec.Status.State) +} + +func createTaskInfo(queryID string, cfg awsSdk.Config) *core.TaskInfo { + timeNow := time.Now() + var consoleURL string + if strings.Contains(cfg.Region, "gov") { + consoleURL = "console.amazonaws-us-gov.com" + } else { + consoleURL = "console.aws.amazon.com" + } + return &core.TaskInfo{ + OccurredAt: &timeNow, + Logs: []*idlCore.TaskLog{ + { + Uri: fmt.Sprintf("https://%v.%v/athena/home?force®ion=%v#query/history/%v", + cfg.Region, + consoleURL, + cfg.Region, + queryID), + Name: "Athena Query Console", + }, + }, + ExternalResources: []*core.ExternalResource{ + { + ExternalID: queryID, + }, + }, + } +} + +func NewPlugin(_ context.Context, cfg *Config, awsConfig *aws.Config, metricScope promutils.Scope) (Plugin, error) { + sdkCfg, err := awsConfig.GetSdkConfig() + if err != nil { + return Plugin{}, err + } + + return Plugin{ + metricScope: 
metricScope, + client: athena.NewFromConfig(sdkCfg), + cfg: cfg, + awsConfig: sdkCfg, + }, nil +} + +func init() { + pluginmachinery.PluginRegistry().RegisterRemotePlugin(webapi.PluginEntry{ + ID: "athena", + SupportedTaskTypes: []core.TaskType{"hive", "presto"}, + PluginLoader: func(ctx context.Context, iCtx webapi.PluginSetupContext) (webapi.AsyncPlugin, error) { + return NewPlugin(ctx, GetConfig(), aws.GetConfig(), iCtx.MetricsScope()) + }, + }) +} diff --git a/flyteplugins/go/tasks/plugins/webapi/athena/plugin_test.go b/flyteplugins/go/tasks/plugins/webapi/athena/plugin_test.go new file mode 100644 index 0000000000..11fd9adadf --- /dev/null +++ b/flyteplugins/go/tasks/plugins/webapi/athena/plugin_test.go @@ -0,0 +1,38 @@ +package athena + +import ( + "testing" + + awsSdk "github.com/aws/aws-sdk-go-v2/aws" + "github.com/stretchr/testify/assert" + + idlCore "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +func TestCreateTaskInfo(t *testing.T) { + taskInfo := createTaskInfo("query_id", awsSdk.Config{ + Region: "us-east-1", + }) + assert.EqualValues(t, []*idlCore.TaskLog{ + { + Uri: "https://us-east-1.console.aws.amazon.com/athena/home?force&region=us-east-1#query/history/query_id", + Name: "Athena Query Console", + }, + }, taskInfo.Logs) + assert.Len(t, taskInfo.ExternalResources, 1) + assert.Equal(t, taskInfo.ExternalResources[0].ExternalID, "query_id") +} + +func TestCreateTaskInfoGovAWS(t *testing.T) { + taskInfo := createTaskInfo("query_id", awsSdk.Config{ + Region: "us-gov-east-1", + }) + assert.EqualValues(t, []*idlCore.TaskLog{ + { + Uri: "https://us-gov-east-1.console.amazonaws-us-gov.com/athena/home?force&region=us-gov-east-1#query/history/query_id", + Name: "Athena Query Console", + }, + }, taskInfo.Logs) + assert.Len(t, taskInfo.ExternalResources, 1) + assert.Equal(t, taskInfo.ExternalResources[0].ExternalID, "query_id") +} diff --git a/flyteplugins/go/tasks/plugins/webapi/athena/utils.go b/flyteplugins/go/tasks/plugins/webapi/athena/utils.go new
file mode 100644 index 0000000000..cdccba905b --- /dev/null +++ b/flyteplugins/go/tasks/plugins/webapi/athena/utils.go @@ -0,0 +1,150 @@ +package athena + +import ( + "context" + + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/errors" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core/template" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/ioutils" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/webapi" + "github.com/flyteorg/flyte/v2/flytestdlib/logger" + "github.com/flyteorg/flyte/v2/flytestdlib/utils" + pb "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" + pluginsIdl "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins" +) + +func writeOutput(ctx context.Context, tCtx webapi.StatusContext, externalLocation string) error { + taskTemplate, err := tCtx.TaskReader().Read(ctx) + if err != nil { + return err + } + + if taskTemplate.Interface == nil || taskTemplate.Interface.Outputs == nil || taskTemplate.Interface.Outputs.Variables == nil { + logger.Infof(ctx, "The task declares no outputs. Skipping writing the outputs.") + return nil + } + + resultsSchema, exists := taskTemplate.Interface.Outputs.Variables["results"] + if !exists { + logger.Infof(ctx, "The task declares no outputs. 
Skipping writing the outputs.") + return nil + } + + return tCtx.OutputWriter().Put(ctx, ioutils.NewInMemoryOutputReader( + &pb.LiteralMap{ + Literals: map[string]*pb.Literal{ + "results": { + Value: &pb.Literal_Scalar{ + Scalar: &pb.Scalar{Value: &pb.Scalar_Schema{ + Schema: &pb.Schema{ + Uri: externalLocation, + Type: resultsSchema.GetType().GetSchema(), + }, + }, + }, + }, + }, + }, + }, nil, nil)) +} + +type QueryInfo struct { + QueryString string + Workgroup string + Catalog string + Database string +} + +func validateHiveQuery(hiveQuery pluginsIdl.QuboleHiveJob) error { + if hiveQuery.Query == nil { + return errors.Errorf(errors.BadTaskSpecification, "Query is a required field.") + } + + if len(hiveQuery.Query.Query) == 0 { + return errors.Errorf(errors.BadTaskSpecification, "Query statement is a required field.") + } + + return nil +} + +func validatePrestoQuery(prestoQuery pluginsIdl.PrestoQuery) error { + if len(prestoQuery.Statement) == 0 { + return errors.Errorf(errors.BadTaskSpecification, "Statement is a required field.") + } + + return nil +} + +func extractQueryInfo(ctx context.Context, tCtx webapi.TaskExecutionContextReader) (QueryInfo, error) { + task, err := tCtx.TaskReader().Read(ctx) + if err != nil { + return QueryInfo{}, err + } + + switch task.Type { + case "hive": + custom := task.GetCustom() + hiveQuery := pluginsIdl.QuboleHiveJob{} + err := utils.UnmarshalStructToPb(custom, &hiveQuery) + if err != nil { + return QueryInfo{}, errors.Wrapf(ErrUser, err, "Expects a valid QubleHiveJob proto in custom field.") + } + + if err = validateHiveQuery(hiveQuery); err != nil { + return QueryInfo{}, errors.Wrapf(ErrUser, err, "Expects a valid QubleHiveJob proto in custom field.") + } + + outputs, err := template.Render(ctx, []string{ + hiveQuery.Query.Query, + hiveQuery.ClusterLabel, + }, template.Parameters{ + TaskExecMetadata: tCtx.TaskExecutionMetadata(), + Inputs: tCtx.InputReader(), + OutputPath: tCtx.OutputWriter(), + Task: tCtx.TaskReader(), + }) 
+ if err != nil { + return QueryInfo{}, err + } + + return QueryInfo{ + QueryString: outputs[0], + Database: outputs[1], + }, nil + case "presto": + custom := task.GetCustom() + prestoQuery := pluginsIdl.PrestoQuery{} + err := utils.UnmarshalStructToPb(custom, &prestoQuery) + if err != nil { + return QueryInfo{}, errors.Wrapf(ErrUser, err, "Expects a valid PrestoQuery proto in custom field.") + } + + if err = validatePrestoQuery(prestoQuery); err != nil { + return QueryInfo{}, errors.Wrapf(ErrUser, err, "Expects a valid PrestoQuery proto in custom field.") + } + + outputs, err := template.Render(ctx, []string{ + prestoQuery.RoutingGroup, + prestoQuery.Catalog, + prestoQuery.Schema, + prestoQuery.Statement, + }, template.Parameters{ + TaskExecMetadata: tCtx.TaskExecutionMetadata(), + Inputs: tCtx.InputReader(), + OutputPath: tCtx.OutputWriter(), + Task: tCtx.TaskReader(), + }) + if err != nil { + return QueryInfo{}, err + } + + return QueryInfo{ + Workgroup: outputs[0], + Catalog: outputs[1], + Database: outputs[2], + QueryString: outputs[3], + }, nil + } + + return QueryInfo{}, errors.Errorf(ErrUser, "Unexpected task type [%v].", task.Type) +} diff --git a/flyteplugins/go/tasks/plugins/webapi/athena/utils_test.go b/flyteplugins/go/tasks/plugins/webapi/athena/utils_test.go new file mode 100644 index 0000000000..0bbd2e1a00 --- /dev/null +++ b/flyteplugins/go/tasks/plugins/webapi/athena/utils_test.go @@ -0,0 +1,202 @@ +package athena + +import ( + "context" + "fmt" + "testing" + + "github.com/golang/protobuf/proto" + "github.com/stretchr/testify/assert" + + mocks2 "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core/mocks" + mocks3 "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/io/mocks" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/ioutils" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/webapi/mocks" + "github.com/flyteorg/flyte/v2/flytestdlib/storage" + 
"github.com/flyteorg/flyte/v2/flytestdlib/utils" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" + pb "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins" +) + +func Test_writeOutput(t *testing.T) { + ctx := context.Background() + t.Run("No Outputs", func(t *testing.T) { + taskReader := &mocks2.TaskReader{} + taskReader.OnRead(ctx).Return(&core.TaskTemplate{}, nil) + + statusContext := &mocks.StatusContext{} + statusContext.OnTaskReader().Return(taskReader) + + err := writeOutput(context.Background(), statusContext, "s3://my-external-bucket/key") + assert.NoError(t, err) + }) + + t.Run("No Output named results", func(t *testing.T) { + taskReader := &mocks2.TaskReader{} + taskReader.OnRead(ctx).Return(&core.TaskTemplate{ + Interface: &core.TypedInterface{ + Outputs: &core.VariableMap{ + Variables: map[string]*core.Variable{ + "myOutput": &core.Variable{}, + }, + }, + }, + }, nil) + + statusContext := &mocks.StatusContext{} + statusContext.OnTaskReader().Return(taskReader) + + err := writeOutput(context.Background(), statusContext, "s3://my-external-bucket/key") + assert.NoError(t, err) + }) + + t.Run("Valid Qubole", func(t *testing.T) { + statusContext := &mocks.StatusContext{} + taskReader := &mocks2.TaskReader{} + hive := &plugins.QuboleHiveJob{ + ClusterLabel: "mydb", + Query: &plugins.HiveQuery{ + Query: "Select * from mytable", + }, + } + + st, err := utils.MarshalPbToStruct(hive) + if !assert.NoError(t, err) { + assert.FailNowf(t, "expected to be able to marshal", "") + } + + taskReader.OnRead(ctx).Return(&core.TaskTemplate{ + Interface: &core.TypedInterface{ + Outputs: &core.VariableMap{ + Variables: map[string]*core.Variable{ + "results": { + Type: &core.LiteralType{ + Type: &core.LiteralType_Schema{ + Schema: &core.SchemaType{ + Columns: []*core.SchemaType_SchemaColumn{}, + }, + }, + }, + }, + }, + }, + }, + Custom: st, + }, nil) + + statusContext.OnTaskReader().Return(taskReader) + + ow := 
&mocks3.OutputWriter{} + externalLocation := "s3://my-external-bucket/key" + ow.OnPut(ctx, ioutils.NewInMemoryOutputReader( + &pb.LiteralMap{ + Literals: map[string]*pb.Literal{ + "results": { + Value: &pb.Literal_Scalar{ + Scalar: &pb.Scalar{ + Value: &pb.Scalar_Schema{ + Schema: &pb.Schema{ + Uri: externalLocation, + Type: &core.SchemaType{ + Columns: []*core.SchemaType_SchemaColumn{}, + }, + }, + }, + }, + }, + }, + }, + }, nil, nil)).Return(nil) + statusContext.OnOutputWriter().Return(ow) + + err = writeOutput(context.Background(), statusContext, externalLocation) + assert.NoError(t, err) + }) +} + +func Test_ExtractQueryInfo(t *testing.T) { + ctx := context.Background() + validProtos := []struct { + message proto.Message + taskType string + }{ + { + message: &plugins.QuboleHiveJob{ + ClusterLabel: "mydb", + Query: &plugins.HiveQuery{ + Query: "Select * from mytable", + }, + }, + taskType: "hive", + }, + { + message: &plugins.PrestoQuery{ + Statement: "Select * from mytable", + Schema: "mytable", + RoutingGroup: "primary", + Catalog: "catalog", + }, + taskType: "presto", + }, + } + + for _, validProto := range validProtos { + t.Run(fmt.Sprintf("Valid %v", validProto.taskType), func(t *testing.T) { + tCtx := &mocks.TaskExecutionContextReader{} + taskReader := &mocks2.TaskReader{} + st, err := utils.MarshalPbToStruct(validProto.message) + if !assert.NoError(t, err) { + assert.FailNowf(t, "expected to be able to marshal", "") + } + + taskReader.OnRead(ctx).Return(&core.TaskTemplate{ + Type: validProto.taskType, + Interface: &core.TypedInterface{ + Outputs: &core.VariableMap{ + Variables: map[string]*core.Variable{ + "results": { + Type: &core.LiteralType{ + Type: &core.LiteralType_Schema{ + Schema: &core.SchemaType{ + Columns: []*core.SchemaType_SchemaColumn{}, + }, + }, + }, + }, + }, + }, + }, + Custom: st, + }, nil) + + tCtx.OnTaskReader().Return(taskReader) + + tMeta := &mocks2.TaskExecutionMetadata{} + tCtx.OnTaskExecutionMetadata().Return(tMeta) + + tID := 
&mocks2.TaskExecutionID{} + tMeta.OnGetTaskExecutionID().Return(tID) + tMeta.OnGetNamespace().Return("my-namespace") + + tID.OnGetGeneratedName().Return("generated-name") + + ow := &mocks3.OutputWriter{} + tCtx.OnOutputWriter().Return(ow) + ow.OnGetOutputPrefixPath().Return("s3://another") + ow.OnGetRawOutputPrefix().Return("s3://another/output") + ow.OnGetCheckpointPrefix().Return("/checkpoint") + ow.OnGetPreviousCheckpointsPrefix().Return("/prev") + + ir := &mocks3.InputReader{} + tCtx.OnInputReader().Return(ir) + ir.OnGetInputPath().Return(storage.DataReference("s3://something")) + ir.OnGetInputPrefixPath().Return(storage.DataReference("s3://something/2")) + ir.OnGet(ctx).Return(nil, nil) + + q, err := extractQueryInfo(ctx, tCtx) + assert.NoError(t, err) + assert.True(t, len(q.QueryString) > 0) + }) + } +} diff --git a/flyteplugins/go/tasks/plugins/webapi/bigquery/config.go b/flyteplugins/go/tasks/plugins/webapi/bigquery/config.go new file mode 100644 index 0000000000..01e04ac4a6 --- /dev/null +++ b/flyteplugins/go/tasks/plugins/webapi/bigquery/config.go @@ -0,0 +1,73 @@ +// Package bigquery implements WebAPI plugin for Google BigQuery +package bigquery + +import ( + "time" + + pluginsConfig "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/config" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/google" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/webapi" + "github.com/flyteorg/flyte/v2/flytestdlib/config" +) + +//go:generate pflags Config --default-var=defaultConfig + +var ( + defaultConfig = Config{ + WebAPI: webapi.PluginConfig{ + ResourceQuotas: map[core.ResourceNamespace]int{ + "default": 1000, + }, + ReadRateLimiter: webapi.RateLimiterConfig{ + Burst: 100, + QPS: 10, + }, + WriteRateLimiter: webapi.RateLimiterConfig{ + Burst: 100, + QPS: 10, + }, + Caching: webapi.CachingConfig{ + Size: 500000, + ResyncInterval: 
config.Duration{Duration: 30 * time.Second}, + Workers: 10, + MaxSystemFailures: 5, + }, + ResourceMeta: nil, + }, + ResourceConstraints: core.ResourceConstraintsSpec{ + ProjectScopeResourceConstraint: &core.ResourceConstraint{ + Value: 100, + }, + NamespaceScopeResourceConstraint: &core.ResourceConstraint{ + Value: 50, + }, + }, + GoogleTokenSource: google.GetDefaultConfig(), + } + + configSection = pluginsConfig.MustRegisterSubSection("bigquery", &defaultConfig) +) + +// Config is config for 'bigquery' plugin +type Config struct { + // WebAPI defines config for the base WebAPI plugin + WebAPI webapi.PluginConfig `json:"webApi" pflag:",Defines config for the base WebAPI plugin."` + + // ResourceConstraints defines resource constraints on how many executions to be created per project/overall at any given time + ResourceConstraints core.ResourceConstraintsSpec `json:"resourceConstraints" pflag:"-,Defines resource constraints on how many executions to be created per project/overall at any given time."` + + // GoogleTokenSource configures token source for BigQuery client + GoogleTokenSource google.TokenSourceFactoryConfig `json:"googleTokenSource" pflag:",Defines Google token source"` + + // BigQueryEndpoint overrides BigQuery client endpoint, only for testing + BigQueryEndpoint string `json:"bigQueryEndpoint" pflag:",Overrides BigQuery client endpoint, only for testing. If not set, uses the default endpoint."` +} + +func GetConfig() *Config { + return configSection.GetConfig().(*Config) +} + +func SetConfig(cfg *Config) error { + return configSection.SetConfig(cfg) +} diff --git a/flyteplugins/go/tasks/plugins/webapi/bigquery/config_flags.go b/flyteplugins/go/tasks/plugins/webapi/bigquery/config_flags.go new file mode 100755 index 0000000000..7389f74aa9 --- /dev/null +++ b/flyteplugins/go/tasks/plugins/webapi/bigquery/config_flags.go @@ -0,0 +1,67 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. 
+ +package bigquery + +import ( + "encoding/json" + "reflect" + + "fmt" + + "github.com/spf13/pflag" +) + +// If v is a pointer, it will get its element value or the zero value of the element type. +// If v is not a pointer, it will return it as is. +func (Config) elemValueOrNil(v interface{}) interface{} { + if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { + if reflect.ValueOf(v).IsNil() { + return reflect.Zero(t.Elem()).Interface() + } else { + return reflect.ValueOf(v).Interface() + } + } else if v == nil { + return reflect.Zero(t).Interface() + } + + return v +} + +func (Config) mustJsonMarshal(v interface{}) string { + raw, err := json.Marshal(v) + if err != nil { + panic(err) + } + + return string(raw) +} + +func (Config) mustMarshalJSON(v json.Marshaler) string { + raw, err := v.MarshalJSON() + if err != nil { + panic(err) + } + + return string(raw) +} + +// GetPFlagSet will return strongly types pflags for all fields in Config and its nested types. The format of the +// flags is json-name.json-sub-name... etc. 
+func (cfg Config) GetPFlagSet(prefix string) *pflag.FlagSet { + cmdFlags := pflag.NewFlagSet("Config", pflag.ExitOnError) + cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "webApi.readRateLimiter.qps"), defaultConfig.WebAPI.ReadRateLimiter.QPS, "Defines the max rate of calls per second.") + cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "webApi.readRateLimiter.burst"), defaultConfig.WebAPI.ReadRateLimiter.Burst, "Defines the maximum burst size.") + cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "webApi.writeRateLimiter.qps"), defaultConfig.WebAPI.WriteRateLimiter.QPS, "Defines the max rate of calls per second.") + cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "webApi.writeRateLimiter.burst"), defaultConfig.WebAPI.WriteRateLimiter.Burst, "Defines the maximum burst size.") + cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "webApi.caching.size"), defaultConfig.WebAPI.Caching.Size, "Defines the maximum number of items to cache.") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "webApi.caching.resyncInterval"), defaultConfig.WebAPI.Caching.ResyncInterval.String(), "Defines the sync interval.") + cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "webApi.caching.workers"), defaultConfig.WebAPI.Caching.Workers, "Defines the number of workers to start up to process items.") + cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "webApi.caching.maxSystemFailures"), defaultConfig.WebAPI.Caching.MaxSystemFailures, "Defines the number of failures to fetch a task before failing the task.") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "googleTokenSource.type"), defaultConfig.GoogleTokenSource.Type, "Defines type of TokenSourceFactory, possible values are 'default' and 'gke-task-workload-identity'") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "googleTokenSource.gke-task-workload-identity.remoteClusterConfig.name"), defaultConfig.GoogleTokenSource.GkeTaskWorkloadIdentityTokenSourceFactoryConfig.RemoteClusterConfig.Name, "Friendly name of the remote cluster") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, 
"googleTokenSource.gke-task-workload-identity.remoteClusterConfig.endpoint"), defaultConfig.GoogleTokenSource.GkeTaskWorkloadIdentityTokenSourceFactoryConfig.RemoteClusterConfig.Endpoint, " Remote K8s cluster endpoint") + cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "googleTokenSource.gke-task-workload-identity.remoteClusterConfig.enabled"), defaultConfig.GoogleTokenSource.GkeTaskWorkloadIdentityTokenSourceFactoryConfig.RemoteClusterConfig.Enabled, " Boolean flag to enable or disable") + cmdFlags.String(fmt.Sprintf("%v%v", prefix, "bigQueryEndpoint"), defaultConfig.BigQueryEndpoint, "Overrides BigQuery client endpoint, only for testing. If not set, uses the default endpoint.") + return cmdFlags +} diff --git a/flyteplugins/go/tasks/plugins/webapi/bigquery/config_flags_test.go b/flyteplugins/go/tasks/plugins/webapi/bigquery/config_flags_test.go new file mode 100755 index 0000000000..768191ac97 --- /dev/null +++ b/flyteplugins/go/tasks/plugins/webapi/bigquery/config_flags_test.go @@ -0,0 +1,284 @@ +// Code generated by go generate; DO NOT EDIT. +// This file was generated by robots. + +package bigquery + +import ( + "encoding/json" + "fmt" + "reflect" + "strings" + "testing" + + "github.com/mitchellh/mapstructure" + "github.com/stretchr/testify/assert" +) + +var dereferencableKindsConfig = map[reflect.Kind]struct{}{ + reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, +} + +// Checks if t is a kind that can be dereferenced to get its underlying type. +func canGetElementConfig(t reflect.Kind) bool { + _, exists := dereferencableKindsConfig[t] + return exists +} + +// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the +// object. Otherwise, it'll just pass on the original data. 
+func jsonUnmarshalerHookConfig(_, to reflect.Type, data interface{}) (interface{}, error) { + unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() + if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || + (canGetElementConfig(to.Kind()) && to.Elem().Implements(unmarshalerType)) { + + raw, err := json.Marshal(data) + if err != nil { + fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + res := reflect.New(to).Interface() + err = json.Unmarshal(raw, &res) + if err != nil { + fmt.Printf("Failed to umarshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) + return data, nil + } + + return res, nil + } + + return data, nil +} + +func decode_Config(input, result interface{}) error { + config := &mapstructure.DecoderConfig{ + TagName: "json", + WeaklyTypedInput: true, + Result: result, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + jsonUnmarshalerHookConfig, + ), + } + + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +func join_Config(arr interface{}, sep string) string { + listValue := reflect.ValueOf(arr) + strs := make([]string, 0, listValue.Len()) + for i := 0; i < listValue.Len(); i++ { + strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) + } + + return strings.Join(strs, sep) +} + +func testDecodeJson_Config(t *testing.T, val, result interface{}) { + assert.NoError(t, decode_Config(val, result)) +} + +func testDecodeRaw_Config(t *testing.T, vStringSlice, result interface{}) { + assert.NoError(t, decode_Config(vStringSlice, result)) +} + +func TestConfig_GetPFlagSet(t *testing.T) { + val := Config{} + cmdFlags := val.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) +} + +func TestConfig_SetFlags(t *testing.T) { + actual := Config{} + cmdFlags := 
actual.GetPFlagSet("") + assert.True(t, cmdFlags.HasFlags()) + + t.Run("Test_webApi.readRateLimiter.qps", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("webApi.readRateLimiter.qps", testValue) + if vInt, err := cmdFlags.GetInt("webApi.readRateLimiter.qps"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vInt), &actual.WebAPI.ReadRateLimiter.QPS) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_webApi.readRateLimiter.burst", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("webApi.readRateLimiter.burst", testValue) + if vInt, err := cmdFlags.GetInt("webApi.readRateLimiter.burst"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vInt), &actual.WebAPI.ReadRateLimiter.Burst) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_webApi.writeRateLimiter.qps", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("webApi.writeRateLimiter.qps", testValue) + if vInt, err := cmdFlags.GetInt("webApi.writeRateLimiter.qps"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vInt), &actual.WebAPI.WriteRateLimiter.QPS) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_webApi.writeRateLimiter.burst", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("webApi.writeRateLimiter.burst", testValue) + if vInt, err := cmdFlags.GetInt("webApi.writeRateLimiter.burst"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vInt), &actual.WebAPI.WriteRateLimiter.Burst) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_webApi.caching.size", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("webApi.caching.size", testValue) + if vInt, err := cmdFlags.GetInt("webApi.caching.size"); err == nil { + testDecodeJson_Config(t, 
fmt.Sprintf("%v", vInt), &actual.WebAPI.Caching.Size) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_webApi.caching.resyncInterval", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := defaultConfig.WebAPI.Caching.ResyncInterval.String() + + cmdFlags.Set("webApi.caching.resyncInterval", testValue) + if vString, err := cmdFlags.GetString("webApi.caching.resyncInterval"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.WebAPI.Caching.ResyncInterval) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_webApi.caching.workers", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("webApi.caching.workers", testValue) + if vInt, err := cmdFlags.GetInt("webApi.caching.workers"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vInt), &actual.WebAPI.Caching.Workers) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_webApi.caching.maxSystemFailures", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("webApi.caching.maxSystemFailures", testValue) + if vInt, err := cmdFlags.GetInt("webApi.caching.maxSystemFailures"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vInt), &actual.WebAPI.Caching.MaxSystemFailures) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_googleTokenSource.type", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("googleTokenSource.type", testValue) + if vString, err := cmdFlags.GetString("googleTokenSource.type"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.GoogleTokenSource.Type) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_googleTokenSource.gke-task-workload-identity.remoteClusterConfig.name", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + 
cmdFlags.Set("googleTokenSource.gke-task-workload-identity.remoteClusterConfig.name", testValue) + if vString, err := cmdFlags.GetString("googleTokenSource.gke-task-workload-identity.remoteClusterConfig.name"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.GoogleTokenSource.GkeTaskWorkloadIdentityTokenSourceFactoryConfig.RemoteClusterConfig.Name) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_googleTokenSource.gke-task-workload-identity.remoteClusterConfig.endpoint", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("googleTokenSource.gke-task-workload-identity.remoteClusterConfig.endpoint", testValue) + if vString, err := cmdFlags.GetString("googleTokenSource.gke-task-workload-identity.remoteClusterConfig.endpoint"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), &actual.GoogleTokenSource.GkeTaskWorkloadIdentityTokenSourceFactoryConfig.RemoteClusterConfig.Endpoint) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_googleTokenSource.gke-task-workload-identity.remoteClusterConfig.enabled", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("googleTokenSource.gke-task-workload-identity.remoteClusterConfig.enabled", testValue) + if vBool, err := cmdFlags.GetBool("googleTokenSource.gke-task-workload-identity.remoteClusterConfig.enabled"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vBool), &actual.GoogleTokenSource.GkeTaskWorkloadIdentityTokenSourceFactoryConfig.RemoteClusterConfig.Enabled) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) + t.Run("Test_bigQueryEndpoint", func(t *testing.T) { + + t.Run("Override", func(t *testing.T) { + testValue := "1" + + cmdFlags.Set("bigQueryEndpoint", testValue) + if vString, err := cmdFlags.GetString("bigQueryEndpoint"); err == nil { + testDecodeJson_Config(t, fmt.Sprintf("%v", vString), 
&actual.BigQueryEndpoint) + + } else { + assert.FailNow(t, err.Error()) + } + }) + }) +} diff --git a/flyteplugins/go/tasks/plugins/webapi/bigquery/config_test.go b/flyteplugins/go/tasks/plugins/webapi/bigquery/config_test.go new file mode 100644 index 0000000000..88de9cf16b --- /dev/null +++ b/flyteplugins/go/tasks/plugins/webapi/bigquery/config_test.go @@ -0,0 +1,28 @@ +package bigquery + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "google.golang.org/protobuf/types/known/structpb" +) + +func TestUnmarshalBigQueryQueryConfig(t *testing.T) { + custom := structpb.Struct{ + Fields: map[string]*structpb.Value{ + "projectId": structpb.NewStringValue("project-id"), + "location": structpb.NewStringValue("EU"), + "query": structpb.NewStringValue("SELECT 1"), + }, + } + + config, err := unmarshalQueryJobConfig(&custom) + + assert.NoError(t, err) + + assert.Equal(t, config, &QueryJobConfig{ + ProjectID: "project-id", + Location: "EU", + Query: "SELECT 1", + }) +} diff --git a/flyteplugins/go/tasks/plugins/webapi/bigquery/integration_test.go b/flyteplugins/go/tasks/plugins/webapi/bigquery/integration_test.go new file mode 100644 index 0000000000..8a2d5d4ea4 --- /dev/null +++ b/flyteplugins/go/tasks/plugins/webapi/bigquery/integration_test.go @@ -0,0 +1,170 @@ +package bigquery + +import ( + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "google.golang.org/api/bigquery/v2" + + "github.com/flyteorg/flyte/v2/flyteidl2/clients/go/coreutils" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery" + pluginCore "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + pluginCoreMocks "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core/mocks" + pluginUtils "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/utils" + "github.com/flyteorg/flyte/v2/flyteplugins/tests" + 
"github.com/flyteorg/flyte/v2/flytestdlib/contextutils" + "github.com/flyteorg/flyte/v2/flytestdlib/promutils" + "github.com/flyteorg/flyte/v2/flytestdlib/promutils/labeled" + flyteIdlCore "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +const ( + httpPost string = "POST" + httpGet string = "GET" +) + +func TestEndToEnd(t *testing.T) { + server := newFakeBigQueryServer() + defer server.Close() + + iter := func(ctx context.Context, tCtx pluginCore.TaskExecutionContext) error { + return nil + } + + cfg := defaultConfig + cfg.BigQueryEndpoint = server.URL + cfg.WebAPI.Caching.Workers = 1 + cfg.WebAPI.Caching.ResyncInterval.Duration = 5 * time.Second + err := SetConfig(&cfg) + assert.NoError(t, err) + + pluginEntry := pluginmachinery.CreateRemotePlugin(newBigQueryJobTaskPlugin()) + plugin, err := pluginEntry.LoadPlugin(context.TODO(), newFakeSetupContext()) + assert.NoError(t, err) + + inputs, _ := coreutils.MakeLiteralMap(map[string]interface{}{"x": 1}) + template := flyteIdlCore.TaskTemplate{ + Type: bigqueryQueryJobTask, + Target: &flyteIdlCore.TaskTemplate_Sql{Sql: &flyteIdlCore.Sql{Statement: "SELECT 1", Dialect: flyteIdlCore.Sql_ANSI}}, + } + + t.Run("SELECT 1", func(t *testing.T) { + queryJobConfig := QueryJobConfig{ + ProjectID: "flyte", + } + + custom, _ := pluginUtils.MarshalObjToStruct(queryJobConfig) + template.Custom = custom + + phase := tests.RunPluginEndToEndTest(t, plugin, &template, inputs, nil, nil, iter) + + assert.Equal(t, true, phase.Phase().IsSuccess()) + }) + + t.Run("cache job result", func(t *testing.T) { + queryJobConfig := QueryJobConfig{ + ProjectID: "cache", + } + + custom, _ := pluginUtils.MarshalObjToStruct(queryJobConfig) + template.Custom = custom + + phase := tests.RunPluginEndToEndTest(t, plugin, &template, inputs, nil, nil, iter) + + assert.Equal(t, true, phase.Phase().IsSuccess()) + }) + + t.Run("pending job", func(t *testing.T) { + queryJobConfig := QueryJobConfig{ + ProjectID: "pending", + } + + custom, _ := 
pluginUtils.MarshalObjToStruct(queryJobConfig) + template.Custom = custom + + phase := tests.RunPluginEndToEndTest(t, plugin, &template, inputs, nil, nil, iter) + + assert.Equal(t, true, phase.Phase().IsSuccess()) + }) +} + +func newFakeBigQueryServer() *httptest.Server { + return httptest.NewServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { + if request.URL.Path == "/projects/flyte/jobs" && request.Method == httpPost { + writer.WriteHeader(200) + job := bigquery.Job{Status: &bigquery.JobStatus{State: bigqueryStatusRunning}} + bytes, _ := json.Marshal(job) + _, _ = writer.Write(bytes) + return + } + + if strings.HasPrefix(request.URL.Path, "/projects/flyte/jobs/") && request.Method == httpGet { + writer.WriteHeader(200) + job := bigquery.Job{Status: &bigquery.JobStatus{State: bigqueryStatusDone}, + Configuration: &bigquery.JobConfiguration{ + Query: &bigquery.JobConfigurationQuery{ + DestinationTable: &bigquery.TableReference{ + ProjectId: "project", DatasetId: "dataset", TableId: "table"}}}} + bytes, _ := json.Marshal(job) + _, _ = writer.Write(bytes) + return + } + + if request.URL.Path == "/projects/cache/jobs" && request.Method == httpPost { + writer.WriteHeader(200) + job := bigquery.Job{Status: &bigquery.JobStatus{State: bigqueryStatusDone}} + bytes, _ := json.Marshal(job) + _, _ = writer.Write(bytes) + return + } + + if strings.HasPrefix(request.URL.Path, "/projects/cache/jobs/") && request.Method == httpGet { + writer.WriteHeader(200) + job := bigquery.Job{Status: &bigquery.JobStatus{State: bigqueryStatusDone}, + Configuration: &bigquery.JobConfiguration{ + Query: &bigquery.JobConfigurationQuery{ + DestinationTable: &bigquery.TableReference{ + ProjectId: "project", DatasetId: "dataset", TableId: "table"}}}} + bytes, _ := json.Marshal(job) + _, _ = writer.Write(bytes) + return + } + + if request.URL.Path == "/projects/pending/jobs" && request.Method == httpPost { + writer.WriteHeader(200) + job := bigquery.Job{Status: 
&bigquery.JobStatus{State: bigqueryStatusPending}} + bytes, _ := json.Marshal(job) + _, _ = writer.Write(bytes) + return + } + + if strings.HasPrefix(request.URL.Path, "/projects/pending/jobs/") && request.Method == httpGet { + writer.WriteHeader(200) + job := bigquery.Job{Status: &bigquery.JobStatus{State: bigqueryStatusDone}} + bytes, _ := json.Marshal(job) + _, _ = writer.Write(bytes) + return + } + + writer.WriteHeader(500) + })) +} + +func newFakeSetupContext() *pluginCoreMocks.SetupContext { + fakeResourceRegistrar := pluginCoreMocks.ResourceRegistrar{} + fakeResourceRegistrar.On("RegisterResourceQuota", mock.Anything, mock.Anything, mock.Anything).Return(nil) + labeled.SetMetricKeys(contextutils.NamespaceKey) + + fakeSetupContext := pluginCoreMocks.SetupContext{} + fakeSetupContext.OnMetricsScope().Return(promutils.NewScope("test")) + fakeSetupContext.OnResourceRegistrar().Return(&fakeResourceRegistrar) + + return &fakeSetupContext +} diff --git a/flyteplugins/go/tasks/plugins/webapi/bigquery/plugin.go b/flyteplugins/go/tasks/plugins/webapi/bigquery/plugin.go new file mode 100644 index 0000000000..a51d275835 --- /dev/null +++ b/flyteplugins/go/tasks/plugins/webapi/bigquery/plugin.go @@ -0,0 +1,575 @@ +package bigquery + +import ( + "context" + "encoding/gob" + "fmt" + "net/http" + "time" + + structpb "github.com/golang/protobuf/ptypes/struct" + "golang.org/x/oauth2" + "google.golang.org/api/bigquery/v2" + "google.golang.org/api/googleapi" + "google.golang.org/api/option" + + pluginErrors "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/errors" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + pluginsCore "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/flytek8s" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/google" + 
"github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/ioutils" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/webapi" + "github.com/flyteorg/flyte/v2/flytestdlib/logger" + "github.com/flyteorg/flyte/v2/flytestdlib/promutils" + flyteIdlCore "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +const ( + bigqueryQueryJobTask = "bigquery_query_job_task" + bigqueryConsolePath = "https://console.cloud.google.com/bigquery" + bigqueryStatusRunning = "RUNNING" + bigqueryStatusPending = "PENDING" + bigqueryStatusDone = "DONE" +) + +type Plugin struct { + metricScope promutils.Scope + cfg *Config + googleTokenSource google.TokenSourceFactory +} + +type ResourceWrapper struct { + Status *bigquery.JobStatus + CreateError *googleapi.Error + OutputLocation string +} + +type ResourceMetaWrapper struct { + K8sServiceAccount string + Namespace string + JobReference bigquery.JobReference +} + +func (p Plugin) GetConfig() webapi.PluginConfig { + return GetConfig().WebAPI +} + +func (p Plugin) ResourceRequirements(_ context.Context, _ webapi.TaskExecutionContextReader) ( + namespace core.ResourceNamespace, constraints core.ResourceConstraintsSpec, err error) { + + // Resource requirements are assumed to be the same. 
+ return "default", p.cfg.ResourceConstraints, nil +} + +func (p Plugin) Create(ctx context.Context, taskCtx webapi.TaskExecutionContextReader) (webapi.ResourceMeta, + webapi.Resource, error) { + return p.createImpl(ctx, taskCtx) +} + +func (p Plugin) createImpl(ctx context.Context, taskCtx webapi.TaskExecutionContextReader) (*ResourceMetaWrapper, + *ResourceWrapper, error) { + + taskTemplate, err := taskCtx.TaskReader().Read(ctx) + jobID := taskCtx.TaskExecutionMetadata().GetTaskExecutionID().GetGeneratedName() + + if err != nil { + return nil, nil, pluginErrors.Wrapf(pluginErrors.RuntimeFailure, err, "unable to fetch task specification") + } + + inputs, err := taskCtx.InputReader().Get(ctx) + + if err != nil { + return nil, nil, pluginErrors.Wrapf(pluginErrors.RuntimeFailure, err, "unable to fetch task inputs") + } + + var job *bigquery.Job + + namespace := taskCtx.TaskExecutionMetadata().GetNamespace() + k8sServiceAccount := flytek8s.GetServiceAccountNameFromTaskExecutionMetadata(taskCtx.TaskExecutionMetadata()) + identity := google.Identity{K8sNamespace: namespace, K8sServiceAccount: k8sServiceAccount} + client, err := p.newBigQueryClient(ctx, identity) + + if err != nil { + return nil, nil, pluginErrors.Wrapf(pluginErrors.RuntimeFailure, err, "unable to get bigquery client") + } + + if taskTemplate.Type == bigqueryQueryJobTask { + job, err = createQueryJob(jobID, taskTemplate.GetCustom(), inputs) + } else { + err = pluginErrors.Errorf(pluginErrors.BadTaskSpecification, "unexpected task type [%v]", taskTemplate.Type) + } + + if err != nil { + return nil, nil, err + } + + job.Configuration.Query.Query = taskTemplate.GetSql().Statement + job.Configuration.Labels = taskCtx.TaskExecutionMetadata().GetLabels() + + resp, err := client.Jobs.Insert(job.JobReference.ProjectId, job).Do() + + if err != nil { + apiError, ok := err.(*googleapi.Error) + resourceMeta := ResourceMetaWrapper{ + JobReference: *job.JobReference, + Namespace: namespace, + K8sServiceAccount: 
k8sServiceAccount, + } + + if ok && apiError.Code == 409 { + job, err := client.Jobs.Get(resourceMeta.JobReference.ProjectId, resourceMeta.JobReference.JobId).Do() + + if err != nil { + err := pluginErrors.Wrapf( + pluginErrors.RuntimeFailure, + err, + "failed to get job [%s]", + formatJobReference(resourceMeta.JobReference)) + + return nil, nil, err + } + + resource := ResourceWrapper{Status: job.Status} + + return &resourceMeta, &resource, nil + } + + if ok { + resource := ResourceWrapper{CreateError: apiError} + + return &resourceMeta, &resource, nil + } + + return nil, nil, pluginErrors.Wrapf(pluginErrors.RuntimeFailure, err, "failed to create query job") + } + + var outputLocation string + if resp.Status != nil && resp.Status.State == bigqueryStatusDone { + getResp, err := client.Jobs.Get(job.JobReference.ProjectId, job.JobReference.JobId).Do() + + if err != nil { + err := pluginErrors.Wrapf( + pluginErrors.RuntimeFailure, + err, + "failed to get job [%s]", + formatJobReference(*job.JobReference)) + + return nil, nil, err + } + outputLocation = constructOutputLocation(ctx, getResp) + } + resource := ResourceWrapper{Status: resp.Status, OutputLocation: outputLocation} + resourceMeta := ResourceMetaWrapper{ + JobReference: *job.JobReference, + Namespace: namespace, + K8sServiceAccount: k8sServiceAccount, + } + + return &resourceMeta, &resource, nil +} + +func createQueryJob(jobID string, custom *structpb.Struct, inputs *flyteIdlCore.LiteralMap) (*bigquery.Job, error) { + queryJobConfig, err := unmarshalQueryJobConfig(custom) + + if err != nil { + return nil, pluginErrors.Wrapf(pluginErrors.BadTaskSpecification, err, "can't unmarshall struct to QueryJobConfig") + } + + jobConfigurationQuery, err := getJobConfigurationQuery(queryJobConfig, inputs) + + if err != nil { + return nil, pluginErrors.Wrapf(pluginErrors.BadTaskSpecification, err, "unable to fetch task inputs") + } + + jobReference := bigquery.JobReference{ + JobId: jobID, + Location: 
queryJobConfig.Location, + ProjectId: queryJobConfig.ProjectID, + } + + return &bigquery.Job{ + Configuration: &bigquery.JobConfiguration{ + Query: jobConfigurationQuery, + }, + JobReference: &jobReference, + }, nil +} + +func (p Plugin) Get(ctx context.Context, taskCtx webapi.GetContext) (latest webapi.Resource, err error) { + return p.getImpl(ctx, taskCtx) +} + +func (p Plugin) getImpl(ctx context.Context, taskCtx webapi.GetContext) (wrapper *ResourceWrapper, err error) { + resourceMeta := taskCtx.ResourceMeta().(*ResourceMetaWrapper) + + identity := google.Identity{ + K8sNamespace: resourceMeta.Namespace, + K8sServiceAccount: resourceMeta.K8sServiceAccount, + } + client, err := p.newBigQueryClient(ctx, identity) + + if err != nil { + return nil, pluginErrors.Wrapf(pluginErrors.RuntimeFailure, err, "unable to get client") + } + + job, err := client.Jobs.Get(resourceMeta.JobReference.ProjectId, resourceMeta.JobReference.JobId).Location(resourceMeta.JobReference.Location).Do() + + if err != nil { + err := pluginErrors.Wrapf( + pluginErrors.RuntimeFailure, + err, + "failed to get job [%s]", + formatJobReference(resourceMeta.JobReference)) + + return nil, err + } + + outputLocation := constructOutputLocation(ctx, job) + return &ResourceWrapper{ + Status: job.Status, + OutputLocation: outputLocation, + }, nil +} + +func (p Plugin) Delete(ctx context.Context, taskCtx webapi.DeleteContext) error { + if taskCtx.ResourceMeta() == nil { + return nil + } + + resourceMeta := taskCtx.ResourceMeta().(*ResourceMetaWrapper) + + identity := google.Identity{ + K8sNamespace: resourceMeta.Namespace, + K8sServiceAccount: resourceMeta.K8sServiceAccount, + } + client, err := p.newBigQueryClient(ctx, identity) + + if err != nil { + return err + } + + _, err = client.Jobs.Cancel(resourceMeta.JobReference.ProjectId, resourceMeta.JobReference.JobId).Location(resourceMeta.JobReference.Location).Do() + + if err != nil { + return err + } + + logger.Infof(ctx, "Cancelled job [%s]", 
formatJobReference(resourceMeta.JobReference)) + + return nil +} + +func (p Plugin) Status(ctx context.Context, tCtx webapi.StatusContext) (phase core.PhaseInfo, err error) { + resourceMeta := tCtx.ResourceMeta().(*ResourceMetaWrapper) + resource := tCtx.Resource().(*ResourceWrapper) + version := pluginsCore.DefaultPhaseVersion + + if resource == nil { + return core.PhaseInfoUndefined, nil + } + + taskInfo := createTaskInfo(resourceMeta) + + if resource.CreateError != nil { + return handleCreateError(resource.CreateError, taskInfo), nil + } + + switch resource.Status.State { + case bigqueryStatusPending: + return core.PhaseInfoQueuedWithTaskInfo(time.Now(), version, "Query is PENDING", taskInfo), nil + + case bigqueryStatusRunning: + return core.PhaseInfoRunning(version, taskInfo), nil + + case bigqueryStatusDone: + if resource.Status.ErrorResult != nil { + return handleErrorResult( + resource.Status.ErrorResult.Reason, + resource.Status.ErrorResult.Message, + taskInfo), nil + } + err = writeOutput(ctx, tCtx, resource.OutputLocation) + if err != nil { + logger.Warnf(ctx, "Failed to write output, uri [%s], err %s", resource.OutputLocation, err.Error()) + return core.PhaseInfoUndefined, err + } + return pluginsCore.PhaseInfoSuccess(taskInfo), nil + } + + return core.PhaseInfoUndefined, pluginErrors.Errorf(pluginsCore.SystemErrorCode, "unknown execution phase [%v].", resource.Status.State) +} + +func constructOutputLocation(ctx context.Context, job *bigquery.Job) string { + if job == nil || job.Configuration == nil || job.Configuration.Query == nil || job.Configuration.Query.DestinationTable == nil { + return "" + } + dst := job.Configuration.Query.DestinationTable + outputLocation := fmt.Sprintf("bq://%v:%v.%v", dst.ProjectId, dst.DatasetId, dst.TableId) + logger.Debugf(ctx, "BigQuery saves query results to [%v]", outputLocation) + return outputLocation +} + +func writeOutput(ctx context.Context, tCtx webapi.StatusContext, OutputLocation string) error { + 
taskTemplate, err := tCtx.TaskReader().Read(ctx) + if err != nil { + return err + } + + if taskTemplate.Interface == nil || taskTemplate.Interface.Outputs == nil || taskTemplate.Interface.Outputs.Variables == nil { + logger.Infof(ctx, "The task declares no outputs. Skipping writing the outputs.") + return nil + } + + resultsStructuredDatasetType, exists := taskTemplate.Interface.Outputs.Variables["results"] + if !exists { + logger.Infof(ctx, "The task declares no outputs. Skipping writing the outputs.") + return nil + } + return tCtx.OutputWriter().Put(ctx, ioutils.NewInMemoryOutputReader( + &flyteIdlCore.LiteralMap{ + Literals: map[string]*flyteIdlCore.Literal{ + "results": { + Value: &flyteIdlCore.Literal_Scalar{ + Scalar: &flyteIdlCore.Scalar{ + Value: &flyteIdlCore.Scalar_StructuredDataset{ + StructuredDataset: &flyteIdlCore.StructuredDataset{ + Uri: OutputLocation, + Metadata: &flyteIdlCore.StructuredDatasetMetadata{ + StructuredDatasetType: resultsStructuredDatasetType.GetType().GetStructuredDatasetType(), + }, + }, + }, + }, + }, + }, + }, + }, nil, nil)) +} + +func handleCreateError(createError *googleapi.Error, taskInfo *core.TaskInfo) core.PhaseInfo { + code := fmt.Sprintf("http%d", createError.Code) + + userExecutionError := &flyteIdlCore.ExecutionError{ + Message: createError.Message, + Kind: flyteIdlCore.ExecutionError_USER, + Code: code, + } + + systemExecutionError := &flyteIdlCore.ExecutionError{ + Message: createError.Message, + Kind: flyteIdlCore.ExecutionError_SYSTEM, + Code: code, + } + + if createError.Code >= http.StatusBadRequest && createError.Code < http.StatusInternalServerError { + return core.PhaseInfoFailed(pluginsCore.PhasePermanentFailure, userExecutionError, taskInfo) + } + + if createError.Code >= http.StatusInternalServerError { + return core.PhaseInfoFailed(pluginsCore.PhaseRetryableFailure, systemExecutionError, taskInfo) + } + + // something unexpected happened, just terminate task + return 
core.PhaseInfoFailed(pluginsCore.PhasePermanentFailure, systemExecutionError, taskInfo) +} + +func handleErrorResult(reason string, message string, taskInfo *core.TaskInfo) core.PhaseInfo { + phaseCode := reason + phaseReason := message + + // see https://cloud.google.com/bigquery/docs/error-messages + + // user errors are errors where users have to take action, e.g. fix their code + // all errors with project configuration are also considered as user errors + + // system errors are errors where system doesn't work well and system owners have to take action + // all errors internal to BigQuery are also considered as system errors + + // transient errors are retryable, if any action is needed, errors are permanent + + switch reason { + case "": + return pluginsCore.PhaseInfoSuccess(taskInfo) + + // This error returns when you try to access a resource such as a dataset, table, view, or job that you + // don't have access to. This error also returns when you try to modify a read-only object. + case "accessDenied": + return pluginsCore.PhaseInfoFailure(phaseCode, phaseReason, taskInfo) + + // This error returns when there is a temporary server failure such as a network connection problem or + // a server overload. + case "backendError": + return pluginsCore.PhaseInfoSystemRetryableFailure(phaseCode, phaseReason, taskInfo) + + // This error returns when billing isn't enabled for the project. + case "billingNotEnabled": + return pluginsCore.PhaseInfoFailure(phaseCode, phaseReason, taskInfo) + + // This error returns when BigQuery has temporarily denylisted the operation you attempted to perform, + // usually to prevent a service outage. This error rarely occurs. + case "blocked": + return pluginsCore.PhaseInfoFailure(phaseCode, phaseReason, taskInfo) + + // This error returns when trying to create a job, dataset, or table that already exists. 
The error also + // returns when a job's writeDisposition property is set to WRITE_EMPTY and the destination table accessed + // by the job already exists. + case "duplicate": + return pluginsCore.PhaseInfoFailure(phaseCode, phaseReason, taskInfo) + + // This error returns when an internal error occurs within BigQuery. + case "internalError": + return pluginsCore.PhaseInfoSystemRetryableFailure(phaseCode, phaseReason, taskInfo) + + // This error returns when there is any kind of invalid input other than an invalid query, such as missing + // required fields or an invalid table schema. Invalid queries return an invalidQuery error instead. + case "invalid": + return pluginsCore.PhaseInfoFailure(phaseCode, phaseReason, taskInfo) + + // This error returns when you attempt to run an invalid query. + case "invalidQuery": + return pluginsCore.PhaseInfoFailure(phaseCode, phaseReason, taskInfo) + + // This error returns when you attempt to schedule a query with invalid user credentials. + case "invalidUser": + return pluginsCore.PhaseInfoSystemRetryableFailure(phaseCode, phaseReason, taskInfo) + + // This error returns when you refer to a resource (a dataset, a table, or a job) that doesn't exist. + // This can also occur when using snapshot decorators to refer to deleted tables that have recently been + // streamed to. + case "notFound": + return pluginsCore.PhaseInfoFailure(phaseCode, phaseReason, taskInfo) + + // This job error returns when you try to access a feature that isn't implemented. + case "notImplemented": + return pluginsCore.PhaseInfoFailure(phaseCode, phaseReason, taskInfo) + + // This error returns when your project exceeds a BigQuery quota, a custom quota, or when you haven't set up + // billing and you have exceeded the free tier for queries. 
+ case "quotaExceeded": + return pluginsCore.PhaseInfoRetryableFailure(phaseCode, phaseReason, taskInfo) + + case "rateLimitExceeded": + return pluginsCore.PhaseInfoRetryableFailure(phaseCode, phaseReason, taskInfo) + + // This error returns when you try to delete a dataset that contains tables or when you try to delete a job + // that is currently running. + case "resourceInUse": + return pluginsCore.PhaseInfoSystemRetryableFailure(phaseCode, phaseReason, taskInfo) + + // This error returns when your query uses too many resources. + case "resourcesExceeded": + return pluginsCore.PhaseInfoFailure(phaseCode, phaseReason, taskInfo) + + // This error returns when your query's results are larger than the maximum response size. Some queries execute + // in multiple stages, and this error returns when any stage returns a response size that is too large, even if + // the final result is smaller than the maximum. This error commonly returns when queries use an ORDER BY + // clause. + case "responseTooLarge": + return pluginsCore.PhaseInfoFailure(phaseCode, phaseReason, taskInfo) + + // This status code returns when a job is canceled. + case "stopped": + return pluginsCore.PhaseInfoFailure(phaseCode, phaseReason, taskInfo) + + // Certain BigQuery tables are backed by data managed by other Google product teams. This error indicates that + // one of these tables is unavailable. + case "tableUnavailable": + return pluginsCore.PhaseInfoSystemRetryableFailure(phaseCode, phaseReason, taskInfo) + + // The job timed out. 
+ case "timeout": + return pluginsCore.PhaseInfoFailure(phaseCode, phaseReason, taskInfo) + + default: + return pluginsCore.PhaseInfoSystemFailure(phaseCode, phaseReason, taskInfo) + } +} + +func createTaskInfo(resourceMeta *ResourceMetaWrapper) *core.TaskInfo { + timeNow := time.Now() + j := formatJobReferenceForQueryParam(resourceMeta.JobReference) + + return &core.TaskInfo{ + OccurredAt: &timeNow, + Logs: []*flyteIdlCore.TaskLog{ + { + Uri: fmt.Sprintf("%s?project=%v&j=%v&page=queryresults", + bigqueryConsolePath, + resourceMeta.JobReference.ProjectId, + j), + Name: "BigQuery Console", + }, + }, + } +} + +func formatJobReference(reference bigquery.JobReference) string { + return fmt.Sprintf("%s:%s.%s", reference.ProjectId, reference.Location, reference.JobId) +} + +func formatJobReferenceForQueryParam(jobReference bigquery.JobReference) string { + return fmt.Sprintf("bq:%s:%s", jobReference.Location, jobReference.JobId) +} + +func (p Plugin) newBigQueryClient(ctx context.Context, identity google.Identity) (*bigquery.Service, error) { + options := []option.ClientOption{ + option.WithScopes("https://www.googleapis.com/auth/bigquery"), + // FIXME how do I access current version? + option.WithUserAgent(fmt.Sprintf("%s/%s", "flytepropeller", "LATEST")), + } + + // for mocking/testing purposes + if p.cfg.BigQueryEndpoint != "" { + options = append(options, + option.WithEndpoint(p.cfg.BigQueryEndpoint), + option.WithTokenSource(oauth2.StaticTokenSource(&oauth2.Token{}))) + } else if p.cfg.GoogleTokenSource.Type != "default" { + + tokenSource, err := p.googleTokenSource.GetTokenSource(ctx, identity) + + if err != nil { + return nil, pluginErrors.Wrapf(pluginErrors.RuntimeFailure, err, "unable to get token source") + } + + options = append(options, option.WithTokenSource(tokenSource)) + } else { + logger.Infof(ctx, "BigQuery client read $GOOGLE_APPLICATION_CREDENTIALS by default") + } + + return bigquery.NewService(ctx, options...) 
+} + +func NewPlugin(cfg *Config, metricScope promutils.Scope) (*Plugin, error) { + googleTokenSource, err := google.NewTokenSourceFactory(cfg.GoogleTokenSource) + + if err != nil { + return nil, pluginErrors.Wrapf(pluginErrors.PluginInitializationFailed, err, "failed to get google token source") + } + + return &Plugin{ + metricScope: metricScope, + cfg: cfg, + googleTokenSource: googleTokenSource, + }, nil +} + +func newBigQueryJobTaskPlugin() webapi.PluginEntry { + return webapi.PluginEntry{ + ID: "bigquery", + SupportedTaskTypes: []core.TaskType{bigqueryQueryJobTask}, + PluginLoader: func(ctx context.Context, iCtx webapi.PluginSetupContext) (webapi.AsyncPlugin, error) { + cfg := GetConfig() + + return NewPlugin(cfg, iCtx.MetricsScope()) + }, + } +} + +func init() { + gob.Register(ResourceMetaWrapper{}) + gob.Register(ResourceWrapper{}) + + pluginmachinery.PluginRegistry().RegisterRemotePlugin(newBigQueryJobTaskPlugin()) +} diff --git a/flyteplugins/go/tasks/plugins/webapi/bigquery/plugin_test.go b/flyteplugins/go/tasks/plugins/webapi/bigquery/plugin_test.go new file mode 100644 index 0000000000..d5f824e09d --- /dev/null +++ b/flyteplugins/go/tasks/plugins/webapi/bigquery/plugin_test.go @@ -0,0 +1,315 @@ +package bigquery + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "google.golang.org/api/bigquery/v2" + "google.golang.org/api/googleapi" + "k8s.io/apimachinery/pkg/util/rand" + + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + pluginsCore "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + coreMocks "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core/mocks" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/io" + ioMocks "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/io/mocks" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/webapi/mocks" + 
"github.com/flyteorg/flyte/v2/flytestdlib/contextutils" + "github.com/flyteorg/flyte/v2/flytestdlib/promutils" + "github.com/flyteorg/flyte/v2/flytestdlib/promutils/labeled" + "github.com/flyteorg/flyte/v2/flytestdlib/storage" + flyteIdlCore "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +func init() { + labeled.SetMetricKeys(contextutils.NamespaceKey) +} + +func TestFormatJobReference(t *testing.T) { + t.Run("format job reference", func(t *testing.T) { + jobReference := bigquery.JobReference{ + JobId: "my-job-id", + Location: "EU", + ProjectId: "flyte-test", + } + + str := formatJobReference(jobReference) + + assert.Equal(t, "flyte-test:EU.my-job-id", str) + }) +} + +func TestConstructOutputLocation(t *testing.T) { + job := &bigquery.Job{ + Configuration: &bigquery.JobConfiguration{ + Query: &bigquery.JobConfigurationQuery{ + DestinationTable: &bigquery.TableReference{ + ProjectId: "project", + DatasetId: "dataset", + TableId: "table", + }, + }, + }, + } + ol := constructOutputLocation(context.Background(), job) + assert.Equal(t, ol, "bq://project:dataset.table") + + job.Configuration.Query.DestinationTable = nil + ol = constructOutputLocation(context.Background(), job) + assert.Equal(t, ol, "") +} + +func TestCreateTaskInfo(t *testing.T) { + t.Run("create task info", func(t *testing.T) { + resourceMeta := ResourceMetaWrapper{ + JobReference: bigquery.JobReference{ + JobId: "my-job-id", + Location: "EU", + ProjectId: "flyte-test", + }, + } + + taskInfo := createTaskInfo(&resourceMeta) + + assert.Equal(t, 1, len(taskInfo.Logs)) + assert.Equal(t, flyteIdlCore.TaskLog{ + Uri: "https://console.cloud.google.com/bigquery?project=flyte-test&j=bq:EU:my-job-id&page=queryresults", + Name: "BigQuery Console", + }, *taskInfo.Logs[0]) + }) +} + +func TestOutputWriter(t *testing.T) { + ctx := context.Background() + statusContext := &mocks.StatusContext{} + + template := flyteIdlCore.TaskTemplate{} + tr := &coreMocks.TaskReader{} + tr.OnRead(ctx).Return(&template, 
nil) + statusContext.OnTaskReader().Return(tr) + + outputLocation := "bq://project:flyte.table" + err := writeOutput(ctx, statusContext, outputLocation) + assert.NoError(t, err) + + ds, err := storage.NewDataStore(&storage.Config{Type: storage.TypeMemory}, promutils.NewTestScope()) + assert.NoError(t, err) + + outputWriter := &ioMocks.OutputWriter{} + outputWriter.OnPutMatch(mock.Anything, mock.Anything).Return(nil).Run(func(args mock.Arguments) { + or := args.Get(1).(io.OutputReader) + literals, ee, err := or.Read(ctx) + assert.NoError(t, err) + + sd := literals.GetLiterals()["results"].GetScalar().GetStructuredDataset() + assert.Equal(t, sd.Uri, outputLocation) + assert.Equal(t, sd.Metadata.GetStructuredDatasetType().Columns[0].Name, "col1") + assert.Equal(t, sd.Metadata.GetStructuredDatasetType().Columns[0].LiteralType.GetSimple(), flyteIdlCore.SimpleType_INTEGER) + + if ee != nil { + assert.NoError(t, ds.WriteProtobuf(ctx, outputWriter.GetErrorPath(), storage.Options{}, ee)) + } + + if literals != nil { + assert.NoError(t, ds.WriteProtobuf(ctx, outputWriter.GetOutputPath(), storage.Options{}, literals)) + } + }) + + execID := rand.String(3) + basePrefix := storage.DataReference("fake://bucket/prefix/" + execID) + outputWriter.OnGetOutputPath().Return(basePrefix + "/outputs.pb") + statusContext.OnOutputWriter().Return(outputWriter) + + template = flyteIdlCore.TaskTemplate{ + Interface: &flyteIdlCore.TypedInterface{ + Outputs: &flyteIdlCore.VariableMap{ + Variables: map[string]*flyteIdlCore.Variable{ + "results": { + Type: &flyteIdlCore.LiteralType{ + Type: &flyteIdlCore.LiteralType_StructuredDatasetType{ + StructuredDatasetType: &flyteIdlCore.StructuredDatasetType{ + Columns: []*flyteIdlCore.StructuredDatasetType_DatasetColumn{ + { + Name: "col1", + LiteralType: &flyteIdlCore.LiteralType{ + Type: &flyteIdlCore.LiteralType_Simple{ + Simple: flyteIdlCore.SimpleType_INTEGER, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + 
tr.OnRead(ctx).Return(&template, nil) + statusContext.OnTaskReader().Return(tr) + err = writeOutput(ctx, statusContext, outputLocation) + assert.NoError(t, err) +} + +func TestHandleCreateError(t *testing.T) { + occurredAt := time.Now() + taskInfo := core.TaskInfo{OccurredAt: &occurredAt} + + t.Run("handle 401", func(t *testing.T) { + createError := googleapi.Error{ + Code: 401, + Message: "user xxx is not authorized", + } + + phase := handleCreateError(&createError, &taskInfo) + + assert.Equal(t, flyteIdlCore.ExecutionError{ + Code: "http401", + Message: "user xxx is not authorized", + Kind: flyteIdlCore.ExecutionError_USER, + }, *phase.Err()) + assert.Equal(t, taskInfo, *phase.Info()) + }) + + t.Run("handle 500", func(t *testing.T) { + createError := googleapi.Error{ + Code: 500, + Message: "oops", + } + + phase := handleCreateError(&createError, &taskInfo) + + assert.Equal(t, flyteIdlCore.ExecutionError{ + Code: "http500", + Message: "oops", + Kind: flyteIdlCore.ExecutionError_SYSTEM, + }, *phase.Err()) + assert.Equal(t, taskInfo, *phase.Info()) + }) +} + +func TestHandleErrorResult(t *testing.T) { + occurredAt := time.Now() + taskInfo := core.TaskInfo{OccurredAt: &occurredAt} + + type args struct { + reason string + phase core.Phase + errorKind flyteIdlCore.ExecutionError_ErrorKind + } + + tests := []args{ + { + reason: "accessDenied", + phase: pluginsCore.PhasePermanentFailure, + errorKind: flyteIdlCore.ExecutionError_USER, + }, + { + reason: "backendError", + phase: pluginsCore.PhaseRetryableFailure, + errorKind: flyteIdlCore.ExecutionError_SYSTEM, + }, + { + reason: "billingNotEnabled", + phase: pluginsCore.PhasePermanentFailure, + errorKind: flyteIdlCore.ExecutionError_USER, + }, + { + reason: "blocked", + phase: pluginsCore.PhasePermanentFailure, + errorKind: flyteIdlCore.ExecutionError_USER, + }, + { + reason: "duplicate", + phase: pluginsCore.PhasePermanentFailure, + errorKind: flyteIdlCore.ExecutionError_USER, + }, + { + reason: "internalError", + 
phase: pluginsCore.PhaseRetryableFailure, + errorKind: flyteIdlCore.ExecutionError_SYSTEM, + }, + { + reason: "invalid", + phase: pluginsCore.PhasePermanentFailure, + errorKind: flyteIdlCore.ExecutionError_USER, + }, + { + reason: "invalidQuery", + phase: pluginsCore.PhasePermanentFailure, + errorKind: flyteIdlCore.ExecutionError_USER, + }, + { + reason: "invalidUser", + phase: pluginsCore.PhaseRetryableFailure, + errorKind: flyteIdlCore.ExecutionError_SYSTEM, + }, + { + reason: "notFound", + phase: pluginsCore.PhasePermanentFailure, + errorKind: flyteIdlCore.ExecutionError_USER, + }, + { + reason: "notImplemented", + phase: pluginsCore.PhasePermanentFailure, + errorKind: flyteIdlCore.ExecutionError_USER, + }, + { + reason: "quotaExceeded", + phase: pluginsCore.PhaseRetryableFailure, + errorKind: flyteIdlCore.ExecutionError_USER, + }, + { + reason: "rateLimitExceeded", + phase: pluginsCore.PhaseRetryableFailure, + errorKind: flyteIdlCore.ExecutionError_USER, + }, + { + reason: "resourceInUse", + phase: pluginsCore.PhaseRetryableFailure, + errorKind: flyteIdlCore.ExecutionError_SYSTEM, + }, + + { + reason: "resourcesExceeded", + phase: pluginsCore.PhasePermanentFailure, + errorKind: flyteIdlCore.ExecutionError_USER, + }, + + { + reason: "responseTooLarge", + phase: pluginsCore.PhasePermanentFailure, + errorKind: flyteIdlCore.ExecutionError_USER, + }, + { + reason: "stopped", + phase: pluginsCore.PhasePermanentFailure, + errorKind: flyteIdlCore.ExecutionError_USER, + }, + { + reason: "tableUnavailable", + phase: pluginsCore.PhaseRetryableFailure, + errorKind: flyteIdlCore.ExecutionError_SYSTEM, + }, + { + reason: "timeout", + phase: pluginsCore.PhasePermanentFailure, + errorKind: flyteIdlCore.ExecutionError_USER, + }, + } + + for _, test := range tests { + t.Run(test.reason, func(t *testing.T) { + phaseInfo := handleErrorResult(test.reason, "message", &taskInfo) + + assert.Equal(t, test.phase, phaseInfo.Phase()) + assert.Equal(t, test.reason, phaseInfo.Err().Code) + 
assert.Equal(t, test.errorKind, phaseInfo.Err().Kind) + assert.Equal(t, "message", phaseInfo.Err().Message) + }) + } +} diff --git a/flyteplugins/go/tasks/plugins/webapi/bigquery/query_job.go b/flyteplugins/go/tasks/plugins/webapi/bigquery/query_job.go new file mode 100644 index 0000000000..a4f01c2b63 --- /dev/null +++ b/flyteplugins/go/tasks/plugins/webapi/bigquery/query_job.go @@ -0,0 +1,261 @@ +package bigquery + +import ( + "strconv" + + structpb "github.com/golang/protobuf/ptypes/struct" + "github.com/pkg/errors" + "google.golang.org/api/bigquery/v2" + + pluginErrors "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/errors" + pluginUtils "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/utils" + flyteIdlCore "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +type QueryJobConfig struct { + Location string `json:"location"` + ProjectID string `json:"projectId"` + + // AllowLargeResults: [Optional] If true and query uses legacy SQL + // dialect, allows the query to produce arbitrarily large result tables + // at a slight cost in performance. Requires destinationTable to be set. + // For standard SQL queries, this flag is ignored and large results are + // always allowed. However, you must still set destinationTable when + // result size exceeds the allowed maximum response size. + AllowLargeResults bool `json:"allowLargeResults,omitempty"` + + // Clustering: [Beta] Clustering specification for the destination + // table. Must be specified with time-based partitioning, data in the + // table will be first partitioned and subsequently clustered. + Clustering *bigquery.Clustering `json:"clustering,omitempty"` + + // CreateDisposition: [Optional] Specifies whether the job is allowed to + // create new tables. The following values are supported: + // CREATE_IF_NEEDED: If the table does not exist, BigQuery creates the + // table. CREATE_NEVER: The table must already exist. 
	// If it does not, a 'notFound' error is returned in the job result. The
	// default value is CREATE_IF_NEEDED. Creation, truncation and append
	// actions occur as one atomic update upon job completion.
	CreateDisposition string `json:"createDisposition,omitempty"`

	// DefaultDataset: [Optional] Specifies the default dataset to use for
	// unqualified table names in the query. Note that this does not alter
	// behavior of unqualified dataset names.
	DefaultDataset *bigquery.DatasetReference `json:"defaultDataset,omitempty"`

	// DestinationEncryptionConfiguration: Custom encryption configuration
	// (e.g., Cloud KMS keys).
	DestinationEncryptionConfiguration *bigquery.EncryptionConfiguration `json:"destinationEncryptionConfiguration,omitempty"`

	// DestinationTable: [Optional] Describes the table where the query
	// results should be stored. If not present, a new table will be created
	// to store the results. This property must be set for large results
	// that exceed the maximum response size.
	DestinationTable *bigquery.TableReference `json:"destinationTable,omitempty"`

	// FlattenResults: [Optional] If true and query uses legacy SQL dialect,
	// flattens all nested and repeated fields in the query results.
	// allowLargeResults must be true if this is set to false. For standard
	// SQL queries, this flag is ignored and results are never flattened.
	//
	// Default: true
	FlattenResults *bool `json:"flattenResults,omitempty"`

	// MaximumBillingTier: [Optional] Limits the billing tier for this job.
	// Queries that have resource usage beyond this tier will fail (without
	// incurring a charge). If unspecified, this will be set to your project
	// default.
	//
	// Default: 1
	MaximumBillingTier *int64 `json:"maximumBillingTier,omitempty"`

	// MaximumBytesBilled: [Optional] Limits the bytes billed for this job.
	// Queries that will have bytes billed beyond this limit will fail
	// (without incurring a charge). If unspecified, this will be set to
	// your project default. NOTE(review): the ",string" tag means this is
	// serialized as a JSON string, matching the BigQuery API wire format.
	MaximumBytesBilled int64 `json:"maximumBytesBilled,omitempty,string"`

	// Priority: [Optional] Specifies a priority for the query. Possible
	// values include INTERACTIVE and BATCH. The default value is
	// INTERACTIVE.
	Priority string `json:"priority,omitempty"`

	// Query: [Required] SQL query text to execute. The useLegacySql field
	// can be used to indicate whether the query uses legacy SQL or standard
	// SQL.
	Query string `json:"query,omitempty"`

	// SchemaUpdateOptions: Allows the schema of the destination table to be
	// updated as a side effect of the query job. Schema update options are
	// supported in two cases: when writeDisposition is WRITE_APPEND; when
	// writeDisposition is WRITE_TRUNCATE and the destination table is a
	// partition of a table, specified by partition decorators. For normal
	// tables, WRITE_TRUNCATE will always overwrite the schema. One or more
	// of the following values are specified: ALLOW_FIELD_ADDITION: allow
	// adding a nullable field to the schema. ALLOW_FIELD_RELAXATION: allow
	// relaxing a required field in the original schema to nullable.
	SchemaUpdateOptions []string `json:"schemaUpdateOptions,omitempty"`

	// TableDefinitions: [Optional] If querying an external data source
	// outside of BigQuery, describes the data format, location and other
	// properties of the data source. By defining these properties, the data
	// source can then be queried as if it were a standard BigQuery table.
	TableDefinitions map[string]bigquery.ExternalDataConfiguration `json:"tableDefinitions,omitempty"`

	// TimePartitioning: Time-based partitioning specification for the
	// destination table. Only one of timePartitioning and rangePartitioning
	// should be specified.
	TimePartitioning *bigquery.TimePartitioning `json:"timePartitioning,omitempty"`

	// UseLegacySQL: Specifies whether to use BigQuery's legacy SQL dialect
	// for this query. The default value is true. If set to false, the query
	// will use BigQuery's standard SQL:
	// https://cloud.google.com/bigquery/sql-reference/ When useLegacySql is
	// set to false, the value of flattenResults is ignored; query will be
	// run as if flattenResults is false.
	//
	// Default: true
	// NOTE(review): getJobConfigurationQuery below always forces this to
	// false regardless of the user-supplied value, because query parameters
	// require standard SQL.
	UseLegacySQL *bool `json:"useLegacySql,omitempty"`

	// UseQueryCache: [Optional] Whether to look for the result in the query
	// cache. The query cache is a best-effort cache that will be flushed
	// whenever tables in the query are modified. Moreover, the query cache
	// is only available when a query does not have a destination table
	// specified. The default value is true.
	//
	// Default: true
	UseQueryCache *bool `json:"useQueryCache,omitempty"`

	// UserDefinedFunctionResources: Describes user-defined function
	// resources used in the query.
	UserDefinedFunctionResources []*bigquery.UserDefinedFunctionResource `json:"userDefinedFunctionResources,omitempty"`

	// WriteDisposition: [Optional] Specifies the action that occurs if the
	// destination table already exists. The following values are supported:
	// WRITE_TRUNCATE: If the table already exists, BigQuery overwrites the
	// table data and uses the schema from the query result. WRITE_APPEND:
	// If the table already exists, BigQuery appends the data to the table.
	// WRITE_EMPTY: If the table already exists and contains data, a
	// 'duplicate' error is returned in the job result. The default value is
	// WRITE_EMPTY. Each action is atomic and only occurs if BigQuery is
	// able to complete the job successfully. Creation, truncation and
	// append actions occur as one atomic update upon job completion.
+ WriteDisposition string `json:"writeDisposition,omitempty"` +} + +func unmarshalQueryJobConfig(structObj *structpb.Struct) (*QueryJobConfig, error) { + queryJobConfig := QueryJobConfig{} + err := pluginUtils.UnmarshalStructToObj(structObj, &queryJobConfig) + + if err != nil { + return nil, errors.Wrapf(err, "failed to unmarshal QueryJobConfig") + } + + return &queryJobConfig, nil +} + +func getJobConfigurationQuery(custom *QueryJobConfig, inputs *flyteIdlCore.LiteralMap) (*bigquery.JobConfigurationQuery, error) { + queryParameters, err := getQueryParameters(inputs.Literals) + + if err != nil { + return nil, pluginErrors.Errorf(pluginErrors.BadTaskSpecification, "unable build query parameters [%v]", err.Error()) + } + + // BigQuery supports query parameters to help prevent SQL injection when queries are constructed using user input. + // This feature is only available with standard SQL syntax. For more detail: https://cloud.google.com/bigquery/docs/parameterized-queries + useLegacySQL := false + return &bigquery.JobConfigurationQuery{ + AllowLargeResults: custom.AllowLargeResults, + Clustering: custom.Clustering, + CreateDisposition: custom.CreateDisposition, + DefaultDataset: custom.DefaultDataset, + DestinationEncryptionConfiguration: custom.DestinationEncryptionConfiguration, + DestinationTable: custom.DestinationTable, + FlattenResults: custom.FlattenResults, + MaximumBillingTier: custom.MaximumBillingTier, + MaximumBytesBilled: custom.MaximumBytesBilled, + ParameterMode: "NAMED", + Priority: custom.Priority, + Query: custom.Query, + QueryParameters: queryParameters, + SchemaUpdateOptions: custom.SchemaUpdateOptions, + TableDefinitions: custom.TableDefinitions, + TimePartitioning: custom.TimePartitioning, + UseLegacySql: &useLegacySQL, + UseQueryCache: custom.UseQueryCache, + UserDefinedFunctionResources: custom.UserDefinedFunctionResources, + WriteDisposition: custom.WriteDisposition, + }, nil +} + +func getQueryParameters(literalMap 
map[string]*flyteIdlCore.Literal) ([]*bigquery.QueryParameter, error) { + queryParameters := make([]*bigquery.QueryParameter, len(literalMap)) + + i := 0 + for name, literal := range literalMap { + parameterType, parameterValue, err := getQueryParameter(literal) + + if err != nil { + return nil, err + } + + queryParameters[i] = &bigquery.QueryParameter{ + Name: name, + ParameterType: parameterType, + ParameterValue: parameterValue, + } + + i++ + } + + return queryParameters, nil +} + +// read more about parameterized queries: https://cloud.google.com/bigquery/docs/parameterized-queries + +func getQueryParameter(literal *flyteIdlCore.Literal) (*bigquery.QueryParameterType, *bigquery.QueryParameterValue, error) { + if scalar := literal.GetScalar(); scalar != nil { + if primitive := scalar.GetPrimitive(); primitive != nil { + switch primitive.Value.(type) { + case *flyteIdlCore.Primitive_Integer: + integerType := bigquery.QueryParameterType{Type: "INT64"} + integerValue := bigquery.QueryParameterValue{ + Value: strconv.FormatInt(primitive.GetInteger(), 10), + } + + return &integerType, &integerValue, nil + + case *flyteIdlCore.Primitive_StringValue: + stringType := bigquery.QueryParameterType{Type: "STRING"} + stringValue := bigquery.QueryParameterValue{ + Value: primitive.GetStringValue(), + } + + return &stringType, &stringValue, nil + + case *flyteIdlCore.Primitive_FloatValue: + floatType := bigquery.QueryParameterType{Type: "FLOAT64"} + floatValue := bigquery.QueryParameterValue{ + Value: strconv.FormatFloat(primitive.GetFloatValue(), 'f', -1, 64), + } + + return &floatType, &floatValue, nil + + case *flyteIdlCore.Primitive_Boolean: + boolType := bigquery.QueryParameterType{Type: "BOOL"} + + if primitive.GetBoolean() { + return &boolType, &bigquery.QueryParameterValue{ + Value: "TRUE", + }, nil + } + + return &boolType, &bigquery.QueryParameterValue{ + Value: "FALSE", + }, nil + } + } + } + + return nil, nil, pluginErrors.Errorf(pluginErrors.BadTaskSpecification, 
"unsupported literal [%v]", literal) +} diff --git a/flyteplugins/go/tasks/plugins/webapi/bigquery/query_job_test.go b/flyteplugins/go/tasks/plugins/webapi/bigquery/query_job_test.go new file mode 100644 index 0000000000..ea21beb962 --- /dev/null +++ b/flyteplugins/go/tasks/plugins/webapi/bigquery/query_job_test.go @@ -0,0 +1,84 @@ +package bigquery + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "google.golang.org/api/bigquery/v2" + + "github.com/flyteorg/flyte/v2/flyteidl2/clients/go/coreutils" +) + +func TestGetQueryParameter(t *testing.T) { + t.Run("get integer parameter", func(t *testing.T) { + literal, _ := coreutils.MakePrimitiveLiteral(42) + + tpe, value, err := getQueryParameter(literal) + + assert.NoError(t, err) + assert.Equal(t, bigquery.QueryParameterType{Type: "INT64"}, *tpe) + assert.Equal(t, bigquery.QueryParameterValue{Value: "42"}, *value) + }) + + t.Run("get string parameter", func(t *testing.T) { + literal, _ := coreutils.MakePrimitiveLiteral("abc") + + tpe, value, err := getQueryParameter(literal) + + assert.NoError(t, err) + assert.Equal(t, bigquery.QueryParameterType{Type: "STRING"}, *tpe) + assert.Equal(t, bigquery.QueryParameterValue{Value: "abc"}, *value) + }) + + t.Run("get float parameter", func(t *testing.T) { + literal, _ := coreutils.MakePrimitiveLiteral(42.5) + + tpe, value, err := getQueryParameter(literal) + + assert.NoError(t, err) + assert.Equal(t, bigquery.QueryParameterType{Type: "FLOAT64"}, *tpe) + assert.Equal(t, bigquery.QueryParameterValue{Value: "42.5"}, *value) + }) + + t.Run("get true parameter", func(t *testing.T) { + literal, _ := coreutils.MakePrimitiveLiteral(true) + + tpe, value, err := getQueryParameter(literal) + + assert.NoError(t, err) + assert.Equal(t, bigquery.QueryParameterType{Type: "BOOL"}, *tpe) + assert.Equal(t, bigquery.QueryParameterValue{Value: "TRUE"}, *value) + }) + + t.Run("get false parameter", func(t *testing.T) { + literal, _ := coreutils.MakePrimitiveLiteral(false) + + tpe, 
value, err := getQueryParameter(literal) + + assert.NoError(t, err) + assert.Equal(t, bigquery.QueryParameterType{Type: "BOOL"}, *tpe) + assert.Equal(t, bigquery.QueryParameterValue{Value: "FALSE"}, *value) + }) +} + +func TestGetJobConfigurationQuery(t *testing.T) { + t.Run("get job configuration query", func(t *testing.T) { + config := QueryJobConfig{} + inputs, _ := coreutils.MakeLiteralMap(map[string]interface{}{ + "integer": 42, + }) + + jobConfigurationQuery, err := getJobConfigurationQuery(&config, inputs) + useLegacySQL := false + + assert.NoError(t, err) + assert.Equal(t, "NAMED", jobConfigurationQuery.ParameterMode) + assert.Equal(t, &useLegacySQL, jobConfigurationQuery.UseLegacySql) + assert.Equal(t, 1, len(jobConfigurationQuery.QueryParameters)) + assert.Equal(t, bigquery.QueryParameter{ + Name: "integer", + ParameterType: &bigquery.QueryParameterType{Type: "INT64"}, + ParameterValue: &bigquery.QueryParameterValue{Value: "42"}, + }, *jobConfigurationQuery.QueryParameters[0]) + }) +} diff --git a/flyteplugins/go/tasks/plugins/webapi/databricks/config.go b/flyteplugins/go/tasks/plugins/webapi/databricks/config.go new file mode 100644 index 0000000000..1930b61a57 --- /dev/null +++ b/flyteplugins/go/tasks/plugins/webapi/databricks/config.go @@ -0,0 +1,77 @@ +package databricks + +import ( + "time" + + pluginsConfig "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/config" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/webapi" + "github.com/flyteorg/flyte/v2/flytestdlib/config" +) + +var ( + defaultCluster = "COMPUTE_CLUSTER" + tokenKey = "FLYTE_DATABRICKS_API_TOKEN" // nolint: gosec + + defaultConfig = Config{ + WebAPI: webapi.PluginConfig{ + ResourceQuotas: map[core.ResourceNamespace]int{ + "default": 1000, + }, + ReadRateLimiter: webapi.RateLimiterConfig{ + Burst: 100, + QPS: 10, + }, + WriteRateLimiter: webapi.RateLimiterConfig{ + Burst: 100, + QPS: 10, + 
			},
			Caching: webapi.CachingConfig{
				Size:              500000,
				ResyncInterval:    config.Duration{Duration: 30 * time.Second},
				Workers:           10,
				MaxSystemFailures: 5,
			},
			ResourceMeta: nil,
		},
		// Caps concurrent Databricks executions per project and overall.
		ResourceConstraints: core.ResourceConstraintsSpec{
			ProjectScopeResourceConstraint: &core.ResourceConstraint{
				Value: 100,
			},
			NamespaceScopeResourceConstraint: &core.ResourceConstraint{
				Value: 50,
			},
		},
		DefaultCluster: defaultCluster,
		TokenKey:       tokenKey,
	}

	// Registers the "databricks" section with the plugin config system so
	// values can be overridden from the propeller configuration file.
	configSection = pluginsConfig.MustRegisterSubSection("databricks", &defaultConfig)
)

// Config is config for 'databricks' plugin
type Config struct {
	// WebAPI defines config for the base WebAPI plugin
	WebAPI webapi.PluginConfig `json:"webApi" pflag:",Defines config for the base WebAPI plugin."`

	// ResourceConstraints defines resource constraints on how many executions to be created per project/overall at any given time
	ResourceConstraints core.ResourceConstraintsSpec `json:"resourceConstraints" pflag:"-,Defines resource constraints on how many executions to be created per project/overall at any given time."`

	// NOTE(review): the JSON tag says "defaultWarehouse" and the pflag text
	// talks about a warehouse, but the field is named DefaultCluster —
	// presumably copied from a SQL-warehouse plugin. The tag cannot be
	// changed without breaking existing config files; confirm intent.
	DefaultCluster string `json:"defaultWarehouse" pflag:",Defines the default warehouse to use when running on Databricks unless overwritten by the task."`

	// Secret-manager key under which the Databricks API token is stored.
	TokenKey string `json:"databricksTokenKey" pflag:",Name of the key where to find Databricks token in the secret manager."`

	// Workspace instance name, e.g. "my-account.cloud.databricks.com".
	DatabricksInstance string `json:"databricksInstance" pflag:",Databricks workspace instance name."`

	EntrypointFile string `json:"entrypointFile" pflag:",A URL of the entrypoint file. DBFS and cloud storage (s3://, gcs://, adls://, etc) locations are supported."`

	// databricksEndpoint overrides databricks instance endpoint, only for testing
	databricksEndpoint string
}

// GetConfig returns the current (possibly overridden) plugin configuration.
func GetConfig() *Config {
	return configSection.GetConfig().(*Config)
}

// SetConfig replaces the plugin configuration; primarily used by tests.
func SetConfig(cfg *Config) error {
	return configSection.SetConfig(cfg)
}

// ---- config_test.go ----

// TestGetAndSetConfig round-trips a modified config through Set/GetConfig.
func TestGetAndSetConfig(t *testing.T) {
	cfg := defaultConfig
	cfg.DefaultCluster = "test-cluster"
	cfg.WebAPI.Caching.Workers = 1
	cfg.WebAPI.Caching.ResyncInterval.Duration = 5 * time.Second
	err := SetConfig(&cfg)
	assert.NoError(t, err)
	assert.Equal(t, &cfg, GetConfig())
}
"github.com/flyteorg/flyte/v2/flytestdlib/promutils" + "github.com/flyteorg/flyte/v2/flytestdlib/promutils/labeled" + "github.com/flyteorg/flyte/v2/flytestdlib/utils" + coreIdl "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" + flyteIdlCore "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins" +) + +func TestEndToEnd(t *testing.T) { + server := newFakeDatabricksServer() + defer server.Close() + + iter := func(ctx context.Context, tCtx pluginCore.TaskExecutionContext) error { + return nil + } + + cfg := defaultConfig + cfg.databricksEndpoint = server.URL + cfg.WebAPI.Caching.Workers = 1 + cfg.WebAPI.Caching.ResyncInterval.Duration = 5 * time.Second + err := SetConfig(&cfg) + assert.NoError(t, err) + + pluginEntry := pluginmachinery.CreateRemotePlugin(newDatabricksJobTaskPlugin()) + plugin, err := pluginEntry.LoadPlugin(context.TODO(), newFakeSetupContext()) + assert.NoError(t, err) + + t.Run("run a databricks job by new_cluster key", func(t *testing.T) { + databricksConfDict := map[string]interface{}{ + "name": "flytekit databricks plugin example", + "new_cluster": map[string]string{ + "spark_version": "11.0.x-scala2.12", + "node_type_id": "r3.xlarge", + "num_workers": "4", + }, + "timeout_seconds": 3600, + "max_retries": 1, + } + databricksConfig, err := utils.MarshalObjToStruct(databricksConfDict) + assert.NoError(t, err) + sparkJob := plugins.SparkJob{DatabricksConf: databricksConfig, DatabricksToken: "token", SparkConf: map[string]string{"spark.driver.bindAddress": "127.0.0.1"}} + st, err := utils.MarshalPbToStruct(&sparkJob) + assert.NoError(t, err) + inputs, _ := coreutils.MakeLiteralMap(map[string]interface{}{"x": 1}) + template := flyteIdlCore.TaskTemplate{ + Type: "databricks", + Custom: st, + Target: &coreIdl.TaskTemplate_Container{ + Container: &coreIdl.Container{ + Command: []string{"command"}, + Args: []string{"pyflyte-execute"}, + }, + }, + } + + phase := tests.RunPluginEndToEndTest(t, 
plugin, &template, inputs, nil, nil, iter) + assert.Equal(t, true, phase.Phase().IsSuccess()) + }) + + t.Run("run a databricks job by new_cluster key", func(t *testing.T) { + databricksConfDict := map[string]interface{}{ + "name": "flytekit databricks plugin example", + "existing_cluster_id": "1201-my-cluster", + "timeout_seconds": 3600, + "max_retries": 1, + } + databricksConfig, err := utils.MarshalObjToStruct(databricksConfDict) + assert.NoError(t, err) + sparkJob := plugins.SparkJob{DatabricksConf: databricksConfig, DatabricksToken: "token", SparkConf: map[string]string{"spark.driver.bindAddress": "127.0.0.1"}} + st, err := utils.MarshalPbToStruct(&sparkJob) + assert.NoError(t, err) + inputs, _ := coreutils.MakeLiteralMap(map[string]interface{}{"x": 1}) + template := flyteIdlCore.TaskTemplate{ + Type: "databricks", + Custom: st, + Target: &coreIdl.TaskTemplate_Container{ + Container: &coreIdl.Container{ + Command: []string{"command"}, + Args: []string{"pyflyte-execute"}, + }, + }, + } + + phase := tests.RunPluginEndToEndTest(t, plugin, &template, inputs, nil, nil, iter) + assert.Equal(t, true, phase.Phase().IsSuccess()) + }) +} + +func newFakeDatabricksServer() *httptest.Server { + runID := "065168461" + jobID := "019e7546" + return httptest.NewServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { + if request.URL.Path == fmt.Sprintf("%v/submit", databricksAPI) && request.Method == http.MethodPost { + writer.WriteHeader(http.StatusOK) + bytes := []byte(fmt.Sprintf(`{ + "run_id": "%v" + }`, runID)) + _, _ = writer.Write(bytes) + return + } + + if request.URL.Path == fmt.Sprintf("%v/get", databricksAPI) && request.Method == http.MethodGet { + writer.WriteHeader(http.StatusOK) + bytes := []byte(fmt.Sprintf(`{ + "job_id": "%v", + "state": {"state_message": "execution in progress.", "life_cycle_state": "TERMINATED", "result_state": "SUCCESS"} + }`, jobID)) + _, _ = writer.Write(bytes) + return + } + + if request.URL.Path == 
fmt.Sprintf("%v/cancel", databricksAPI) && request.Method == http.MethodPost { + writer.WriteHeader(http.StatusOK) + return + } + + writer.WriteHeader(http.StatusInternalServerError) + })) +} + +func newFakeSetupContext() *pluginCoreMocks.SetupContext { + fakeResourceRegistrar := pluginCoreMocks.ResourceRegistrar{} + fakeResourceRegistrar.On("RegisterResourceQuota", mock.Anything, mock.Anything, mock.Anything).Return(nil) + labeled.SetMetricKeys(contextutils.NamespaceKey) + + fakeSetupContext := pluginCoreMocks.SetupContext{} + fakeSetupContext.OnMetricsScope().Return(promutils.NewScope("test")) + fakeSetupContext.OnResourceRegistrar().Return(&fakeResourceRegistrar) + + return &fakeSetupContext +} diff --git a/flyteplugins/go/tasks/plugins/webapi/databricks/plugin.go b/flyteplugins/go/tasks/plugins/webapi/databricks/plugin.go new file mode 100644 index 0000000000..34ca70b043 --- /dev/null +++ b/flyteplugins/go/tasks/plugins/webapi/databricks/plugin.go @@ -0,0 +1,347 @@ +package databricks + +import ( + "bytes" + "context" + "encoding/gob" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "time" + + pluginErrors "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/errors" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + pluginsCore "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core/template" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/ioutils" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/utils" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/webapi" + "github.com/flyteorg/flyte/v2/flytestdlib/errors" + "github.com/flyteorg/flyte/v2/flytestdlib/logger" + "github.com/flyteorg/flyte/v2/flytestdlib/promutils" + flyteIdlCore "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" + 
"github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins" +) + +const ( + create string = "create" + get string = "get" + cancel string = "cancel" + databricksAPI string = "/api/2.1/jobs/runs" + newCluster string = "new_cluster" + dockerImage string = "docker_image" + sparkConfig string = "spark_conf" + sparkPythonTask string = "spark_python_task" + pythonFile string = "python_file" + parameters string = "parameters" + url string = "url" +) + +// HTTPClient for mocking/testing purposes, and we'll override this method +type HTTPClient interface { + Do(req *http.Request) (*http.Response, error) +} + +type Plugin struct { + metricScope promutils.Scope + cfg *Config + client HTTPClient +} + +type ResourceWrapper struct { + StatusCode int + LifeCycleState string + ResultState string + JobID string + Message string +} + +type ResourceMetaWrapper struct { + RunID string + DatabricksInstance string + Token string +} + +func (p Plugin) GetConfig() webapi.PluginConfig { + return GetConfig().WebAPI +} + +func (p Plugin) ResourceRequirements(_ context.Context, _ webapi.TaskExecutionContextReader) ( + namespace core.ResourceNamespace, constraints core.ResourceConstraintsSpec, err error) { + + // Resource requirements are assumed to be the same. 
+ return "default", p.cfg.ResourceConstraints, nil +} + +func (p Plugin) Create(ctx context.Context, taskCtx webapi.TaskExecutionContextReader) (webapi.ResourceMeta, + webapi.Resource, error) { + taskTemplate, err := taskCtx.TaskReader().Read(ctx) + if err != nil { + return nil, nil, err + } + + token, err := taskCtx.SecretManager().Get(ctx, p.cfg.TokenKey) + if err != nil { + return nil, nil, err + } + + container := taskTemplate.GetContainer() + sparkJob := plugins.SparkJob{} + err = utils.UnmarshalStruct(taskTemplate.GetCustom(), &sparkJob) + if err != nil { + return nil, nil, errors.Wrapf(pluginErrors.BadTaskSpecification, err, "invalid TaskSpecification [%v], failed to unmarshal", taskTemplate.GetCustom()) + } + + // override the default token in propeller + if len(sparkJob.DatabricksToken) != 0 { + token = sparkJob.DatabricksToken + } + modifiedArgs, err := template.Render(ctx, container.GetArgs(), template.Parameters{ + TaskExecMetadata: taskCtx.TaskExecutionMetadata(), + Inputs: taskCtx.InputReader(), + OutputPath: taskCtx.OutputWriter(), + Task: taskCtx.TaskReader(), + }) + if err != nil { + return nil, nil, err + } + + databricksJob := make(map[string]interface{}) + err = utils.UnmarshalStructToObj(sparkJob.DatabricksConf, &databricksJob) + if err != nil { + return nil, nil, fmt.Errorf("failed to unmarshal databricksJob: %v: %v", sparkJob.DatabricksConf, err) + } + + // If "existing_cluster_id" is in databricks_job, then we don't need to set "new_cluster" + // Refer the docs here: https://docs.databricks.com/en/workflows/jobs/jobs-2.0-api.html#request-structure + if clusterConfig, ok := databricksJob[newCluster].(map[string]interface{}); ok { + if dockerConfig, ok := clusterConfig[dockerImage].(map[string]interface{}); !ok || dockerConfig[url] == nil { + clusterConfig[dockerImage] = map[string]string{url: container.Image} + } + + if clusterConfig[sparkConfig] == nil && len(sparkJob.SparkConf) != 0 { + clusterConfig[sparkConfig] = sparkJob.SparkConf + } + 
} + databricksJob[sparkPythonTask] = map[string]interface{}{pythonFile: p.cfg.EntrypointFile, parameters: modifiedArgs} + + data, err := p.sendRequest(create, databricksJob, token, "") + if err != nil { + return nil, nil, err + } + + if _, ok := data["run_id"]; !ok { + return nil, nil, errors.Errorf("CorruptedPluginState", "can't get the run_id") + } + runID := fmt.Sprintf("%.0f", data["run_id"]) + + return ResourceMetaWrapper{runID, p.cfg.DatabricksInstance, token}, nil, nil +} + +func (p Plugin) Get(ctx context.Context, taskCtx webapi.GetContext) (latest webapi.Resource, err error) { + exec := taskCtx.ResourceMeta().(ResourceMetaWrapper) + res, err := p.sendRequest(get, nil, exec.Token, exec.RunID) + if err != nil { + return nil, err + } + if _, ok := res["state"]; !ok { + return nil, errors.Errorf("CorruptedPluginState", "can't get the job state") + } + jobState := res["state"].(map[string]interface{}) + jobID := fmt.Sprintf("%.0f", res["job_id"]) + message := fmt.Sprintf("%s", jobState["state_message"]) + lifeCycleState := fmt.Sprintf("%s", jobState["life_cycle_state"]) + var resultState string + if _, ok := jobState["result_state"]; !ok { + // The result_state is not available until the job is finished. 
+ // https://docs.databricks.com/en/workflows/jobs/jobs-2.0-api.html#runresultstate + resultState = "" + } else { + resultState = fmt.Sprintf("%s", jobState["result_state"]) + } + return ResourceWrapper{ + JobID: jobID, + LifeCycleState: lifeCycleState, + ResultState: resultState, + Message: message, + }, nil +} + +func (p Plugin) Delete(ctx context.Context, taskCtx webapi.DeleteContext) error { + if taskCtx.ResourceMeta() == nil { + return nil + } + exec := taskCtx.ResourceMeta().(ResourceMetaWrapper) + _, err := p.sendRequest(cancel, nil, exec.Token, exec.RunID) + if err != nil { + return err + } + logger.Info(ctx, "Deleted Databricks job execution.") + + return nil +} + +func (p Plugin) sendRequest(method string, databricksJob map[string]interface{}, token string, runID string) (map[string]interface{}, error) { + var databricksURL string + // for mocking/testing purposes + if p.cfg.databricksEndpoint == "" { + databricksURL = fmt.Sprintf("https://%v%v", p.cfg.DatabricksInstance, databricksAPI) + } else { + databricksURL = fmt.Sprintf("%v%v", p.cfg.databricksEndpoint, databricksAPI) + } + + // build the request spec + var body io.Reader + var httpMethod string + switch method { + case create: + databricksURL += "/submit" + mJSON, err := json.Marshal(databricksJob) + if err != nil { + return nil, fmt.Errorf("failed to marshal the job request: %v", err) + } + body = bytes.NewBuffer(mJSON) + httpMethod = http.MethodPost + case get: + databricksURL += "/get?run_id=" + runID + httpMethod = http.MethodGet + case cancel: + databricksURL += "/cancel" + body = bytes.NewBuffer([]byte(fmt.Sprintf("{ \"run_id\": %v }", runID))) + httpMethod = http.MethodPost + } + + req, err := http.NewRequest(httpMethod, databricksURL, body) + if err != nil { + return nil, err + } + req.Header.Add("Authorization", "Bearer "+token) + req.Header.Add("Content-Type", "application/json") + + // Send the request + resp, err := p.client.Do(req) + if err != nil { + return nil, fmt.Errorf("failed to 
send request to Databricks platform with err: [%v]", err) + } + defer resp.Body.Close() + + // Parse the response body + responseBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + var data map[string]interface{} + + if len(responseBody) != 0 { + err = json.Unmarshal(responseBody, &data) + if err != nil { + return nil, fmt.Errorf("failed to parse response with err: [%v]", err) + } + } + + if resp.StatusCode != http.StatusOK { + message := "" + if v, ok := data["message"]; ok { + message = v.(string) + } + return nil, fmt.Errorf("failed to %v Databricks job with error [%v]", method, message) + } + return data, nil +} + +func (p Plugin) Status(ctx context.Context, taskCtx webapi.StatusContext) (phase core.PhaseInfo, err error) { + exec := taskCtx.ResourceMeta().(ResourceMetaWrapper) + resource := taskCtx.Resource().(ResourceWrapper) + message := resource.Message + jobID := resource.JobID + lifeCycleState := resource.LifeCycleState + resultState := resource.ResultState + + taskInfo := createTaskInfo(exec.RunID, jobID, exec.DatabricksInstance) + switch lifeCycleState { + // Job response format. https://docs.databricks.com/en/workflows/jobs/jobs-2.0-api.html#runlifecyclestate + case "QUEUED": + return core.PhaseInfoQueued(time.Now(), core.DefaultPhaseVersion, message), nil + case "PENDING": + return core.PhaseInfoInitializing(time.Now(), core.DefaultPhaseVersion, message, taskInfo), nil + case "RUNNING": + fallthrough + case "BLOCKED": + fallthrough + case "WAITING_FOR_RETRY": + fallthrough + case "TERMINATING": + return core.PhaseInfoRunning(core.DefaultPhaseVersion, taskInfo), nil + case "TERMINATED": + if resultState == "SUCCESS" { + // Result state details. 
https://docs.databricks.com/en/workflows/jobs/jobs-2.0-api.html#runresultstate + if err := writeOutput(ctx, taskCtx); err != nil { + return core.PhaseInfoFailure(string(rune(http.StatusInternalServerError)), "failed to write output", taskInfo), nil + } + return core.PhaseInfoSuccess(taskInfo), nil + } else if resultState == "FAILED" { + return core.PhaseInfoRetryableFailure("job failed", message, taskInfo), nil + } + return core.PhaseInfoFailure(pluginErrors.TaskFailedWithError, message, taskInfo), nil + case "SKIPPED": + return core.PhaseInfoFailure(string(rune(http.StatusConflict)), message, taskInfo), nil + case "INTERNAL_ERROR": + return core.PhaseInfoRetryableFailure(string(rune(http.StatusInternalServerError)), message, taskInfo), nil + } + return core.PhaseInfoUndefined, pluginErrors.Errorf(pluginsCore.SystemErrorCode, "unknown execution phase [%v].", lifeCycleState) +} + +func writeOutput(ctx context.Context, taskCtx webapi.StatusContext) error { + taskTemplate, err := taskCtx.TaskReader().Read(ctx) + if err != nil { + return err + } + if taskTemplate.Interface == nil || taskTemplate.Interface.Outputs == nil || taskTemplate.Interface.Outputs.Variables == nil { + logger.Infof(ctx, "The task declares no outputs. 
Skipping writing the outputs.") + return nil + } + + outputReader := ioutils.NewRemoteFileOutputReader(ctx, taskCtx.DataStore(), taskCtx.OutputWriter(), 0) + return taskCtx.OutputWriter().Put(ctx, outputReader) +} + +func createTaskInfo(runID, jobID, databricksInstance string) *core.TaskInfo { + timeNow := time.Now() + + return &core.TaskInfo{ + OccurredAt: &timeNow, + Logs: []*flyteIdlCore.TaskLog{ + { + Uri: fmt.Sprintf("https://%s/#job/%s/run/%s", + databricksInstance, + jobID, + runID), + Name: "Databricks Console", + }, + }, + } +} + +func newDatabricksJobTaskPlugin() webapi.PluginEntry { + return webapi.PluginEntry{ + ID: "databricks", + SupportedTaskTypes: []core.TaskType{"spark"}, + PluginLoader: func(ctx context.Context, iCtx webapi.PluginSetupContext) (webapi.AsyncPlugin, error) { + return &Plugin{ + metricScope: iCtx.MetricsScope(), + cfg: GetConfig(), + client: &http.Client{}, + }, nil + }, + } +} + +func init() { + gob.Register(ResourceMetaWrapper{}) + gob.Register(ResourceWrapper{}) + + pluginmachinery.PluginRegistry().RegisterRemotePlugin(newDatabricksJobTaskPlugin()) +} diff --git a/flyteplugins/go/tasks/plugins/webapi/databricks/plugin_test.go b/flyteplugins/go/tasks/plugins/webapi/databricks/plugin_test.go new file mode 100644 index 0000000000..6469b7510b --- /dev/null +++ b/flyteplugins/go/tasks/plugins/webapi/databricks/plugin_test.go @@ -0,0 +1,154 @@ +package databricks + +import ( + "context" + "errors" + "net/http" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + pluginsCore "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + pluginCoreMocks "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core/mocks" + "github.com/flyteorg/flyte/v2/flytestdlib/ioutils" + "github.com/flyteorg/flyte/v2/flytestdlib/promutils" +) + +type MockClient struct { + MockDo func(req *http.Request) (*http.Response, error) +} + +func (m MockClient) Do(req *http.Request) (*http.Response, error) { + return 
m.MockDo(req) +} + +var ( + testInstance = "test-account.cloud.databricks.com" +) + +func TestPlugin(t *testing.T) { + fakeSetupContext := pluginCoreMocks.SetupContext{} + fakeSetupContext.OnMetricsScope().Return(promutils.NewScope("test")) + + plugin := Plugin{ + metricScope: fakeSetupContext.MetricsScope(), + cfg: GetConfig(), + client: &MockClient{func(req *http.Request) (*http.Response, error) { + return nil, nil + }}, + } + t.Run("get config", func(t *testing.T) { + cfg := defaultConfig + cfg.WebAPI.Caching.Workers = 1 + cfg.WebAPI.Caching.ResyncInterval.Duration = 5 * time.Second + err := SetConfig(&cfg) + assert.NoError(t, err) + assert.Equal(t, cfg.WebAPI, plugin.GetConfig()) + }) + t.Run("get ResourceRequirements", func(t *testing.T) { + namespace, constraints, err := plugin.ResourceRequirements(context.TODO(), nil) + assert.NoError(t, err) + assert.Equal(t, pluginsCore.ResourceNamespace("default"), namespace) + assert.Equal(t, plugin.cfg.ResourceConstraints, constraints) + }) +} + +func TestSendRequest(t *testing.T) { + fakeSetupContext := pluginCoreMocks.SetupContext{} + fakeSetupContext.OnMetricsScope().Return(promutils.NewScope("test1")) + databricksJob := map[string]interface{}{"sparkConfig": map[string]interface{}{"sparkVersion": "7.3.x-scala2.12"}} + token := "token" + + plugin := Plugin{ + metricScope: fakeSetupContext.MetricsScope(), + cfg: GetConfig(), + client: &MockClient{MockDo: func(req *http.Request) (*http.Response, error) { + assert.Equal(t, req.Method, http.MethodPost) + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutils.NewBytesReadCloser([]byte(`{"id":"someID","data":"someData"}`)), + }, nil + }}, + } + + t.Run("create a Databricks job", func(t *testing.T) { + data, err := plugin.sendRequest(create, databricksJob, token, "") + assert.NotNil(t, data) + assert.Equal(t, "someID", data["id"]) + assert.Equal(t, "someData", data["data"]) + assert.Nil(t, err) + }) + + t.Run("failed to create a Databricks job", func(t 
*testing.T) { + plugin.client = &MockClient{MockDo: func(req *http.Request) (*http.Response, error) { + assert.Equal(t, req.Method, http.MethodPost) + return &http.Response{ + StatusCode: http.StatusBadRequest, + Body: ioutils.NewBytesReadCloser([]byte(`{"message":"failed"}`)), + }, nil + }} + data, err := plugin.sendRequest(create, databricksJob, token, "") + assert.Nil(t, data) + assert.Equal(t, err.Error(), "failed to create Databricks job with error [failed]") + }) + + t.Run("failed to send request to Databricks", func(t *testing.T) { + plugin.client = &MockClient{MockDo: func(req *http.Request) (*http.Response, error) { + assert.Equal(t, req.Method, http.MethodPost) + return nil, errors.New("failed to send request") + }} + data, err := plugin.sendRequest(create, databricksJob, token, "") + assert.Nil(t, data) + assert.Equal(t, err.Error(), "failed to send request to Databricks platform with err: [failed to send request]") + }) + + t.Run("failed to send request to Databricks", func(t *testing.T) { + plugin.client = &MockClient{MockDo: func(req *http.Request) (*http.Response, error) { + assert.Equal(t, req.Method, http.MethodPost) + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutils.NewBytesReadCloser([]byte(`123`)), + }, nil + }} + data, err := plugin.sendRequest(create, databricksJob, token, "") + assert.Nil(t, data) + assert.Equal(t, err.Error(), "failed to parse response with err: [json: cannot unmarshal number into Go value of type map[string]interface {}]") + }) + + t.Run("get a Databricks job", func(t *testing.T) { + plugin.client = &MockClient{MockDo: func(req *http.Request) (*http.Response, error) { + assert.Equal(t, req.Method, http.MethodGet) + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutils.NewBytesReadCloser([]byte(`{"message":"ok"}`)), + }, nil + }} + data, err := plugin.sendRequest(get, databricksJob, token, "") + assert.NotNil(t, data) + assert.Nil(t, err) + }) + + t.Run("cancel a Databricks job", func(t 
*testing.T) { + plugin.client = &MockClient{MockDo: func(req *http.Request) (*http.Response, error) { + assert.Equal(t, req.Method, http.MethodPost) + return &http.Response{ + StatusCode: http.StatusOK, + Body: ioutils.NewBytesReadCloser([]byte(`{"message":"ok"}`)), + }, nil + }} + data, err := plugin.sendRequest(cancel, databricksJob, token, "") + assert.NotNil(t, data) + assert.Nil(t, err) + }) +} + +func TestCreateTaskInfo(t *testing.T) { + t.Run("create task info", func(t *testing.T) { + taskInfo := createTaskInfo("run-id", "job-id", testInstance) + + assert.Equal(t, 1, len(taskInfo.Logs)) + assert.Equal(t, taskInfo.Logs[0].Uri, "https://test-account.cloud.databricks.com/#job/job-id/run/run-id") + assert.Equal(t, taskInfo.Logs[0].Name, "Databricks Console") + }) +} diff --git a/flyteplugins/go/tasks/plugins/webapi/snowflake/config.go b/flyteplugins/go/tasks/plugins/webapi/snowflake/config.go new file mode 100644 index 0000000000..cdc4383763 --- /dev/null +++ b/flyteplugins/go/tasks/plugins/webapi/snowflake/config.go @@ -0,0 +1,71 @@ +package snowflake + +import ( + "time" + + pluginsConfig "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/config" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/webapi" + "github.com/flyteorg/flyte/v2/flytestdlib/config" +) + +var ( + defaultConfig = Config{ + WebAPI: webapi.PluginConfig{ + ResourceQuotas: map[core.ResourceNamespace]int{ + "default": 1000, + }, + ReadRateLimiter: webapi.RateLimiterConfig{ + Burst: 100, + QPS: 10, + }, + WriteRateLimiter: webapi.RateLimiterConfig{ + Burst: 100, + QPS: 10, + }, + Caching: webapi.CachingConfig{ + Size: 500000, + ResyncInterval: config.Duration{Duration: 30 * time.Second}, + Workers: 10, + MaxSystemFailures: 5, + }, + ResourceMeta: nil, + }, + ResourceConstraints: core.ResourceConstraintsSpec{ + ProjectScopeResourceConstraint: &core.ResourceConstraint{ + Value: 100, + }, + 
NamespaceScopeResourceConstraint: &core.ResourceConstraint{ + Value: 50, + }, + }, + DefaultWarehouse: "COMPUTE_WH", + TokenKey: "FLYTE_SNOWFLAKE_CLIENT_TOKEN", + } + + configSection = pluginsConfig.MustRegisterSubSection("snowflake", &defaultConfig) +) + +// Config is config for 'snowflake' plugin +type Config struct { + // WebAPI defines config for the base WebAPI plugin + WebAPI webapi.PluginConfig `json:"webApi" pflag:",Defines config for the base WebAPI plugin."` + + // ResourceConstraints defines resource constraints on how many executions to be created per project/overall at any given time + ResourceConstraints core.ResourceConstraintsSpec `json:"resourceConstraints" pflag:"-,Defines resource constraints on how many executions to be created per project/overall at any given time."` + + DefaultWarehouse string `json:"defaultWarehouse" pflag:",Defines the default warehouse to use when running on Snowflake unless overwritten by the task."` + + TokenKey string `json:"snowflakeTokenKey" pflag:",Name of the key where to find Snowflake token in the secret manager."` + + // snowflakeEndpoint overrides Snowflake client endpoint, only for testing + snowflakeEndpoint string +} + +func GetConfig() *Config { + return configSection.GetConfig().(*Config) +} + +func SetConfig(cfg *Config) error { + return configSection.SetConfig(cfg) +} diff --git a/flyteplugins/go/tasks/plugins/webapi/snowflake/config_test.go b/flyteplugins/go/tasks/plugins/webapi/snowflake/config_test.go new file mode 100644 index 0000000000..5d972ea15b --- /dev/null +++ b/flyteplugins/go/tasks/plugins/webapi/snowflake/config_test.go @@ -0,0 +1,18 @@ +package snowflake + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestGetAndSetConfig(t *testing.T) { + cfg := defaultConfig + cfg.DefaultWarehouse = "test-warehouse" + cfg.WebAPI.Caching.Workers = 1 + cfg.WebAPI.Caching.ResyncInterval.Duration = 5 * time.Second + err := SetConfig(&cfg) + assert.NoError(t, err) + 
assert.Equal(t, &cfg, GetConfig()) +} diff --git a/flyteplugins/go/tasks/plugins/webapi/snowflake/integration_test.go b/flyteplugins/go/tasks/plugins/webapi/snowflake/integration_test.go new file mode 100644 index 0000000000..893e049756 --- /dev/null +++ b/flyteplugins/go/tasks/plugins/webapi/snowflake/integration_test.go @@ -0,0 +1,108 @@ +package snowflake + +import ( + "context" + "fmt" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + + "github.com/flyteorg/flyte/v2/flyteidl2/clients/go/coreutils" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery" + pluginCore "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + pluginCoreMocks "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core/mocks" + "github.com/flyteorg/flyte/v2/flyteplugins/tests" + "github.com/flyteorg/flyte/v2/flytestdlib/contextutils" + "github.com/flyteorg/flyte/v2/flytestdlib/promutils" + "github.com/flyteorg/flyte/v2/flytestdlib/promutils/labeled" + coreIdl "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" + flyteIdlCore "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +func TestEndToEnd(t *testing.T) { + server := newFakeSnowflakeServer() + defer server.Close() + + iter := func(ctx context.Context, tCtx pluginCore.TaskExecutionContext) error { + return nil + } + + cfg := defaultConfig + cfg.snowflakeEndpoint = server.URL + cfg.DefaultWarehouse = "test-warehouse" + cfg.WebAPI.Caching.Workers = 1 + cfg.WebAPI.Caching.ResyncInterval.Duration = 5 * time.Second + err := SetConfig(&cfg) + assert.NoError(t, err) + + pluginEntry := pluginmachinery.CreateRemotePlugin(newSnowflakeJobTaskPlugin()) + plugin, err := pluginEntry.LoadPlugin(context.TODO(), newFakeSetupContext()) + assert.NoError(t, err) + + t.Run("SELECT 1", func(t *testing.T) { + config := make(map[string]string) + config["database"] = "my-database" + config["account"] = "snowflake" + 
config["schema"] = "my-schema" + config["warehouse"] = "my-warehouse" + + inputs, _ := coreutils.MakeLiteralMap(map[string]interface{}{"x": 1}) + template := flyteIdlCore.TaskTemplate{ + Type: "snowflake", + Config: config, + Target: &coreIdl.TaskTemplate_Sql{Sql: &coreIdl.Sql{Statement: "SELECT 1", Dialect: coreIdl.Sql_ANSI}}, + } + + phase := tests.RunPluginEndToEndTest(t, plugin, &template, inputs, nil, nil, iter) + + assert.Equal(t, true, phase.Phase().IsSuccess()) + }) +} + +func newFakeSnowflakeServer() *httptest.Server { + statementHandle := "019e7546-0000-278c-0000-40f10001a082" + return httptest.NewServer(http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { + if request.URL.Path == "/api/v2/statements" && request.Method == "POST" { + writer.WriteHeader(202) + bytes := []byte(fmt.Sprintf(`{ + "statementHandle": "%v", + "message": "Asynchronous execution in progress." + }`, statementHandle)) + _, _ = writer.Write(bytes) + return + } + + if request.URL.Path == "/api/v2/statements/"+statementHandle && request.Method == "GET" { + writer.WriteHeader(200) + bytes := []byte(fmt.Sprintf(`{ + "statementHandle": "%v", + "message": "Statement executed successfully." 
+ }`, statementHandle)) + _, _ = writer.Write(bytes) + return + } + + if request.URL.Path == "/api/v2/statements/"+statementHandle+"/cancel" && request.Method == "POST" { + writer.WriteHeader(200) + return + } + + writer.WriteHeader(500) + })) +} + +func newFakeSetupContext() *pluginCoreMocks.SetupContext { + fakeResourceRegistrar := pluginCoreMocks.ResourceRegistrar{} + fakeResourceRegistrar.On("RegisterResourceQuota", mock.Anything, mock.Anything, mock.Anything).Return(nil) + labeled.SetMetricKeys(contextutils.NamespaceKey) + + fakeSetupContext := pluginCoreMocks.SetupContext{} + fakeSetupContext.OnMetricsScope().Return(promutils.NewScope("test")) + fakeSetupContext.OnResourceRegistrar().Return(&fakeResourceRegistrar) + + return &fakeSetupContext +} diff --git a/flyteplugins/go/tasks/plugins/webapi/snowflake/plugin.go b/flyteplugins/go/tasks/plugins/webapi/snowflake/plugin.go new file mode 100644 index 0000000000..d5fbcf2cfc --- /dev/null +++ b/flyteplugins/go/tasks/plugins/webapi/snowflake/plugin.go @@ -0,0 +1,291 @@ +package snowflake + +import ( + "bytes" + "context" + "encoding/gob" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "time" + + errors2 "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/errors" + pluginErrors "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/errors" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + pluginsCore "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core/template" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/webapi" + "github.com/flyteorg/flyte/v2/flytestdlib/errors" + "github.com/flyteorg/flyte/v2/flytestdlib/logger" + "github.com/flyteorg/flyte/v2/flytestdlib/promutils" + flyteIdlCore "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +const ( + ErrSystem errors.ErrorCode = "System" + 
post string = "POST" + get string = "GET" +) + +// for mocking/testing purposes, and we'll override this method +type HTTPClient interface { + Do(req *http.Request) (*http.Response, error) +} + +type Plugin struct { + metricScope promutils.Scope + cfg *Config + client HTTPClient +} + +type ResourceWrapper struct { + StatusCode int + Message string +} + +type ResourceMetaWrapper struct { + QueryID string + Account string + Token string +} + +func (p Plugin) GetConfig() webapi.PluginConfig { + return GetConfig().WebAPI +} + +type QueryInfo struct { + Account string + Warehouse string + Schema string + Database string + Statement string +} + +func (p Plugin) ResourceRequirements(_ context.Context, _ webapi.TaskExecutionContextReader) ( + namespace core.ResourceNamespace, constraints core.ResourceConstraintsSpec, err error) { + + // Resource requirements are assumed to be the same. + return "default", p.cfg.ResourceConstraints, nil +} + +func (p Plugin) Create(ctx context.Context, taskCtx webapi.TaskExecutionContextReader) (webapi.ResourceMeta, + webapi.Resource, error) { + task, err := taskCtx.TaskReader().Read(ctx) + if err != nil { + return nil, nil, err + } + + token, err := taskCtx.SecretManager().Get(ctx, p.cfg.TokenKey) + if err != nil { + return nil, nil, err + } + config := task.GetConfig() + + outputs, err := template.Render(ctx, []string{ + task.GetSql().Statement, + }, template.Parameters{ + TaskExecMetadata: taskCtx.TaskExecutionMetadata(), + Inputs: taskCtx.InputReader(), + OutputPath: taskCtx.OutputWriter(), + Task: taskCtx.TaskReader(), + }) + if err != nil { + return nil, nil, err + } + queryInfo := QueryInfo{ + Account: config["account"], + Warehouse: config["warehouse"], + Schema: config["schema"], + Database: config["database"], + Statement: outputs[0], + } + + if len(queryInfo.Warehouse) == 0 { + queryInfo.Warehouse = p.cfg.DefaultWarehouse + } + if len(queryInfo.Account) == 0 { + return nil, nil, errors.Errorf(errors2.BadTaskSpecification, 
"Account must not be empty.") + } + if len(queryInfo.Database) == 0 { + return nil, nil, errors.Errorf(errors2.BadTaskSpecification, "Database must not be empty.") + } + req, err := buildRequest(post, queryInfo, p.cfg.snowflakeEndpoint, + config["account"], token, "", false) + if err != nil { + return nil, nil, err + } + resp, err := p.client.Do(req) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + data, err := buildResponse(resp) + if err != nil { + return nil, nil, err + } + + if data["statementHandle"] == "" { + return nil, nil, pluginErrors.Wrapf(pluginErrors.RuntimeFailure, err, + "Unable to fetch statementHandle from http response") + } + if data["message"] == "" { + return nil, nil, pluginErrors.Wrapf(pluginErrors.RuntimeFailure, err, + "Unable to fetch message from http response") + } + queryID := fmt.Sprintf("%v", data["statementHandle"]) + message := fmt.Sprintf("%v", data["message"]) + + return ResourceMetaWrapper{queryID, queryInfo.Account, token}, + ResourceWrapper{StatusCode: resp.StatusCode, Message: message}, nil +} + +func (p Plugin) Get(ctx context.Context, taskCtx webapi.GetContext) (latest webapi.Resource, err error) { + exec := taskCtx.ResourceMeta().(ResourceMetaWrapper) + req, err := buildRequest(get, QueryInfo{}, p.cfg.snowflakeEndpoint, + exec.Account, exec.Token, exec.QueryID, false) + if err != nil { + return nil, err + } + resp, err := p.client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + data, err := buildResponse(resp) + if err != nil { + return nil, err + } + message := fmt.Sprintf("%v", data["message"]) + return ResourceWrapper{ + StatusCode: resp.StatusCode, + Message: message, + }, nil +} + +func (p Plugin) Delete(ctx context.Context, taskCtx webapi.DeleteContext) error { + if taskCtx.ResourceMeta() == nil { + return nil + } + exec := taskCtx.ResourceMeta().(ResourceMetaWrapper) + req, err := buildRequest(post, QueryInfo{}, p.cfg.snowflakeEndpoint, + exec.Account, exec.Token, 
exec.QueryID, true) + if err != nil { + return err + } + resp, err := p.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + logger.Infof(ctx, "Deleted query execution [%v]", resp) + + return nil +} + +func (p Plugin) Status(_ context.Context, taskCtx webapi.StatusContext) (phase core.PhaseInfo, err error) { + exec := taskCtx.ResourceMeta().(ResourceMetaWrapper) + statusCode := taskCtx.Resource().(ResourceWrapper).StatusCode + if statusCode == 0 { + return core.PhaseInfoUndefined, errors.Errorf(ErrSystem, "No Status field set.") + } + + taskInfo := createTaskInfo(exec.QueryID, exec.Account) + switch statusCode { + case http.StatusAccepted: + return core.PhaseInfoRunning(pluginsCore.DefaultPhaseVersion, createTaskInfo(exec.QueryID, exec.Account)), nil + case http.StatusOK: + return pluginsCore.PhaseInfoSuccess(taskInfo), nil + case http.StatusUnprocessableEntity: + return pluginsCore.PhaseInfoFailure(string(rune(statusCode)), "phaseReason", taskInfo), nil + } + return core.PhaseInfoUndefined, pluginErrors.Errorf(pluginsCore.SystemErrorCode, "unknown execution phase [%v].", statusCode) +} + +func buildRequest(method string, queryInfo QueryInfo, snowflakeEndpoint string, account string, token string, + queryID string, isCancel bool) (*http.Request, error) { + var snowflakeURL string + // for mocking/testing purposes + if snowflakeEndpoint == "" { + snowflakeURL = "https://" + account + ".snowflakecomputing.com/api/v2/statements" + } else { + snowflakeURL = snowflakeEndpoint + "/api/v2/statements" + } + + var data []byte + if method == post && !isCancel { + snowflakeURL += "?async=true" + data = []byte(fmt.Sprintf(`{ + "statement": "%v", + "database": "%v", + "schema": "%v", + "warehouse": "%v" + }`, queryInfo.Statement, queryInfo.Database, queryInfo.Schema, queryInfo.Warehouse)) + } else { + snowflakeURL += "/" + queryID + } + if isCancel { + snowflakeURL += "/cancel" + } + + req, err := http.NewRequest(method, snowflakeURL, 
bytes.NewBuffer(data)) + if err != nil { + return nil, err + } + req.Header.Add("Authorization", "Bearer "+token) + req.Header.Add("X-Snowflake-Authorization-Token-Type", "KEYPAIR_JWT") + req.Header.Add("Content-Type", "application/json") + req.Header.Add("Accept", "application/json") + return req, nil +} + +func buildResponse(response *http.Response) (map[string]interface{}, error) { + responseBody, err := ioutil.ReadAll(response.Body) + if err != nil { + return nil, err + } + var data map[string]interface{} + err = json.Unmarshal(responseBody, &data) + if err != nil { + return nil, err + } + return data, nil +} + +func createTaskInfo(queryID string, account string) *core.TaskInfo { + timeNow := time.Now() + + return &core.TaskInfo{ + OccurredAt: &timeNow, + Logs: []*flyteIdlCore.TaskLog{ + { + Uri: fmt.Sprintf("https://%v.snowflakecomputing.com/console#/monitoring/queries/detail?queryId=%v", + account, + queryID), + Name: "Snowflake Console", + }, + }, + } +} + +func newSnowflakeJobTaskPlugin() webapi.PluginEntry { + return webapi.PluginEntry{ + ID: "snowflake", + SupportedTaskTypes: []core.TaskType{"snowflake"}, + PluginLoader: func(ctx context.Context, iCtx webapi.PluginSetupContext) (webapi.AsyncPlugin, error) { + return Plugin{ + metricScope: iCtx.MetricsScope(), + cfg: GetConfig(), + client: &http.Client{}, + }, nil + }, + } +} + +func init() { + gob.Register(ResourceMetaWrapper{}) + gob.Register(ResourceWrapper{}) + + pluginmachinery.PluginRegistry().RegisterRemotePlugin(newSnowflakeJobTaskPlugin()) +} diff --git a/flyteplugins/go/tasks/plugins/webapi/snowflake/plugin_test.go b/flyteplugins/go/tasks/plugins/webapi/snowflake/plugin_test.go new file mode 100644 index 0000000000..5f2227a33d --- /dev/null +++ b/flyteplugins/go/tasks/plugins/webapi/snowflake/plugin_test.go @@ -0,0 +1,123 @@ +package snowflake + +import ( + "context" + "encoding/json" + "io/ioutil" + "net/http" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + 
pluginsCore "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + pluginCoreMocks "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core/mocks" + "github.com/flyteorg/flyte/v2/flytestdlib/promutils" +) + +type MockClient struct { +} + +var ( + MockDo func(req *http.Request) (*http.Response, error) +) + +func (m *MockClient) Do(req *http.Request) (*http.Response, error) { + return MockDo(req) +} + +func TestPlugin(t *testing.T) { + fakeSetupContext := pluginCoreMocks.SetupContext{} + fakeSetupContext.OnMetricsScope().Return(promutils.NewScope("test")) + + plugin := Plugin{ + metricScope: fakeSetupContext.MetricsScope(), + cfg: GetConfig(), + client: &MockClient{}, + } + t.Run("get config", func(t *testing.T) { + cfg := defaultConfig + cfg.WebAPI.Caching.Workers = 1 + cfg.WebAPI.Caching.ResyncInterval.Duration = 5 * time.Second + err := SetConfig(&cfg) + assert.NoError(t, err) + assert.Equal(t, cfg.WebAPI, plugin.GetConfig()) + }) + t.Run("get ResourceRequirements", func(t *testing.T) { + namespace, constraints, err := plugin.ResourceRequirements(context.TODO(), nil) + assert.NoError(t, err) + assert.Equal(t, pluginsCore.ResourceNamespace("default"), namespace) + assert.Equal(t, plugin.cfg.ResourceConstraints, constraints) + }) +} + +func TestCreateTaskInfo(t *testing.T) { + t.Run("create task info", func(t *testing.T) { + taskInfo := createTaskInfo("d5493e36", "test-account") + + assert.Equal(t, 1, len(taskInfo.Logs)) + assert.Equal(t, taskInfo.Logs[0].Uri, "https://test-account.snowflakecomputing.com/console#/monitoring/queries/detail?queryId=d5493e36") + assert.Equal(t, taskInfo.Logs[0].Name, "Snowflake Console") + }) +} + +func TestBuildRequest(t *testing.T) { + account := "test-account" + token := "test-token" + queryID := "019e70eb-0000-278b-0000-40f100012b1a" + snowflakeEndpoint := "" + snowflakeURL := "https://" + account + ".snowflakecomputing.com/api/v2/statements" + t.Run("build http request for submitting a 
snowflake query", func(t *testing.T) { + queryInfo := QueryInfo{ + Account: account, + Warehouse: "test-warehouse", + Schema: "test-schema", + Database: "test-database", + Statement: "SELECT 1", + } + + req, err := buildRequest(post, queryInfo, snowflakeEndpoint, account, token, queryID, false) + header := http.Header{} + header.Add("Authorization", "Bearer "+token) + header.Add("X-Snowflake-Authorization-Token-Type", "KEYPAIR_JWT") + header.Add("Content-Type", "application/json") + header.Add("Accept", "application/json") + + assert.NoError(t, err) + assert.Equal(t, header, req.Header) + assert.Equal(t, snowflakeURL+"?async=true", req.URL.String()) + assert.Equal(t, post, req.Method) + }) + t.Run("build http request for getting a snowflake query status", func(t *testing.T) { + req, err := buildRequest(get, QueryInfo{}, snowflakeEndpoint, account, token, queryID, false) + + assert.NoError(t, err) + assert.Equal(t, snowflakeURL+"/"+queryID, req.URL.String()) + assert.Equal(t, get, req.Method) + }) + t.Run("build http request for deleting a snowflake query", func(t *testing.T) { + req, err := buildRequest(post, QueryInfo{}, snowflakeEndpoint, account, token, queryID, true) + + assert.NoError(t, err) + assert.Equal(t, snowflakeURL+"/"+queryID+"/cancel", req.URL.String()) + assert.Equal(t, post, req.Method) + }) +} + +func TestBuildResponse(t *testing.T) { + t.Run("build http response", func(t *testing.T) { + bodyStr := `{"statementHandle":"019c06a4-0000", "message":"Statement executed successfully."}` + responseBody := ioutil.NopCloser(strings.NewReader(bodyStr)) + response := &http.Response{Body: responseBody} + actualData, err := buildResponse(response) + assert.NoError(t, err) + + bodyByte, err := ioutil.ReadAll(strings.NewReader(bodyStr)) + assert.NoError(t, err) + var expectedData map[string]interface{} + err = json.Unmarshal(bodyByte, &expectedData) + assert.NoError(t, err) + assert.Equal(t, expectedData, actualData) + }) +} diff --git 
a/flyteplugins/go/tasks/testdata/config.yaml b/flyteplugins/go/tasks/testdata/config.yaml new file mode 100755 index 0000000000..59d6a99c96 --- /dev/null +++ b/flyteplugins/go/tasks/testdata/config.yaml @@ -0,0 +1,94 @@ +# Sample plugins config +plugins: + # All k8s plugins default configuration + k8s: + inject-finalizer: true + default-annotations: + - annotationKey1: annotationValue1 + - annotationKey2: annotationValue2 + default-labels: + - label1: labelValue1 + - label2: labelValue2 + resource-tolerations: + nvidia.com/gpu: + key: flyte/gpu + value: dedicated + operator: Equal + effect: NoSchedule + storage: + - key: storage + value: special + operator: Equal + effect: PreferNoSchedule + interruptible-node-selector: + - x/interruptible: "true" + interruptible-tolerations: + - key: x/flyte + value: interruptible + operator: Equal + effect: NoSchedule + default-env-vars: + - AWS_METADATA_SERVICE_TIMEOUT: 5 + - AWS_METADATA_SERVICE_NUM_ATTEMPTS: 20 + - FLYTE_AWS_ENDPOINT: "http://minio.flyte:9000" + - FLYTE_AWS_ACCESS_KEY_ID: minio + - FLYTE_AWS_SECRET_ACCESS_KEY: miniostorage + default-pod-security-context: + runAsUser: 1000 + runAsGroup: 3000 + fsGroup: 2000 + default-security-context: + allowPrivilegeEscalation: false + enable-host-networking-pod: true + default-pod-dns-config: + options: + - name: "ndots" + value: "1" + - name: "single-request-reopen" + - name: "timeout" + value: "1" + - name: "attempts" + value: "3" + nameservers: + - "8.8.8.8" + - "8.8.4.4" + searches: + - "ns1.svc.cluster-domain.example" + - "my.dns.search.suffix" + + # Spark Plugin configuration + spark: + spark-config-default: + - spark.hadoop.mapreduce.fileoutputcommitter.algorithm.version: "2" + - spark.kubernetes.allocation.batch.size: "50" + - spark.hadoop.fs.s3a.acl.default: "BucketOwnerFullControl" + - spark.hadoop.fs.s3n.impl: "org.apache.hadoop.fs.s3a.S3AFileSystem" + - spark.hadoop.fs.AbstractFileSystem.s3n.impl: "org.apache.hadoop.fs.s3a.S3A" + - spark.hadoop.fs.s3.impl: 
"org.apache.hadoop.fs.s3a.S3AFileSystem" + - spark.hadoop.fs.AbstractFileSystem.s3.impl: "org.apache.hadoop.fs.s3a.S3A" + - spark.hadoop.fs.s3a.impl: "org.apache.hadoop.fs.s3a.S3AFileSystem" + - spark.hadoop.fs.AbstractFileSystem.s3a.impl: "org.apache.hadoop.fs.s3a.S3A" + - spark.hadoop.fs.s3a.multipart.threshold: "536870912" + - spark.blacklist.enabled: "true" + - spark.blacklist.timeout: "5m" + features: + - name: "feature1" + spark-config: + - spark.hadoop.feature1: "true" + - spark.sql.feature1: "true" + - name: "feature2" + spark-config: + - spark.hadoop.feature2: "true" + - spark.sql.feature2: "true" + logs: + mixed: + kubernetes-enabled: true + kubernetes-url: "http://localhost:30082" + # Logging configuration + logs: + kubernetes-enabled: true + kubernetes-url: "http://localhost:30082" + azure-log-templates: + - displayName: "Test Azure Logs" + templateUris: + - "https://portal.azure.com#@TEST_AZURE_URI/q/" diff --git a/flyteplugins/go/tasks/testdata/incorrect-config.yaml b/flyteplugins/go/tasks/testdata/incorrect-config.yaml new file mode 100755 index 0000000000..719b6113d7 --- /dev/null +++ b/flyteplugins/go/tasks/testdata/incorrect-config.yaml @@ -0,0 +1,7 @@ +# Sample plugins config +plugins: + # Logging configuration + logs: + config: + kubernetes-enabled: true + kubernetes-url: "http://localhost:30082" diff --git a/flyteplugins/tests/end_to_end.go b/flyteplugins/tests/end_to_end.go new file mode 100644 index 0000000000..e1fddd670c --- /dev/null +++ b/flyteplugins/tests/end_to_end.go @@ -0,0 +1,292 @@ +package tests + +import ( + "context" + "encoding/json" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/go-test/deep" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + v12 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/rand" 
+ + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/catalog" + catalogMocks "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/catalog/mocks" + pluginCore "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core" + coreMocks "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/core/mocks" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/io" + ioMocks "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/io/mocks" + "github.com/flyteorg/flyte/v2/flyteplugins/go/tasks/pluginmachinery/workqueue" + "github.com/flyteorg/flyte/v2/flytestdlib/promutils" + "github.com/flyteorg/flyte/v2/flytestdlib/storage" + "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/common" + idlCore "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +func createSampleContainerTask() *idlCore.Container { + return &idlCore.Container{ + Command: []string{"cmd"}, + Args: []string{"{{$inputPrefix}}"}, + Image: "img1", + Config: []*idlCore.KeyValuePair{ + { + Key: "dynamic_queue", + Value: "queue1", + }, + }, + } +} + +func BuildTaskTemplate() *idlCore.TaskTemplate { + return &idlCore.TaskTemplate{ + Target: &idlCore.TaskTemplate_Container{ + Container: createSampleContainerTask(), + }, + } +} + +func RunPluginEndToEndTest(t *testing.T, executor pluginCore.Plugin, template *idlCore.TaskTemplate, + inputs *idlCore.LiteralMap, expectedOutputs *idlCore.LiteralMap, expectedFailure *idlCore.ExecutionError, + iterationUpdate func(ctx context.Context, tCtx pluginCore.TaskExecutionContext) error) pluginCore.PhaseInfo { + + ctx := context.Background() + + ds, err := storage.NewDataStore(&storage.Config{Type: storage.TypeMemory}, promutils.NewTestScope()) + assert.NoError(t, err) + + execID := rand.String(3) + + basePrefix := storage.DataReference("fake://bucket/prefix/" + execID) + assert.NoError(t, ds.WriteProtobuf(ctx, basePrefix+"/inputs.pb", storage.Options{}, inputs)) + + tr := &coreMocks.TaskReader{} 
+ tr.OnRead(ctx).Return(template, nil) + + inputReader := &ioMocks.InputReader{} + inputReader.OnGetInputPrefixPath().Return(basePrefix) + inputReader.OnGetInputPath().Return(basePrefix + "/inputs.pb") + inputReader.OnGetMatch(mock.Anything).Return(inputs, nil) + + outputWriter := &ioMocks.OutputWriter{} + outputWriter.OnGetRawOutputPrefix().Return("/sandbox/") + outputWriter.OnGetOutputPrefixPath().Return(basePrefix) + outputWriter.OnGetErrorPath().Return(basePrefix + "/error.pb") + outputWriter.OnGetOutputPath().Return(basePrefix + "/outputs.pb") + outputWriter.OnGetCheckpointPrefix().Return("/checkpoint") + outputWriter.OnGetPreviousCheckpointsPrefix().Return("/prev") + + outputWriter.OnPutMatch(mock.Anything, mock.Anything).Return(nil).Run(func(args mock.Arguments) { + or := args.Get(1).(io.OutputReader) + literals, ee, err := or.Read(ctx) + assert.NoError(t, err) + + if ee != nil { + assert.NoError(t, ds.WriteProtobuf(ctx, outputWriter.GetErrorPath(), storage.Options{}, ee)) + } + + if literals != nil { + assert.NoError(t, ds.WriteProtobuf(ctx, outputWriter.GetOutputPath(), storage.Options{}, literals)) + } + }) + + pluginStateWriter := &coreMocks.PluginStateWriter{} + latestKnownState := atomic.Value{} + pluginStateWriter.OnPutMatch(mock.Anything, mock.Anything).Return(nil).Run(func(args mock.Arguments) { + latestKnownState.Store(args.Get(1)) + }) + + pluginStateWriter.OnReset().Return(nil).Run(func(args mock.Arguments) { + latestKnownState.Store(nil) + }) + + pluginStateReader := &coreMocks.PluginStateReader{} + pluginStateReader.OnGetMatch(mock.Anything).Return(0, nil).Run(func(args mock.Arguments) { + o := args.Get(0) + x, err := json.Marshal(latestKnownState.Load()) + assert.NoError(t, err) + assert.NoError(t, json.Unmarshal(x, &o)) + }) + pluginStateReader.OnGetStateVersion().Return(0) + + tID := &coreMocks.TaskExecutionID{} + tID.OnGetGeneratedName().Return(execID + "-my-task-1") + tID.OnGetID().Return(idlCore.TaskExecutionIdentifier{ + TaskId: 
&idlCore.Identifier{ + ResourceType: idlCore.ResourceType_TASK, + Project: "a", + Domain: "d", + Name: "n", + Version: "abc", + }, + NodeExecutionId: &idlCore.NodeExecutionIdentifier{ + NodeId: "node1", + ExecutionId: &idlCore.WorkflowExecutionIdentifier{ + Project: "a", + Domain: "d", + Name: "exec", + }, + }, + RetryAttempt: 0, + }) + tID.OnGetUniqueNodeID().Return("unique-node-id") + + overrides := &coreMocks.TaskOverrides{} + overrides.OnGetConfigMap().Return(&v1.ConfigMap{Data: map[string]string{ + "dynamic-queue": "queue1", + }}) + overrides.OnGetResources().Return(&v1.ResourceRequirements{ + Requests: map[v1.ResourceName]resource.Quantity{}, + Limits: map[v1.ResourceName]resource.Quantity{}, + }) + overrides.OnGetExtendedResources().Return(&idlCore.ExtendedResources{}) + overrides.OnGetContainerImage().Return("") + overrides.OnGetPodTemplate().Return(nil) + + connections := map[string]pluginCore.ConnectionWrapper{ + "my-openai": { + Connection: idlCore.Connection{ + TaskType: "openai", + Secrets: map[string]string{"key": "value"}, + Configs: map[string]string{"key": "value"}, + }, + Source: common.AttributesSource_GLOBAL, + }, + } + + tMeta := &coreMocks.TaskExecutionMetadata{} + tMeta.OnGetTaskExecutionID().Return(tID) + tMeta.OnGetOverrides().Return(overrides) + tMeta.OnGetK8sServiceAccount().Return("s") + tMeta.OnGetNamespace().Return("fake-development") + tMeta.OnGetMaxAttempts().Return(2) + tMeta.OnGetSecurityContext().Return(idlCore.SecurityContext{ + RunAs: &idlCore.Identity{ + K8SServiceAccount: "s", + }, + }) + tMeta.OnGetLabels().Return(map[string]string{"organization": "flyte", "project": "flytesnacks", "domain": "development"}) + tMeta.OnGetAnnotations().Return(map[string]string{}) + tMeta.OnIsInterruptible().Return(true) + tMeta.OnGetOwnerReference().Return(v12.OwnerReference{}) + tMeta.OnGetOwnerID().Return(types.NamespacedName{ + Namespace: "fake-development", + Name: execID, + }) + 
tMeta.OnGetPlatformResources().Return(&v1.ResourceRequirements{}) + tMeta.OnGetInterruptibleFailureThreshold().Return(2) + tMeta.OnGetEnvironmentVariables().Return(nil) + tMeta.OnGetExternalResourceAttributes().Return(pluginCore.ExternalResourceAttributes{Connections: connections}) + tMeta.OnGetConsoleURL().Return("") + + catClient := &catalogMocks.Client{} + catData := sync.Map{} + catClient.On("Get", mock.Anything, mock.Anything).Return( + func(ctx context.Context, key catalog.Key) io.OutputReader { + data, found := catData.Load(key) + if !found { + return nil + } + + or := &ioMocks.OutputReader{} + or.OnExistsMatch(mock.Anything).Return(true, nil) + or.OnIsErrorMatch(mock.Anything).Return(false, nil) + or.OnReadMatch(mock.Anything).Return(data.(*idlCore.LiteralMap), nil, nil) + return or + }, + func(ctx context.Context, key catalog.Key) error { + _, found := catData.Load(key) + if !found { + return status.Error(codes.NotFound, "No output found for key") + } + + return nil + }) + catClient.On(mock.Anything, mock.Anything, mock.Anything, mock.Anything). + Return(nil). + Run(func(args mock.Arguments) { + key := args.Get(1).(catalog.Key) + or := args.Get(2).(io.OutputReader) + o, ee, err := or.Read(ctx) + assert.NoError(t, err) + // TODO: Outputting error is not yet supported. 
+ assert.Nil(t, ee) + catData.Store(key, o) + }) + cat, err := catalog.NewAsyncClient(catClient, catalog.Config{ + ReaderWorkqueueConfig: workqueue.Config{ + MaxRetries: 0, + Workers: 2, + IndexCacheMaxItems: 100, + }, + WriterWorkqueueConfig: workqueue.Config{ + MaxRetries: 0, + Workers: 2, + IndexCacheMaxItems: 100, + }, + }, promutils.NewTestScope()) + assert.NoError(t, err) + assert.NoError(t, cat.Start(ctx)) + + eRecorder := &coreMocks.EventsRecorder{} + eRecorder.OnRecordRawMatch(mock.Anything, mock.Anything).Return(nil) + + resourceManager := &coreMocks.ResourceManager{} + resourceManager.OnAllocateResourceMatch(mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(pluginCore.AllocationStatusGranted, nil) + resourceManager.OnReleaseResourceMatch(mock.Anything, mock.Anything, mock.Anything).Return(nil) + + secretManager := &coreMocks.SecretManager{} + secretManager.OnGet(ctx, mock.Anything).Return("fake-token", nil) + + tCtx := &coreMocks.TaskExecutionContext{} + tCtx.OnInputReader().Return(inputReader) + tCtx.OnTaskRefreshIndicator().Return(func(ctx context.Context) {}) + tCtx.OnOutputWriter().Return(outputWriter) + tCtx.OnDataStore().Return(ds) + tCtx.OnTaskReader().Return(tr) + tCtx.OnPluginStateWriter().Return(pluginStateWriter) + tCtx.OnPluginStateReader().Return(pluginStateReader) + tCtx.OnTaskExecutionMetadata().Return(tMeta) + tCtx.OnCatalog().Return(cat) + tCtx.OnEventsRecorder().Return(eRecorder) + tCtx.OnResourceManager().Return(resourceManager) + tCtx.OnSecretManager().Return(secretManager) + + trns := pluginCore.DoTransition(pluginCore.PhaseInfoQueued(time.Now(), 0, "")) + for !trns.Info().Phase().IsTerminal() { + trns, err = executor.Handle(ctx, tCtx) + assert.NoError(t, err) + if iterationUpdate != nil { + assert.NoError(t, iterationUpdate(ctx, tCtx)) + } + } + + assert.NoError(t, err) + if expectedOutputs != nil { + assert.True(t, trns.Info().Phase().IsSuccess()) + actualOutputs := &idlCore.LiteralMap{} + assert.NoError(t, 
ds.ReadProtobuf(context.TODO(), outputWriter.GetOutputPath(), actualOutputs)) + + if diff := deep.Equal(expectedOutputs, actualOutputs); diff != nil { + t.Errorf("Expected != Actual. Diff: %v", diff) + } + } else if expectedFailure != nil { + assert.True(t, trns.Info().Phase().IsFailure()) + actualError := &idlCore.ExecutionError{} + assert.NoError(t, ds.ReadProtobuf(context.TODO(), outputWriter.GetErrorPath(), actualError)) + + if diff := deep.Equal(expectedFailure, actualError); diff != nil { + t.Errorf("Expected != Actual. Diff: %v", diff) + } + } + + return trns.Info() +} diff --git a/flytestdlib/cli/pflags/api/generator.go b/flytestdlib/cli/pflags/api/generator.go deleted file mode 100644 index 520c4879f3..0000000000 --- a/flytestdlib/cli/pflags/api/generator.go +++ /dev/null @@ -1,693 +0,0 @@ -package api - -import ( - "context" - "fmt" - "go/types" - "path/filepath" - "strings" - - "github.com/ernesto-jimenez/gogen/gogenutil" - "golang.org/x/tools/go/packages" - - "github.com/flyteorg/flyte/v2/flytestdlib/logger" -) - -const ( - indent = " " -) - -// PFlagProviderGenerator parses and generates GetPFlagSet implementation to add PFlags for a given struct's fields. -type PFlagProviderGenerator struct { - pkg *types.Package - st *types.Named - defaultVar *types.Var - shouldBindDefaultVar bool -} - -// This list is restricted because that's the only kinds viper parses out, otherwise it assumes strings. 
-// github.com/spf13/viper/viper.go:1016 -var allowedKinds = []types.Type{ - types.Typ[types.Int], - types.Typ[types.Int8], - types.Typ[types.Int16], - types.Typ[types.Int32], - types.Typ[types.Int64], - types.Typ[types.Bool], - types.Typ[types.String], - types.NewMap(types.Typ[types.String], types.Typ[types.String]), -} - -type SliceOrArray interface { - Elem() types.Type -} - -func capitalize(s string) string { - if s[0] >= 'a' && s[0] <= 'z' { - return string(s[0]-'a'+'A') + s[1:] - } - - return s -} - -func buildFieldForSlice(ctx context.Context, t SliceOrArray, name, goName, usage, defaultValue string, bindDefaultVar bool) (FieldInfo, error) { - strategy := Raw - FlagMethodName := "StringSlice" - typ := types.NewSlice(types.Typ[types.String]) - emptyDefaultValue := `[]string{}` - if b, ok := t.Elem().(*types.Basic); !ok { - logger.Infof(ctx, "Elem of type [%v] is not a basic type. It must be json unmarshalable or generation will fail.", t.Elem()) - if !isJSONUnmarshaler(t.Elem()) { - return FieldInfo{}, - fmt.Errorf("slice of type [%v] is not supported. Only basic slices or slices of json-unmarshalable types are supported", - t.Elem().String()) - } - } else { - logger.Infof(ctx, "Elem of type [%v] is a basic type. 
Will use a pflag as a Slice.", b) - strategy = SliceJoined - FlagMethodName = fmt.Sprintf("%vSlice", capitalize(b.Name())) - typ = types.NewSlice(b) - emptyDefaultValue = fmt.Sprintf(`[]%v{}`, b.Name()) - } - - testValue := defaultValue - if len(defaultValue) == 0 { - defaultValue = emptyDefaultValue - testValue = `"1,1"` - } - - return FieldInfo{ - Name: name, - GoName: goName, - Typ: typ, - FlagMethodName: FlagMethodName, - TestFlagMethodName: FlagMethodName, - DefaultValue: defaultValue, - UsageString: usage, - TestValue: testValue, - TestStrategy: strategy, - ShouldBindDefault: bindDefaultVar, - }, nil -} - -func buildFieldForMap(ctx context.Context, t *types.Map, name, goName, usage, defaultValue string, bindDefaultVar bool) (FieldInfo, error) { - strategy := Raw - FlagMethodName := "StringToString" - typ := types.NewMap(types.Typ[types.String], types.Typ[types.String]) - emptyDefaultValue := `nil` - if k, ok := t.Key().(*types.Basic); !ok || k.Kind() != types.String { - logger.Infof(ctx, "Key of type [%v] is not a basic type. It must be json unmarshalable or generation will fail.", t.Elem()) - } else if v, valueOk := t.Elem().(*types.Basic); !valueOk && !isJSONUnmarshaler(t.Elem()) { - return FieldInfo{}, - fmt.Errorf("map of type [%v] is not supported. Only basic slices or slices of json-unmarshalable types are supported", - t.Elem().String()) - } else { - logger.Infof(ctx, "Map[%v]%v is supported. using pflag maps.", k, t.Elem()) - strategy = Raw - if valueOk { - FlagMethodName = fmt.Sprintf("StringTo%v", capitalize(v.Name())) - typ = types.NewMap(k, v) - emptyDefaultValue = fmt.Sprintf(`map[%v]%v{}`, k.Name(), v.Name()) - } else { - // Value is not a basic type. 
Rely on json marshaling to unmarshal it - /* #nosec */ - FlagMethodName = "StringToString" - } - } - - if len(defaultValue) == 0 { - defaultValue = emptyDefaultValue - } - - testValue := `"a=1,b=2"` - - return FieldInfo{ - Name: name, - GoName: goName, - Typ: typ, - FlagMethodName: FlagMethodName, - TestFlagMethodName: FlagMethodName, - DefaultValue: defaultValue, - UsageString: usage, - TestValue: testValue, - TestStrategy: strategy, - ShouldBindDefault: bindDefaultVar, - ShouldTestDefault: false, - }, nil -} - -// Appends field accessors using "." as the delimiter. -// e.g. appendAccessors("var1", "field1", "subField") will output "var1.field1.subField" -func appendAccessors(accessors ...string) string { - sb := strings.Builder{} - switch len(accessors) { - case 0: - return "" - case 1: - return accessors[0] - } - - for _, s := range accessors { - if len(s) > 0 { - if sb.Len() > 0 { - if _, err := sb.WriteString("."); err != nil { - fmt.Printf("Failed to writeString, error: %v", err) - return "" - } - } - - if _, err := sb.WriteString(s); err != nil { - fmt.Printf("Failed to writeString, error: %v", err) - return "" - } - } - } - - return sb.String() -} - -func pflagValueTypesToList(m map[string]PFlagValueType) []PFlagValueType { - l := make([]PFlagValueType, 0, len(m)) - for _, v := range m { - l = append(l, v) - } - - return l -} - -// Traverses fields in type and follows recursion tree to discover all fields. It stops when one of two conditions is -// met; encountered a basic type (e.g. string, int... etc.) or the field type implements UnmarshalJSON. -// If passed a non-empty defaultValueAccessor, it'll be used to fill in default values instead of any default value -// specified in pflag tag. 
-func discoverFieldsRecursive(ctx context.Context, workingDirPkg string, typ interface { - Obj() *types.TypeName - Underlying() types.Type -}, defaultValueAccessor, fieldPath string, bindDefaultVar bool) ([]FieldInfo, []PFlagValueType, error) { - logger.Printf(ctx, "Finding all fields in [%v.%v.%v]", - typ.Obj().Pkg().Path(), typ.Obj().Pkg().Name(), typ.Obj().Name()) - - ctx = logger.WithIndent(ctx, indent) - - st := typ.Underlying().(*types.Struct) - fields := make([]FieldInfo, 0, st.NumFields()) - pflagValueTypes := make(map[string]PFlagValueType, st.NumFields()) - addField := func(typ types.Type, f FieldInfo) { - if _, isNamed := typ.(*types.Named); isNamed && bindDefaultVar { - hasPFlagValueImpl := isPFlagValue(typ) - if hasPFlagValueImpl { - f.FlagMethodName = "" - } else { - f.ShouldBindDefault = false - } - } - - fields = append(fields, f) - } - for i := 0; i < st.NumFields(); i++ { - variable := st.Field(i) - if !variable.IsField() { - continue - } - - // Parses out the tag if one exists. - tag, err := ParseTag(st.Tag(i)) - if err != nil { - return nil, nil, err - } - - if len(tag.Name) == 0 { - tag.Name = variable.Name() - } - - if tag.DefaultValue == "-" { - logger.Infof(ctx, "Skipping field [%s], as '-' value detected", tag.Name) - continue - } - - typ := variable.Type() - ptr, isPtr := typ.(*types.Pointer) - if isPtr { - typ = ptr.Elem() - } - - switch t := typ.(type) { - case *types.Basic: - f, err := buildBasicField(ctx, tag, t, defaultValueAccessor, fieldPath, variable, false, false, isPtr, bindDefaultVar, nil) - if err != nil { - return fields, pflagValueTypesToList(pflagValueTypes), err - } - - addField(typ, f) - case *types.Alias: - // For alias types, they will show up as Alias but their underlying type will be basic. 
- if b, isBasic := t.Underlying().(*types.Basic); isBasic { - f, err := buildBasicField(ctx, tag, b, defaultValueAccessor, fieldPath, variable, false, false, isPtr, bindDefaultVar, nil) - if err != nil { - return fields, pflagValueTypesToList(pflagValueTypes), err - } - - addField(typ, f) - break - } - - if _, isStruct := t.Underlying().(*types.Struct); !isStruct { - // TODO: Add a more descriptive error message. - return nil, []PFlagValueType{}, fmt.Errorf("invalid type. it must be struct, received [%v] for field [%v]", t.Underlying().String(), tag.Name) - } - - // If the type has json unmarshaler, then stop the recursion and assume the type is string. config package - // will use json unmarshaler to fill in the final config object. - jsonUnmarshaler := isJSONUnmarshaler(t) - - defaultValue := tag.DefaultValue - bindDefaultVarForField := bindDefaultVar - testValue := defaultValue - if len(defaultValueAccessor) > 0 { - defaultValue = appendAccessors(defaultValueAccessor, fieldPath, variable.Name()) - - if isStringer(t) { - if !bindDefaultVar { - defaultValue = defaultValue + ".String()" - testValue = defaultValue - } else { - testValue = defaultValue + ".String()" - } - - // Don't do anything, we will generate PFlagValue implementation to use this. 
- } else if isJSONMarshaler(t) { - logger.Infof(ctx, "Field [%v] of type [%v] does not implement Stringer interface."+ - " Will use %s.mustMarshalJSON() to get its default value.", defaultValueAccessor, variable.Name(), t.String()) - defaultValue = fmt.Sprintf("%s.mustMarshalJSON(%s)", defaultValueAccessor, defaultValue) - bindDefaultVarForField = false - testValue = defaultValue - } else { - logger.Infof(ctx, "Field [%v] of type [%v] does not implement Stringer interface."+ - " Will use %s.mustMarshalJSON() to get its default value.", defaultValueAccessor, variable.Name(), t.String()) - defaultValue = fmt.Sprintf("%s.mustJsonMarshal(%s)", defaultValueAccessor, defaultValue) - bindDefaultVarForField = false - testValue = defaultValue - } - } - - if len(testValue) == 0 { - testValue = `"1"` - } - - logger.Infof(ctx, "[%v] is of an Alias type (struct) with default value [%v].", tag.Name, tag.DefaultValue) - - if jsonUnmarshaler { - logger.Infof(logger.WithIndent(ctx, indent), "Type is json unmarshallable.") - - addField(typ, FieldInfo{ - Name: tag.Name, - GoName: variable.Name(), - Typ: types.Typ[types.String], - FlagMethodName: "String", - TestFlagMethodName: "String", - DefaultValue: defaultValue, - UsageString: tag.Usage, - TestValue: testValue, - TestStrategy: JSON, - ShouldBindDefault: bindDefaultVarForField, - LocalTypeName: t.Obj().Name(), - }) - } else { - logger.Infof(ctx, "Traversing fields in type.") - - nested, otherPflagValueTypes, err := discoverFieldsRecursive(logger.WithIndent(ctx, indent), workingDirPkg, t, defaultValueAccessor, appendAccessors(fieldPath, variable.Name()), bindDefaultVar) - if err != nil { - return nil, []PFlagValueType{}, err - } - - for _, subField := range nested { - addField(subField.Typ, FieldInfo{ - Name: fmt.Sprintf("%v.%v", tag.Name, subField.Name), - GoName: fmt.Sprintf("%v.%v", variable.Name(), subField.GoName), - Typ: subField.Typ, - FlagMethodName: subField.FlagMethodName, - TestFlagMethodName: 
subField.TestFlagMethodName, - DefaultValue: subField.DefaultValue, - UsageString: subField.UsageString, - TestValue: subField.TestValue, - TestStrategy: subField.TestStrategy, - ShouldBindDefault: bindDefaultVar, - LocalTypeName: subField.LocalTypeName, - }) - } - - for _, vType := range otherPflagValueTypes { - pflagValueTypes[vType.Name] = vType - } - } - case *types.Named: - // For named types, they will show up as Named but their underlying type will be basic. - if _, isBasic := t.Underlying().(*types.Basic); isBasic { - logger.Debugf(ctx, "type [%v] is a named basic type. Using buildNamedBasicField to generate it.", t.Obj().Name()) - f, err := buildNamedBasicField(ctx, workingDirPkg, tag, t, defaultValueAccessor, fieldPath, variable, isPtr, bindDefaultVar) - if err != nil { - return fields, []PFlagValueType{}, err - } - - addField(typ, f) - break - } - - if _, isStruct := t.Underlying().(*types.Struct); !isStruct { - // TODO: Add a more descriptive error message. - return nil, []PFlagValueType{}, fmt.Errorf("invalid type. it must be struct, received [%v] for field [%v]", t.Underlying().String(), tag.Name) - } - - // If the type has json unmarshaler, then stop the recursion and assume the type is string. config package - // will use json unmarshaler to fill in the final config object. - jsonUnmarshaler := isJSONUnmarshaler(t) - - defaultValue := tag.DefaultValue - bindDefaultVarForField := bindDefaultVar - testValue := defaultValue - if len(defaultValueAccessor) > 0 { - defaultValue = appendAccessors(defaultValueAccessor, fieldPath, variable.Name()) - - if isStringer(t) { - if !bindDefaultVar { - defaultValue = defaultValue + ".String()" - testValue = defaultValue - } else { - testValue = defaultValue + ".String()" - } - - // Don't do anything, we will generate PFlagValue implementation to use this. 
- } else if isJSONMarshaler(t) { - logger.Infof(ctx, "Field [%v] of type [%v] does not implement Stringer interface."+ - " Will use %s.mustMarshalJSON() to get its default value.", defaultValueAccessor, variable.Name(), t.String()) - defaultValue = fmt.Sprintf("%s.mustMarshalJSON(%s)", defaultValueAccessor, defaultValue) - bindDefaultVarForField = false - testValue = defaultValue - } else { - logger.Infof(ctx, "Field [%v] of type [%v] does not implement Stringer interface."+ - " Will use %s.mustMarshalJSON() to get its default value.", defaultValueAccessor, variable.Name(), t.String()) - defaultValue = fmt.Sprintf("%s.mustJsonMarshal(%s)", defaultValueAccessor, defaultValue) - bindDefaultVarForField = false - testValue = defaultValue - } - } - - if len(testValue) == 0 { - testValue = `"1"` - } - - logger.Infof(ctx, "[%v] is of a Named type (struct) with default value [%v].", tag.Name, tag.DefaultValue) - - if jsonUnmarshaler { - logger.Infof(logger.WithIndent(ctx, indent), "Type is json unmarshallable.") - - addField(typ, FieldInfo{ - Name: tag.Name, - GoName: variable.Name(), - Typ: types.Typ[types.String], - FlagMethodName: "String", - TestFlagMethodName: "String", - DefaultValue: defaultValue, - UsageString: tag.Usage, - TestValue: testValue, - TestStrategy: JSON, - ShouldBindDefault: bindDefaultVarForField, - LocalTypeName: t.Obj().Name(), - }) - } else { - logger.Infof(ctx, "Traversing fields in type.") - - nested, otherPflagValueTypes, err := discoverFieldsRecursive(logger.WithIndent(ctx, indent), workingDirPkg, t, defaultValueAccessor, appendAccessors(fieldPath, variable.Name()), bindDefaultVar) - if err != nil { - return nil, []PFlagValueType{}, err - } - - for _, subField := range nested { - addField(subField.Typ, FieldInfo{ - Name: fmt.Sprintf("%v.%v", tag.Name, subField.Name), - GoName: fmt.Sprintf("%v.%v", variable.Name(), subField.GoName), - Typ: subField.Typ, - FlagMethodName: subField.FlagMethodName, - TestFlagMethodName: subField.TestFlagMethodName, 
- DefaultValue: subField.DefaultValue, - UsageString: subField.UsageString, - TestValue: subField.TestValue, - TestStrategy: subField.TestStrategy, - ShouldBindDefault: bindDefaultVar, - LocalTypeName: subField.LocalTypeName, - }) - } - - for _, vType := range otherPflagValueTypes { - pflagValueTypes[vType.Name] = vType - } - } - case *types.Slice: - logger.Infof(ctx, "[%v] is of a slice type with default value [%v].", tag.Name, tag.DefaultValue) - defaultValue := tag.DefaultValue - if len(defaultValueAccessor) > 0 { - defaultValue = appendAccessors(defaultValueAccessor, fieldPath, variable.Name()) - } - - f, err := buildFieldForSlice(logger.WithIndent(ctx, indent), t, tag.Name, variable.Name(), tag.Usage, defaultValue, bindDefaultVar) - if err != nil { - return nil, []PFlagValueType{}, err - } - - addField(typ, f) - case *types.Array: - logger.Infof(ctx, "[%v] is of an array type with default value [%v].", tag.Name, tag.DefaultValue) - defaultValue := tag.DefaultValue - - f, err := buildFieldForSlice(logger.WithIndent(ctx, indent), t, tag.Name, variable.Name(), tag.Usage, defaultValue, bindDefaultVar) - if err != nil { - return nil, []PFlagValueType{}, err - } - - addField(typ, f) - case *types.Map: - logger.Infof(ctx, "[%v] is of a map type with default value [%v].", tag.Name, tag.DefaultValue) - defaultValue := tag.DefaultValue - if len(defaultValueAccessor) > 0 { - defaultValue = appendAccessors(defaultValueAccessor, fieldPath, variable.Name()) - } - - f, err := buildFieldForMap(logger.WithIndent(ctx, indent), t, tag.Name, variable.Name(), tag.Usage, defaultValue, bindDefaultVar) - if err != nil { - return nil, []PFlagValueType{}, err - } - - addField(typ, f) - default: - return nil, []PFlagValueType{}, fmt.Errorf("unexpected type %v", t.String()) - } - } - - return fields, pflagValueTypesToList(pflagValueTypes), nil -} - -// buildNamedBasicField builds FieldInfo for a NamedType that has an underlying basic type (e.g. 
`type Foo int`) -func buildNamedBasicField(ctx context.Context, workingDirPkg string, tag Tag, t *types.Named, defaultValueAccessor, fieldPath string, - v *types.Var, isPtr, bindDefaultVar bool) (FieldInfo, error) { - _, casted := t.Underlying().(*types.Basic) - if !casted { - return FieldInfo{}, fmt.Errorf("expected named type with an underlying basic type. Received [%v]", t.String()) - } - - if !isStringer(t) { - return FieldInfo{}, fmt.Errorf("type [%v] doesn't implement Stringer interface. If you are trying to declare an enum, make sure to run `enumer` on it", t.String()) - } - - if !isJSONUnmarshaler(t) { - return FieldInfo{}, fmt.Errorf("type [%v] doesn't implement JSONUnmarshaler interface. If you are trying to create an enum, make sure to run `enumer -json` on it", t.String()) - } - - hasPFlagValueImpl := isPFlagValue(t) - if !hasPFlagValueImpl && bindDefaultVar && t.Obj().Pkg().Path() != workingDirPkg { - return FieldInfo{}, fmt.Errorf("field [%v] of type [%v] from package [%v] does not implement PFlag's"+ - " Value interface and is not local to the package to generate an implementation for automatically. Either"+ - " disable bind-default-var for the type, disable pflag generation for this field or created a local"+ - " wrapper type", t.Obj().Name(), appendAccessors(fieldPath, v.Name()), t.Obj().Pkg().Path()) - } - - // We rely on `enumer` generation to convert string to value. If it's not implemented, fail - if !hasStringConstructor(t) { - typeName := t.Obj().Name() - return FieldInfo{}, fmt.Errorf("field [%v] of type [%v] from package [%v] doesn't have `enumer` run. 
"+ - "Add: //go:generate enumer --type=%s --trimPrefix=%s", typeName, appendAccessors(fieldPath, - v.Name()), t.Obj().Pkg().Path(), typeName, typeName) - } - - accessorWrapper := func(str string) string { - return fmt.Sprintf("%s.String()", str) - } - - if bindDefaultVar && hasPFlagValueImpl { - accessorWrapper = nil - } - - f, err := buildBasicField(ctx, tag, types.Typ[types.String], defaultValueAccessor, fieldPath, v, - hasPFlagValueImpl, true, isPtr, bindDefaultVar, accessorWrapper) - if err != nil { - return FieldInfo{}, err - } - - // Override the local type name to be the named type name. - f.LocalTypeName = t.Obj().Name() - return f, nil -} - -func buildBasicField(ctx context.Context, tag Tag, t *types.Basic, defaultValueAccessor, fieldPath string, - v *types.Var, isPFlagValue, isNamed, isPtr, bindDefaultVar bool, accessorWrapper func(string) string) (FieldInfo, error) { - - if len(tag.DefaultValue) == 0 { - tag.DefaultValue = fmt.Sprintf("*new(%v)", t.String()) - } - - logger.Infof(ctx, "[%v] is of a basic type with default value [%v].", tag.Name, tag.DefaultValue) - - isAllowed := false - for _, k := range allowedKinds { - if t.String() == k.String() { - isAllowed = true - break - } - } - - // If the type is a NamedType, we can generate interface implementation to make it work, so don't error here. - if !isAllowed && !isNamed { - return FieldInfo{}, fmt.Errorf("only these basic kinds are allowed. given [%v] (Kind: [%v]. expected: [%+v]", - t.String(), t.Kind(), allowedKinds) - } - - defaultValue := tag.DefaultValue - if len(defaultValueAccessor) > 0 { - defaultValue = appendAccessors(defaultValueAccessor, fieldPath, v.Name()) - if accessorWrapper != nil { - defaultValue = accessorWrapper(defaultValue) - } - - if isPtr { - defaultValue = fmt.Sprintf("%s.elemValueOrNil(%s).(%s)", defaultValueAccessor, defaultValue, t.Name()) - if bindDefaultVar { - logger.Warnf(ctx, "field [%v] is nullable. 
Will not bind default variable", defaultValue) - bindDefaultVar = false - } - } - } - - flagMethodName := camelCase(t.String()) - testFlagMethodName := flagMethodName - if isNamed && bindDefaultVar && isPFlagValue { - // The template automatically appends the word "Var" to the method name. - // The one we now want to use is just named "Var" so make this string empty to end up with the - // right method name. - flagMethodName = "" - } else if isNamed && bindDefaultVar { - bindDefaultVar = false - } - - return FieldInfo{ - Name: tag.Name, - GoName: v.Name(), - Typ: t, - FlagMethodName: flagMethodName, - TestFlagMethodName: testFlagMethodName, - DefaultValue: defaultValue, - UsageString: tag.Usage, - TestValue: `"1"`, - TestStrategy: JSON, - ShouldBindDefault: bindDefaultVar, - }, nil -} - -// NewGenerator initializes a PFlagProviderGenerator for pflags files for targetTypeName struct under pkg. If pkg is not filled in, -// it's assumed to be current package (which is expected to be the common use case when invoking pflags from `// go:generate comments)` -func NewGenerator(pkg, targetTypeName, defaultVariableName string, shouldBindDefaultVar bool) (*PFlagProviderGenerator, error) { - ctx := context.Background() - var err error - - // Resolve package path - if pkg == "" || pkg[0] == '.' 
{ - pkg, err = filepath.Abs(filepath.Clean(pkg)) - if err != nil { - return nil, err - } - - pkg = gogenutil.StripGopath(pkg) - logger.InfofNoCtx("Loading package from path [%v]", pkg) - } - - targetPackage, err := loadPackage(pkg) - if err != nil { - return nil, err - } - - obj := targetPackage.Scope().Lookup(targetTypeName) - if obj == nil { - return nil, fmt.Errorf("struct %s missing", targetTypeName) - } - - var st *types.Named - switch obj.Type().Underlying().(type) { - case *types.Struct: - st = obj.Type().(*types.Named) - default: - return nil, fmt.Errorf("%s should be an struct, was %s", targetTypeName, obj.Type().Underlying()) - } - - var defaultVar *types.Var - obj = targetPackage.Scope().Lookup(defaultVariableName) - if obj != nil { - defaultVar = obj.(*types.Var) - } - - if defaultVar != nil { - logger.Infof(ctx, "Using default variable with name [%v] to assign all default values.", defaultVariableName) - } else { - logger.Infof(ctx, "Using default values defined in tags if any.") - } - - return &PFlagProviderGenerator{ - st: st, - pkg: targetPackage, - defaultVar: defaultVar, - shouldBindDefaultVar: shouldBindDefaultVar, - }, nil -} - -func loadPackage(pkg string) (*types.Package, error) { - config := &packages.Config{ - Mode: packages.NeedTypes | packages.NeedTypesInfo, - Logf: logger.InfofNoCtx, - } - - loadedPkgs, err := packages.Load(config, pkg) - if err != nil { - return nil, err - } - - if len(loadedPkgs) == 0 { - return nil, fmt.Errorf("No packages loaded") - } - - targetPackage := loadedPkgs[0].Types - return targetPackage, nil -} - -func (g PFlagProviderGenerator) GetTargetPackage() *types.Package { - return g.pkg -} - -func (g PFlagProviderGenerator) Generate(ctx context.Context) (PFlagProvider, error) { - defaultValueAccessor := "" - if g.defaultVar != nil { - defaultValueAccessor = g.defaultVar.Name() - } - - fields, pflagValueTypes, err := discoverFieldsRecursive(ctx, g.pkg.Path(), g.st, defaultValueAccessor, "", g.shouldBindDefaultVar) - 
if err != nil { - return PFlagProvider{}, err - } - - return newPflagProvider(g.pkg, g.st.Obj().Name(), fields, pflagValueTypes), nil -} diff --git a/flytestdlib/cli/pflags/api/generator_test.go b/flytestdlib/cli/pflags/api/generator_test.go deleted file mode 100644 index 037846b682..0000000000 --- a/flytestdlib/cli/pflags/api/generator_test.go +++ /dev/null @@ -1,211 +0,0 @@ -package api - -import ( - "context" - "flag" - "go/token" - "go/types" - "os" - "path/filepath" - "reflect" - "strings" - "testing" - - "github.com/stretchr/testify/assert" -) - -// Make sure existing config file(s) parse correctly before overriding them with this flag! -var update = flag.Bool("update", false, "Updates testdata") - -// If v is a pointer, it will get its element value or the zero value of the element type. -// If v is not a pointer, it will return it as is. -func elemValueOrNil(v interface{}) interface{} { - if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { - if reflect.ValueOf(v).IsNil() { - return reflect.Zero(t.Elem()).Interface() - } - - return reflect.ValueOf(v).Interface() - } else if v == nil { - return reflect.Zero(t).Interface() - } - - return v -} - -func TestElemValueOrNil(t *testing.T) { - var iPtr *int - assert.Equal(t, 0, elemValueOrNil(iPtr)) - var sPtr *string - assert.Equal(t, "", elemValueOrNil(sPtr)) - var i int - assert.Equal(t, 0, elemValueOrNil(i)) - var s string - assert.Equal(t, "", elemValueOrNil(s)) - var arr []string - assert.Equal(t, arr, elemValueOrNil(arr)) -} - -func TestNewGenerator(t *testing.T) { - testCases := []struct { - TypeName string - DefaultVariableName string - shouldBindDefaultVariable bool - }{ - { - TypeName: "TestType", - DefaultVariableName: "DefaultTestType", - shouldBindDefaultVariable: false, - }, - { - TypeName: "TestType", - DefaultVariableName: "DefaultTestType", - shouldBindDefaultVariable: true, - }, - } - - for _, typ := range testCases { - t.Run("Test "+typ.TypeName, func(t *testing.T) { - g, err := 
NewGenerator("github.com/flyteorg/flyte/v2/flytestdlib/cli/pflags/api", typ.TypeName, typ.DefaultVariableName, typ.shouldBindDefaultVariable) - if !assert.NoError(t, err) { - t.FailNow() - } - ctx := context.Background() - p, err := g.Generate(ctx) - if !assert.NoError(t, err) { - t.FailNow() - } - - codeOutput, err := os.CreateTemp("", "output-*.go") - if !assert.NoError(t, err) { - t.FailNow() - } - - defer func() { assert.NoError(t, os.Remove(codeOutput.Name())) }() - - testOutput, err := os.CreateTemp("", "output-*_test.go") - if !assert.NoError(t, err) { - t.FailNow() - } - - defer func() { assert.NoError(t, os.Remove(testOutput.Name())) }() - - assert.NoError(t, p.WriteCodeFile(codeOutput.Name())) - assert.NoError(t, p.WriteTestFile(testOutput.Name())) - - codeBytes, err := os.ReadFile(codeOutput.Name()) - assert.NoError(t, err) - - testBytes, err := os.ReadFile(testOutput.Name()) - assert.NoError(t, err) - - var goldenFilePath string - var goldenTestFilePath string - goldenFilePath = filepath.Join("testdata", strings.ToLower(typ.TypeName)+".go") - goldenTestFilePath = filepath.Join("testdata", strings.ToLower(typ.TypeName)+"_test.go") - if typ.shouldBindDefaultVariable { - goldenFilePath = filepath.Join("testdata", strings.ToLower(typ.TypeName)+"_bind.go") - goldenTestFilePath = filepath.Join("testdata", strings.ToLower(typ.TypeName)+"_bind_test.go") - } - - if *update { - assert.NoError(t, os.WriteFile(goldenFilePath, codeBytes, os.ModePerm)) // #nosec G306 - assert.NoError(t, os.WriteFile(goldenTestFilePath, testBytes, os.ModePerm)) // #nosec G306 - } - - goldenOutput, err := os.ReadFile(filepath.Clean(goldenFilePath)) - assert.NoError(t, err) - assert.Equal(t, string(goldenOutput), string(codeBytes)) - - goldenTestOutput, err := os.ReadFile(filepath.Clean(goldenTestFilePath)) - assert.NoError(t, err) - assert.Equal(t, string(goldenTestOutput), string(testBytes)) - }) - } - - t.Run("empty package", func(t *testing.T) { - gen, err := NewGenerator("", 
"TestType", "DefaultTestType", false) - assert.Nil(t, err) - assert.NotNil(t, gen.GetTargetPackage()) - }) -} - -func TestBuildFieldForMap(t *testing.T) { - t.Run("supported : StringToString", func(t *testing.T) { - ctx := context.Background() - key := types.Typ[types.String] - elem := types.Typ[types.String] - typesMap := types.NewMap(key, elem) - name := "m" - goName := "StringMap" - usage := "I'm a map of strings" - defaultValue := "DefaultValue" - fieldInfo, err := buildFieldForMap(ctx, typesMap, name, goName, usage, defaultValue, false) - assert.Nil(t, err) - assert.NotNil(t, fieldInfo) - assert.Equal(t, "StringToString", fieldInfo.FlagMethodName) - assert.Equal(t, defaultValue, fieldInfo.DefaultValue) - }) - t.Run("unsupported : not a string type map", func(t *testing.T) { - ctx := context.Background() - key := types.Typ[types.Bool] - elem := types.Typ[types.Bool] - typesMap := types.NewMap(key, elem) - name := "m" - goName := "BoolMap" - usage := "I'm a map of bools" - defaultValue := "" - fieldInfo, err := buildFieldForMap(ctx, typesMap, name, goName, usage, defaultValue, false) - assert.Nil(t, err) - assert.NotNil(t, fieldInfo) - assert.Equal(t, "StringToString", fieldInfo.FlagMethodName) - assert.Equal(t, "nil", fieldInfo.DefaultValue) - }) - t.Run("unsupported : elem not a basic type", func(t *testing.T) { - ctx := context.Background() - key := types.Typ[types.String] - elem := &types.Interface{} - typesMap := types.NewMap(key, elem) - name := "m" - goName := "InterfaceMap" - usage := "I'm a map of interface values" - defaultValue := "" - fieldInfo, err := buildFieldForMap(ctx, typesMap, name, goName, usage, defaultValue, false) - assert.NotNil(t, err) - assert.Contains(t, err.Error(), "is not supported."+ - " Only basic slices or slices of json-unmarshalable types are supported") - assert.NotNil(t, fieldInfo) - assert.Equal(t, "", fieldInfo.FlagMethodName) - assert.Equal(t, "", fieldInfo.DefaultValue) - }) - t.Run("supported : StringToFloat64", func(t 
*testing.T) { - ctx := context.Background() - key := types.Typ[types.String] - elem := types.Typ[types.Float64] - typesMap := types.NewMap(key, elem) - name := "m" - goName := "Float64Map" - usage := "I'm a map of float64" - defaultValue := "DefaultValue" - fieldInfo, err := buildFieldForMap(ctx, typesMap, name, goName, usage, defaultValue, false) - assert.Nil(t, err) - assert.NotNil(t, fieldInfo) - assert.Equal(t, "StringToFloat64", fieldInfo.FlagMethodName) - assert.Equal(t, defaultValue, fieldInfo.DefaultValue) - }) -} - -func TestDiscoverFieldsRecursive(t *testing.T) { - t.Run("empty struct", func(t *testing.T) { - ctx := context.Background() - defaultValueAccessor := "defaultAccessor" - fieldPath := "field.Path" - pkg := types.NewPackage("p", "p") - n1 := types.NewTypeName(token.NoPos, pkg, "T1", nil) - namedTypes := types.NewNamed(n1, new(types.Struct), nil) - //namedTypes := types.NewNamed(n1, nil, nil) - fields, _, err := discoverFieldsRecursive(ctx, "p", namedTypes, defaultValueAccessor, fieldPath, false) - assert.Nil(t, err) - assert.Equal(t, len(fields), 0) - }) -} diff --git a/flytestdlib/cli/pflags/api/namedtype_enumer.go b/flytestdlib/cli/pflags/api/namedtype_enumer.go deleted file mode 100644 index ffceef622d..0000000000 --- a/flytestdlib/cli/pflags/api/namedtype_enumer.go +++ /dev/null @@ -1,67 +0,0 @@ -// Code generated by "enumer --type=NamedType --trimprefix=NamedType -json"; DO NOT EDIT. 
- -package api - -import ( - "encoding/json" - "fmt" -) - -const _NamedTypeName = "AB" - -var _NamedTypeIndex = [...]uint8{0, 1, 2} - -func (i NamedType) String() string { - if i < 0 || i >= NamedType(len(_NamedTypeIndex)-1) { - return fmt.Sprintf("NamedType(%d)", i) - } - return _NamedTypeName[_NamedTypeIndex[i]:_NamedTypeIndex[i+1]] -} - -var _NamedTypeValues = []NamedType{0, 1} - -var _NamedTypeNameToValueMap = map[string]NamedType{ - _NamedTypeName[0:1]: 0, - _NamedTypeName[1:2]: 1, -} - -// NamedTypeString retrieves an enum value from the enum constants string name. -// Throws an error if the param is not part of the enum. -func NamedTypeString(s string) (NamedType, error) { - if val, ok := _NamedTypeNameToValueMap[s]; ok { - return val, nil - } - return 0, fmt.Errorf("%s does not belong to NamedType values", s) -} - -// NamedTypeValues returns all values of the enum -func NamedTypeValues() []NamedType { - return _NamedTypeValues -} - -// IsANamedType returns "true" if the value is listed in the enum definition. 
"false" otherwise -func (i NamedType) IsANamedType() bool { - for _, v := range _NamedTypeValues { - if i == v { - return true - } - } - return false -} - -// MarshalJSON implements the json.Marshaler interface for NamedType -func (i NamedType) MarshalJSON() ([]byte, error) { - return json.Marshal(i.String()) -} - -// UnmarshalJSON implements the json.Unmarshaler interface for NamedType -func (i *NamedType) UnmarshalJSON(data []byte) error { - var s string - if err := json.Unmarshal(data, &s); err != nil { - return fmt.Errorf("NamedType should be a string, got %s", data) - } - - var err error - *i, err = NamedTypeString(s) - return err -} diff --git a/flytestdlib/cli/pflags/api/pflag_provider.go b/flytestdlib/cli/pflags/api/pflag_provider.go deleted file mode 100644 index 3de5a54368..0000000000 --- a/flytestdlib/cli/pflags/api/pflag_provider.go +++ /dev/null @@ -1,92 +0,0 @@ -package api - -import ( - "bytes" - "fmt" - "go/types" - "os" - "time" - - "github.com/ernesto-jimenez/gogen/imports" - goimports "golang.org/x/tools/imports" -) - -type PFlagProvider struct { - typeName string - pkg *types.Package - fields []FieldInfo - pflagValueTypes []PFlagValueType -} - -// Imports adds any needed imports for types not directly declared in this package. -func (p PFlagProvider) Imports() map[string]string { - imp := imports.New(p.pkg.Name()) - for _, m := range p.fields { - imp.AddImportsFrom(m.Typ) - } - - return imp.Imports() -} - -// WriteCodeFile evaluates the main code file template and writes the output to outputFilePath -func (p PFlagProvider) WriteCodeFile(outputFilePath string) error { - buf := bytes.Buffer{} - err := p.generate(GenerateCodeFile, &buf, outputFilePath) - if err != nil { - return fmt.Errorf("error generating code, Error: %v. 
Source: %v", err, buf.String()) - } - - return p.writeToFile(&buf, outputFilePath) -} - -// WriteTestFile evaluates the test code file template and writes the output to outputFilePath -func (p PFlagProvider) WriteTestFile(outputFilePath string) error { - buf := bytes.Buffer{} - err := p.generate(GenerateTestFile, &buf, outputFilePath) - if err != nil { - return fmt.Errorf("error generating code, Error: %v. Source: %v", err, buf.String()) - } - - return p.writeToFile(&buf, outputFilePath) -} - -func (p PFlagProvider) writeToFile(buffer *bytes.Buffer, fileName string) error { - return os.WriteFile(fileName, buffer.Bytes(), os.ModePerm) // #nosec G306 -} - -// generate evaluates the generator and writes the output to buffer. targetFileName is used only to influence how imports are -// generated/optimized. -func (p PFlagProvider) generate(generator func(buffer *bytes.Buffer, info TypeInfo) error, buffer *bytes.Buffer, targetFileName string) error { - info := TypeInfo{ - Name: p.typeName, - Fields: p.fields, - Package: p.pkg.Name(), - Timestamp: time.Now(), - Imports: p.Imports(), - PFlagValueTypes: p.pflagValueTypes, - } - - if err := generator(buffer, info); err != nil { - return err - } - - // Update imports - newBytes, err := goimports.Process(targetFileName, buffer.Bytes(), nil) - if err != nil { - return err - } - - buffer.Reset() - _, err = buffer.Write(newBytes) - - return err -} - -func newPflagProvider(pkg *types.Package, typeName string, fields []FieldInfo, pflagValueTypes []PFlagValueType) PFlagProvider { - return PFlagProvider{ - typeName: typeName, - pkg: pkg, - fields: fields, - pflagValueTypes: pflagValueTypes, - } -} diff --git a/flytestdlib/cli/pflags/api/sample.go b/flytestdlib/cli/pflags/api/sample.go deleted file mode 100644 index 1b17a7bb97..0000000000 --- a/flytestdlib/cli/pflags/api/sample.go +++ /dev/null @@ -1,72 +0,0 @@ -package api - -import ( - "encoding/json" - "errors" - - "github.com/flyteorg/flyte/v2/flytestdlib/storage" -) - -var 
DefaultTestType = &TestType{ - StringValue: "Welcome to defaults", -} - -type TestType struct { - StringValue string `json:"str" pflag:"\"hello world\",\"life is short\""` - BoolValue bool `json:"bl" pflag:"true"` - NestedType NestedType `json:"nested"` - IntArray []int `json:"ints" pflag:"[]int{12%2C1}"` - StringArray []string `json:"strs" pflag:"[]string{\"12\"%2C\"1\"}"` - ComplexJSONArray []ComplexJSONType `json:"complexArr"` - StringToJSON ComplexJSONType `json:"c" pflag:",I'm a complex type but can be converted from string."` - IgnoredMap map[string]string `json:"ignored-map" pflag:"-,"` - StorageConfig storage.Config `json:"storage"` - IntValue *int `json:"i"` - StringMap map[string]string `json:"m" pflag:",I'm a map of strings"` - ConstType NamedType `json:"constType"` - AliasType TestConstTypeAlias `json:"aliasType"` -} - -//go:generate enumer --type=NamedType --trimprefix=NamedType -json - -type NamedType int - -const ( - NamedTypeA NamedType = iota - NamedTypeB -) - -type TestConstTypeAlias = int - -type NestedType struct { - IntValue int `json:"i" pflag:",this is an important flag"` -} - -type ComplexJSONType struct { - StringValue string `json:"str"` - IntValue int `json:"i"` -} - -func (c *ComplexJSONType) UnmarshalJSON(b []byte) error { - if len(b) == 0 { - c.StringValue = "" - return nil - } - - var v interface{} - if err := json.Unmarshal(b, &v); err != nil { - return err - } - switch value := v.(type) { - case string: - if len(value) == 0 { - c.StringValue = "" - } else { - c.StringValue = value - } - default: - return errors.New("invalid duration") - } - - return nil -} diff --git a/flytestdlib/cli/pflags/api/tag.go b/flytestdlib/cli/pflags/api/tag.go deleted file mode 100644 index 9b070743bc..0000000000 --- a/flytestdlib/cli/pflags/api/tag.go +++ /dev/null @@ -1,66 +0,0 @@ -package api - -import ( - "fmt" - "net/url" - "strings" - - "github.com/fatih/structtag" -) - -const ( - TagName = "pflag" - JSONTagName = "json" -) - -// Tag represents 
parsed PFlag Go-struct tag. -// type Foo struct { -// StringValue string `json:"str" pflag:"\"hello world\",This is a string value"` -// } -// Name will be "str", Default value is "hello world" and Usage is "This is a string value" -type Tag struct { - Name string - DefaultValue string - Usage string -} - -// ParseTag parses tag. Name is computed from json tag, defaultvalue is the name of the pflag tag and usage is the concatenation -// of all options for pflag tag. -// e.g. `json:"name" pflag:"2,this is a useful param"` -func ParseTag(tag string) (t Tag, err error) { - tags, err := structtag.Parse(tag) - if err != nil { - return Tag{}, err - } - - t = Tag{} - - jsonTag, err := tags.Get(JSONTagName) - if err == nil { - t.Name = jsonTag.Name - } - - pflagTag, err := tags.Get(TagName) - if err == nil { - t.DefaultValue, err = url.QueryUnescape(pflagTag.Name) - if err != nil { - fmt.Printf("Failed to Query unescape tag name [%v], will use value as is. Error: %v", pflagTag.Name, err) - t.DefaultValue = pflagTag.Name - } - - t.Usage = strings.Join(pflagTag.Options, ", ") - if len(t.Usage) == 0 { - t.Usage = `""` - } - - if t.Usage[0] != '"' { - t.Usage = fmt.Sprintf(`"%v"`, t.Usage) - } - } else { - // We receive an error when the tag isn't present (or is malformed). Because there is no strongly-typed way to - // do that, we will just set Usage to empty string and move on. 
- t.Usage = `""` - } - - return t, nil -} diff --git a/flytestdlib/cli/pflags/api/templates.go b/flytestdlib/cli/pflags/api/templates.go deleted file mode 100644 index f486ee4541..0000000000 --- a/flytestdlib/cli/pflags/api/templates.go +++ /dev/null @@ -1,234 +0,0 @@ -package api - -import ( - "bytes" - "text/template" -) - -func GenerateCodeFile(buffer *bytes.Buffer, info TypeInfo) error { - return mainTmpl.Execute(buffer, info) -} - -func GenerateTestFile(buffer *bytes.Buffer, info TypeInfo) error { - return testTmpl.Execute(buffer, info) -} - -var mainTmpl = template.Must(template.New("MainFile").Parse( - `// Code generated by go generate; DO NOT EDIT. -// This file was generated by robots. - -package {{ .Package }} - -import ( - "encoding/json" - - "github.com/spf13/pflag" - "fmt" -{{range $path, $name := .Imports}} - {{$name}} "{{$path}}"{{end}} -) - -// If v is a pointer, it will get its element value or the zero value of the element type. -// If v is not a pointer, it will return it as is. -func ({{ .Name }}) elemValueOrNil(v interface{}) interface{} { - if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { - if reflect.ValueOf(v).IsNil() { - return reflect.Zero(t.Elem()).Interface() - } else { - return reflect.ValueOf(v).Interface() - } - } else if v == nil { - return reflect.Zero(t).Interface() - } - - return v -} - -func ({{ .Name }}) mustJsonMarshal(v interface{}) string { - raw, err := json.Marshal(v) - if err != nil { - panic(err) - } - - return string(raw) -} - -func ({{ .Name }}) mustMarshalJSON(v json.Marshaler) string { - raw, err := v.MarshalJSON() - if err != nil { - panic(err) - } - - return string(raw) -} - -// GetPFlagSet will return strongly types pflags for all fields in {{ .Name }} and its nested types. The format of the -// flags is json-name.json-sub-name... etc. 
-func (cfg {{ .Name }}) GetPFlagSet(prefix string) *pflag.FlagSet { - cmdFlags := pflag.NewFlagSet("{{ .Name }}", pflag.ExitOnError) - {{- range .Fields }} - {{- if eq .FlagMethodName "" }} - {{- if .ShouldBindDefault }} - cmdFlags.{{ .FlagMethodName }}Var(&{{ .DefaultValue }}, fmt.Sprintf("%v%v", prefix, "{{ .Name }}"), {{ .UsageString }}) - {{- else }} - cmdFlags.{{ .FlagMethodName }}(fmt.Sprintf("%v%v", prefix, "{{ .Name }}"), {{ .UsageString }}) - {{- end }} - {{- else }} - {{- if .ShouldBindDefault }} - cmdFlags.{{ .FlagMethodName }}Var(&{{ .DefaultValue }}, fmt.Sprintf("%v%v", prefix, "{{ .Name }}"), {{ .DefaultValue }}, {{ .UsageString }}) - {{- else }} - cmdFlags.{{ .FlagMethodName }}(fmt.Sprintf("%v%v", prefix, "{{ .Name }}"), {{ .DefaultValue }}, {{ .UsageString }}) - {{- end }} - {{- end }} - {{- end }} - return cmdFlags -} -`)) - -var testTmpl = template.Must(template.New("TestFile").Parse( - `// Code generated by go generate; DO NOT EDIT. -// This file was generated by robots. - -package {{ .Package }} - -import ( - "encoding/json" - "fmt" - "reflect" - "strings" - "testing" - - "github.com/mitchellh/mapstructure" - "github.com/stretchr/testify/assert" -{{- range $path, $name := .Imports}} - {{$name}} "{{$path}}" -{{- end}} -) - -var dereferencableKinds{{ .Name }} = map[reflect.Kind]struct{}{ - reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, -} - -// Checks if t is a kind that can be dereferenced to get its underlying type. -func canGetElement{{ .Name }}(t reflect.Kind) bool { - _, exists := dereferencableKinds{{ .Name }}[t] - return exists -} - -// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the -// object. Otherwise, it'll just pass on the original data. 
-func jsonUnmarshalerHook{{ .Name }}(_, to reflect.Type, data interface{}) (interface{}, error) { - unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() - if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || - (canGetElement{{ .Name }}(to.Kind()) && to.Elem().Implements(unmarshalerType)) { - - raw, err := json.Marshal(data) - if err != nil { - fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) - return data, nil - } - - res := reflect.New(to).Interface() - err = json.Unmarshal(raw, &res) - if err != nil { - fmt.Printf("Failed to umarshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) - return data, nil - } - - return res, nil - } - - return data, nil -} - -func decode_{{ .Name }}(input, result interface{}) error { - config := &mapstructure.DecoderConfig{ - TagName: "json", - WeaklyTypedInput: true, - Result: result, - DecodeHook: mapstructure.ComposeDecodeHookFunc( - mapstructure.StringToTimeDurationHookFunc(), - mapstructure.StringToSliceHookFunc(","), - jsonUnmarshalerHook{{ .Name }}, - ), - } - - decoder, err := mapstructure.NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(input) -} - -func join_{{ .Name }}(arr interface{}, sep string) string { - listValue := reflect.ValueOf(arr) - strs := make([]string, 0, listValue.Len()) - for i := 0; i < listValue.Len(); i++ { - strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) - } - - return strings.Join(strs, sep) -} - -func testDecodeJson_{{ .Name }}(t *testing.T, val, result interface{}) { - assert.NoError(t, decode_{{ .Name }}(val, result)) -} - -func testDecodeRaw_{{ .Name }}(t *testing.T, vStringSlice, result interface{}) { - assert.NoError(t, decode_{{ .Name }}(vStringSlice, result)) -} - -func Test{{ .Name }}_GetPFlagSet(t *testing.T) { - val := {{ .Name }}{} - cmdFlags := val.GetPFlagSet("") - assert.True(t, cmdFlags.HasFlags()) -} - -func Test{{ .Name }}_SetFlags(t 
*testing.T) { - actual := {{ .Name }}{} - cmdFlags := actual.GetPFlagSet("") - assert.True(t, cmdFlags.HasFlags()) - - {{ $ParentName := .Name }} - {{- range .Fields }} - t.Run("Test_{{ .Name }}", func(t *testing.T) { {{ $varName := print "v" .FlagMethodName }} - {{- if .ShouldTestDefault }} - t.Run("DefaultValue", func(t *testing.T) { - // Test that default value is set properly - if {{ $varName }}, err := cmdFlags.Get{{ .FlagMethodName }}("{{ .Name }}"); err == nil { - assert.Equal(t, {{ .Typ }}({{ .DefaultValue }}), {{ $varName }}) - } else { - assert.FailNow(t, err.Error()) - } - }) - {{- end }} - - t.Run("Override", func(t *testing.T) { - {{ if eq .TestStrategy "Json" }}testValue := {{ .TestValue }} - {{ else if eq .TestStrategy "Raw" }}testValue := {{ .TestValue }} - {{ else }}testValue := join_{{ $ParentName }}({{ .TestValue }}, ",") - {{ end }} - cmdFlags.Set("{{ .Name }}", testValue) - {{- if eq .FlagMethodName "" }} - if {{ $varName }} := cmdFlags.Lookup("{{ .Name }}"); {{ $varName }} != nil { - {{ if eq .TestStrategy "Json" }}testDecodeJson_{{ $ParentName }}(t, fmt.Sprintf("%v", v.Value.String()), &actual.{{ .GoName }}) - {{ else if eq .TestStrategy "Raw" }}testDecodeRaw_{{ $ParentName }}(t, v.Value.String(), &actual.{{ .GoName }}) - {{ else }}testDecodeRaw_{{ $ParentName }}(t, join_{{ $ParentName }}({{ print "v" .FlagMethodName }}, ",").Value.String(), &actual.{{ .GoName }}) - {{ end }} - } - {{- else }} - if {{ $varName }}, err := cmdFlags.Get{{ .TestFlagMethodName }}("{{ .Name }}"); err == nil { - {{ if eq .TestStrategy "Json" }}testDecodeJson_{{ $ParentName }}(t, fmt.Sprintf("%v", {{ print "v" .FlagMethodName }}), &actual.{{ .GoName }}) - {{ else if eq .TestStrategy "Raw" }}testDecodeRaw_{{ $ParentName }}(t, {{ print "v" .FlagMethodName }}, &actual.{{ .GoName }}) - {{ else }}testDecodeRaw_{{ $ParentName }}(t, join_{{ $ParentName }}({{ print "v" .FlagMethodName }}, ","), &actual.{{ .GoName }}) - {{ end }} - } else { - assert.FailNow(t, err.Error()) - 
} - {{- end }} - }) - }) - {{- end }} -} -`)) diff --git a/flytestdlib/cli/pflags/api/testdata/testtype.go b/flytestdlib/cli/pflags/api/testdata/testtype.go deleted file mode 100755 index 5f95e2ce95..0000000000 --- a/flytestdlib/cli/pflags/api/testdata/testtype.go +++ /dev/null @@ -1,80 +0,0 @@ -// Code generated by go generate; DO NOT EDIT. -// This file was generated by robots. - -package api - -import ( - "encoding/json" - "reflect" - - "fmt" - - "github.com/spf13/pflag" -) - -// If v is a pointer, it will get its element value or the zero value of the element type. -// If v is not a pointer, it will return it as is. -func (TestType) elemValueOrNil(v interface{}) interface{} { - if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { - if reflect.ValueOf(v).IsNil() { - return reflect.Zero(t.Elem()).Interface() - } else { - return reflect.ValueOf(v).Interface() - } - } else if v == nil { - return reflect.Zero(t).Interface() - } - - return v -} - -func (TestType) mustJsonMarshal(v interface{}) string { - raw, err := json.Marshal(v) - if err != nil { - panic(err) - } - - return string(raw) -} - -func (TestType) mustMarshalJSON(v json.Marshaler) string { - raw, err := v.MarshalJSON() - if err != nil { - panic(err) - } - - return string(raw) -} - -// GetPFlagSet will return strongly types pflags for all fields in TestType and its nested types. The format of the -// flags is json-name.json-sub-name... etc. 
-func (cfg TestType) GetPFlagSet(prefix string) *pflag.FlagSet { - cmdFlags := pflag.NewFlagSet("TestType", pflag.ExitOnError) - cmdFlags.String(fmt.Sprintf("%v%v", prefix, "str"), DefaultTestType.StringValue, "life is short") - cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "bl"), DefaultTestType.BoolValue, "") - cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "nested.i"), DefaultTestType.NestedType.IntValue, "this is an important flag") - cmdFlags.IntSlice(fmt.Sprintf("%v%v", prefix, "ints"), DefaultTestType.IntArray, "") - cmdFlags.StringSlice(fmt.Sprintf("%v%v", prefix, "strs"), DefaultTestType.StringArray, "") - cmdFlags.StringSlice(fmt.Sprintf("%v%v", prefix, "complexArr"), DefaultTestType.ComplexJSONArray, "") - cmdFlags.String(fmt.Sprintf("%v%v", prefix, "c"), DefaultTestType.mustJsonMarshal(DefaultTestType.StringToJSON), "I'm a complex type but can be converted from string.") - cmdFlags.String(fmt.Sprintf("%v%v", prefix, "storage.type"), DefaultTestType.StorageConfig.Type, "Sets the type of storage to configure [s3/minio/local/mem/stow].") - cmdFlags.String(fmt.Sprintf("%v%v", prefix, "storage.connection.endpoint"), DefaultTestType.StorageConfig.Connection.Endpoint.String(), "URL for storage client to connect to.") - cmdFlags.String(fmt.Sprintf("%v%v", prefix, "storage.connection.auth-type"), DefaultTestType.StorageConfig.Connection.AuthType, "Auth Type to use [iam, accesskey].") - cmdFlags.String(fmt.Sprintf("%v%v", prefix, "storage.connection.access-key"), DefaultTestType.StorageConfig.Connection.AccessKey, "Access key to use. 
Only required when authtype is set to accesskey.") - cmdFlags.String(fmt.Sprintf("%v%v", prefix, "storage.connection.secret-key"), DefaultTestType.StorageConfig.Connection.SecretKey, "Secret to use when accesskey is set.") - cmdFlags.String(fmt.Sprintf("%v%v", prefix, "storage.connection.region"), DefaultTestType.StorageConfig.Connection.Region, "Region to connect to.") - cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "storage.connection.disable-ssl"), DefaultTestType.StorageConfig.Connection.DisableSSL, "Disables SSL connection. Should only be used for development.") - cmdFlags.String(fmt.Sprintf("%v%v", prefix, "storage.stow.kind"), DefaultTestType.StorageConfig.Stow.Kind, "Kind of Stow backend to use. Refer to github/flyteorg/stow") - cmdFlags.StringToString(fmt.Sprintf("%v%v", prefix, "storage.stow.config"), DefaultTestType.StorageConfig.Stow.Config, "Configuration for stow backend. Refer to github/flyteorg/stow") - cmdFlags.String(fmt.Sprintf("%v%v", prefix, "storage.container"), DefaultTestType.StorageConfig.InitContainer, "Initial container (in s3 a bucket) to create -if it doesn't exist-.'") - cmdFlags.Bool(fmt.Sprintf("%v%v", prefix, "storage.enable-multicontainer"), DefaultTestType.StorageConfig.MultiContainerEnabled, "If this is true, then the container argument is overlooked and redundant. This config will automatically open new connections to new containers/buckets as they are encountered") - cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "storage.cache.max_size_mbs"), DefaultTestType.StorageConfig.Cache.MaxSizeMegabytes, "Maximum size of the cache where the Blob store data is cached in-memory. 
If not specified or set to 0, cache is not used") - cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "storage.cache.target_gc_percent"), DefaultTestType.StorageConfig.Cache.TargetGCPercent, "Sets the garbage collection target percentage.") - cmdFlags.Int64(fmt.Sprintf("%v%v", prefix, "storage.limits.maxDownloadMBs"), DefaultTestType.StorageConfig.Limits.GetLimitMegabytes, "Maximum allowed download size (in MBs) per call.") - cmdFlags.String(fmt.Sprintf("%v%v", prefix, "storage.defaultHttpClient.timeout"), DefaultTestType.StorageConfig.DefaultHTTPClient.Timeout.String(), "Sets time out on the http client.") - cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "i"), DefaultTestType.elemValueOrNil(DefaultTestType.IntValue).(int), "") - cmdFlags.StringToString(fmt.Sprintf("%v%v", prefix, "m"), DefaultTestType.StringMap, "I'm a map of strings") - cmdFlags.String(fmt.Sprintf("%v%v", prefix, "constType"), DefaultTestType.ConstType.String(), "") - cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "aliasType"), DefaultTestType.AliasType, "") - return cmdFlags -} diff --git a/flytestdlib/cli/pflags/api/testdata/testtype_bind.go b/flytestdlib/cli/pflags/api/testdata/testtype_bind.go deleted file mode 100755 index 4fef798cce..0000000000 --- a/flytestdlib/cli/pflags/api/testdata/testtype_bind.go +++ /dev/null @@ -1,80 +0,0 @@ -// Code generated by go generate; DO NOT EDIT. -// This file was generated by robots. - -package api - -import ( - "encoding/json" - "reflect" - - "fmt" - - "github.com/spf13/pflag" -) - -// If v is a pointer, it will get its element value or the zero value of the element type. -// If v is not a pointer, it will return it as is. 
-func (TestType) elemValueOrNil(v interface{}) interface{} { - if t := reflect.TypeOf(v); t.Kind() == reflect.Ptr { - if reflect.ValueOf(v).IsNil() { - return reflect.Zero(t.Elem()).Interface() - } else { - return reflect.ValueOf(v).Interface() - } - } else if v == nil { - return reflect.Zero(t).Interface() - } - - return v -} - -func (TestType) mustJsonMarshal(v interface{}) string { - raw, err := json.Marshal(v) - if err != nil { - panic(err) - } - - return string(raw) -} - -func (TestType) mustMarshalJSON(v json.Marshaler) string { - raw, err := v.MarshalJSON() - if err != nil { - panic(err) - } - - return string(raw) -} - -// GetPFlagSet will return strongly types pflags for all fields in TestType and its nested types. The format of the -// flags is json-name.json-sub-name... etc. -func (cfg TestType) GetPFlagSet(prefix string) *pflag.FlagSet { - cmdFlags := pflag.NewFlagSet("TestType", pflag.ExitOnError) - cmdFlags.StringVar(&DefaultTestType.StringValue, fmt.Sprintf("%v%v", prefix, "str"), DefaultTestType.StringValue, "life is short") - cmdFlags.BoolVar(&DefaultTestType.BoolValue, fmt.Sprintf("%v%v", prefix, "bl"), DefaultTestType.BoolValue, "") - cmdFlags.IntVar(&DefaultTestType.NestedType.IntValue, fmt.Sprintf("%v%v", prefix, "nested.i"), DefaultTestType.NestedType.IntValue, "this is an important flag") - cmdFlags.IntSliceVar(&DefaultTestType.IntArray, fmt.Sprintf("%v%v", prefix, "ints"), DefaultTestType.IntArray, "") - cmdFlags.StringSliceVar(&DefaultTestType.StringArray, fmt.Sprintf("%v%v", prefix, "strs"), DefaultTestType.StringArray, "") - cmdFlags.StringSliceVar(&DefaultTestType.ComplexJSONArray, fmt.Sprintf("%v%v", prefix, "complexArr"), DefaultTestType.ComplexJSONArray, "") - cmdFlags.String(fmt.Sprintf("%v%v", prefix, "c"), DefaultTestType.mustJsonMarshal(DefaultTestType.StringToJSON), "I'm a complex type but can be converted from string.") - cmdFlags.StringVar(&DefaultTestType.StorageConfig.Type, fmt.Sprintf("%v%v", prefix, "storage.type"), 
DefaultTestType.StorageConfig.Type, "Sets the type of storage to configure [s3/minio/local/mem/stow].") - cmdFlags.Var(&DefaultTestType.StorageConfig.Connection.Endpoint, fmt.Sprintf("%v%v", prefix, "storage.connection.endpoint"), "URL for storage client to connect to.") - cmdFlags.StringVar(&DefaultTestType.StorageConfig.Connection.AuthType, fmt.Sprintf("%v%v", prefix, "storage.connection.auth-type"), DefaultTestType.StorageConfig.Connection.AuthType, "Auth Type to use [iam, accesskey].") - cmdFlags.StringVar(&DefaultTestType.StorageConfig.Connection.AccessKey, fmt.Sprintf("%v%v", prefix, "storage.connection.access-key"), DefaultTestType.StorageConfig.Connection.AccessKey, "Access key to use. Only required when authtype is set to accesskey.") - cmdFlags.StringVar(&DefaultTestType.StorageConfig.Connection.SecretKey, fmt.Sprintf("%v%v", prefix, "storage.connection.secret-key"), DefaultTestType.StorageConfig.Connection.SecretKey, "Secret to use when accesskey is set.") - cmdFlags.StringVar(&DefaultTestType.StorageConfig.Connection.Region, fmt.Sprintf("%v%v", prefix, "storage.connection.region"), DefaultTestType.StorageConfig.Connection.Region, "Region to connect to.") - cmdFlags.BoolVar(&DefaultTestType.StorageConfig.Connection.DisableSSL, fmt.Sprintf("%v%v", prefix, "storage.connection.disable-ssl"), DefaultTestType.StorageConfig.Connection.DisableSSL, "Disables SSL connection. Should only be used for development.") - cmdFlags.StringVar(&DefaultTestType.StorageConfig.Stow.Kind, fmt.Sprintf("%v%v", prefix, "storage.stow.kind"), DefaultTestType.StorageConfig.Stow.Kind, "Kind of Stow backend to use. Refer to github/flyteorg/stow") - cmdFlags.StringToStringVar(&DefaultTestType.StorageConfig.Stow.Config, fmt.Sprintf("%v%v", prefix, "storage.stow.config"), DefaultTestType.StorageConfig.Stow.Config, "Configuration for stow backend. 
Refer to github/flyteorg/stow") - cmdFlags.StringVar(&DefaultTestType.StorageConfig.InitContainer, fmt.Sprintf("%v%v", prefix, "storage.container"), DefaultTestType.StorageConfig.InitContainer, "Initial container (in s3 a bucket) to create -if it doesn't exist-.'") - cmdFlags.BoolVar(&DefaultTestType.StorageConfig.MultiContainerEnabled, fmt.Sprintf("%v%v", prefix, "storage.enable-multicontainer"), DefaultTestType.StorageConfig.MultiContainerEnabled, "If this is true, then the container argument is overlooked and redundant. This config will automatically open new connections to new containers/buckets as they are encountered") - cmdFlags.IntVar(&DefaultTestType.StorageConfig.Cache.MaxSizeMegabytes, fmt.Sprintf("%v%v", prefix, "storage.cache.max_size_mbs"), DefaultTestType.StorageConfig.Cache.MaxSizeMegabytes, "Maximum size of the cache where the Blob store data is cached in-memory. If not specified or set to 0, cache is not used") - cmdFlags.IntVar(&DefaultTestType.StorageConfig.Cache.TargetGCPercent, fmt.Sprintf("%v%v", prefix, "storage.cache.target_gc_percent"), DefaultTestType.StorageConfig.Cache.TargetGCPercent, "Sets the garbage collection target percentage.") - cmdFlags.Int64Var(&DefaultTestType.StorageConfig.Limits.GetLimitMegabytes, fmt.Sprintf("%v%v", prefix, "storage.limits.maxDownloadMBs"), DefaultTestType.StorageConfig.Limits.GetLimitMegabytes, "Maximum allowed download size (in MBs) per call.") - cmdFlags.Var(&DefaultTestType.StorageConfig.DefaultHTTPClient.Timeout, fmt.Sprintf("%v%v", prefix, "storage.defaultHttpClient.timeout"), "Sets time out on the http client.") - cmdFlags.Int(fmt.Sprintf("%v%v", prefix, "i"), DefaultTestType.elemValueOrNil(DefaultTestType.IntValue).(int), "") - cmdFlags.StringToStringVar(&DefaultTestType.StringMap, fmt.Sprintf("%v%v", prefix, "m"), DefaultTestType.StringMap, "I'm a map of strings") - cmdFlags.String(fmt.Sprintf("%v%v", prefix, "constType"), DefaultTestType.ConstType.String(), "") - 
cmdFlags.IntVar(&DefaultTestType.AliasType, fmt.Sprintf("%v%v", prefix, "aliasType"), DefaultTestType.AliasType, "") - return cmdFlags -} diff --git a/flytestdlib/cli/pflags/api/testdata/testtype_bind_test.go b/flytestdlib/cli/pflags/api/testdata/testtype_bind_test.go deleted file mode 100755 index 6aeacdaab7..0000000000 --- a/flytestdlib/cli/pflags/api/testdata/testtype_bind_test.go +++ /dev/null @@ -1,462 +0,0 @@ -// Code generated by go generate; DO NOT EDIT. -// This file was generated by robots. - -package api - -import ( - "encoding/json" - "fmt" - "reflect" - "strings" - "testing" - - "github.com/mitchellh/mapstructure" - "github.com/stretchr/testify/assert" -) - -var dereferencableKindsTestType = map[reflect.Kind]struct{}{ - reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, -} - -// Checks if t is a kind that can be dereferenced to get its underlying type. -func canGetElementTestType(t reflect.Kind) bool { - _, exists := dereferencableKindsTestType[t] - return exists -} - -// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the -// object. Otherwise, it'll just pass on the original data. -func jsonUnmarshalerHookTestType(_, to reflect.Type, data interface{}) (interface{}, error) { - unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() - if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || - (canGetElementTestType(to.Kind()) && to.Elem().Implements(unmarshalerType)) { - - raw, err := json.Marshal(data) - if err != nil { - fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) - return data, nil - } - - res := reflect.New(to).Interface() - err = json.Unmarshal(raw, &res) - if err != nil { - fmt.Printf("Failed to umarshal Data: %v. Error: %v. 
Skipping jsonUnmarshalHook", data, err) - return data, nil - } - - return res, nil - } - - return data, nil -} - -func decode_TestType(input, result interface{}) error { - config := &mapstructure.DecoderConfig{ - TagName: "json", - WeaklyTypedInput: true, - Result: result, - DecodeHook: mapstructure.ComposeDecodeHookFunc( - mapstructure.StringToTimeDurationHookFunc(), - mapstructure.StringToSliceHookFunc(","), - jsonUnmarshalerHookTestType, - ), - } - - decoder, err := mapstructure.NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(input) -} - -func join_TestType(arr interface{}, sep string) string { - listValue := reflect.ValueOf(arr) - strs := make([]string, 0, listValue.Len()) - for i := 0; i < listValue.Len(); i++ { - strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) - } - - return strings.Join(strs, sep) -} - -func testDecodeJson_TestType(t *testing.T, val, result interface{}) { - assert.NoError(t, decode_TestType(val, result)) -} - -func testDecodeRaw_TestType(t *testing.T, vStringSlice, result interface{}) { - assert.NoError(t, decode_TestType(vStringSlice, result)) -} - -func TestTestType_GetPFlagSet(t *testing.T) { - val := TestType{} - cmdFlags := val.GetPFlagSet("") - assert.True(t, cmdFlags.HasFlags()) -} - -func TestTestType_SetFlags(t *testing.T) { - actual := TestType{} - cmdFlags := actual.GetPFlagSet("") - assert.True(t, cmdFlags.HasFlags()) - - t.Run("Test_str", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := "1" - - cmdFlags.Set("str", testValue) - if vString, err := cmdFlags.GetString("str"); err == nil { - testDecodeJson_TestType(t, fmt.Sprintf("%v", vString), &actual.StringValue) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - t.Run("Test_bl", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := "1" - - cmdFlags.Set("bl", testValue) - if vBool, err := cmdFlags.GetBool("bl"); err == nil { - testDecodeJson_TestType(t, fmt.Sprintf("%v", 
vBool), &actual.BoolValue) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - t.Run("Test_nested.i", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := "1" - - cmdFlags.Set("nested.i", testValue) - if vInt, err := cmdFlags.GetInt("nested.i"); err == nil { - testDecodeJson_TestType(t, fmt.Sprintf("%v", vInt), &actual.NestedType.IntValue) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - t.Run("Test_ints", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := join_TestType(DefaultTestType.IntArray, ",") - - cmdFlags.Set("ints", testValue) - if vIntSlice, err := cmdFlags.GetIntSlice("ints"); err == nil { - testDecodeRaw_TestType(t, join_TestType(vIntSlice, ","), &actual.IntArray) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - t.Run("Test_strs", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := join_TestType(DefaultTestType.StringArray, ",") - - cmdFlags.Set("strs", testValue) - if vStringSlice, err := cmdFlags.GetStringSlice("strs"); err == nil { - testDecodeRaw_TestType(t, join_TestType(vStringSlice, ","), &actual.StringArray) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - t.Run("Test_complexArr", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := DefaultTestType.ComplexJSONArray - - cmdFlags.Set("complexArr", testValue) - if vStringSlice, err := cmdFlags.GetStringSlice("complexArr"); err == nil { - testDecodeRaw_TestType(t, vStringSlice, &actual.ComplexJSONArray) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - t.Run("Test_c", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := DefaultTestType.mustJsonMarshal(DefaultTestType.StringToJSON) - - cmdFlags.Set("c", testValue) - if vString, err := cmdFlags.GetString("c"); err == nil { - testDecodeJson_TestType(t, fmt.Sprintf("%v", vString), &actual.StringToJSON) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - 
t.Run("Test_storage.type", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := "1" - - cmdFlags.Set("storage.type", testValue) - if vString, err := cmdFlags.GetString("storage.type"); err == nil { - testDecodeJson_TestType(t, fmt.Sprintf("%v", vString), &actual.StorageConfig.Type) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - t.Run("Test_storage.connection.endpoint", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := DefaultTestType.StorageConfig.Connection.Endpoint.String() - - cmdFlags.Set("storage.connection.endpoint", testValue) - if v := cmdFlags.Lookup("storage.connection.endpoint"); v != nil { - testDecodeJson_TestType(t, fmt.Sprintf("%v", v.Value.String()), &actual.StorageConfig.Connection.Endpoint) - - } - }) - }) - t.Run("Test_storage.connection.auth-type", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := "1" - - cmdFlags.Set("storage.connection.auth-type", testValue) - if vString, err := cmdFlags.GetString("storage.connection.auth-type"); err == nil { - testDecodeJson_TestType(t, fmt.Sprintf("%v", vString), &actual.StorageConfig.Connection.AuthType) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - t.Run("Test_storage.connection.access-key", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := "1" - - cmdFlags.Set("storage.connection.access-key", testValue) - if vString, err := cmdFlags.GetString("storage.connection.access-key"); err == nil { - testDecodeJson_TestType(t, fmt.Sprintf("%v", vString), &actual.StorageConfig.Connection.AccessKey) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - t.Run("Test_storage.connection.secret-key", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := "1" - - cmdFlags.Set("storage.connection.secret-key", testValue) - if vString, err := cmdFlags.GetString("storage.connection.secret-key"); err == nil { - testDecodeJson_TestType(t, fmt.Sprintf("%v", 
vString), &actual.StorageConfig.Connection.SecretKey) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - t.Run("Test_storage.connection.region", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := "1" - - cmdFlags.Set("storage.connection.region", testValue) - if vString, err := cmdFlags.GetString("storage.connection.region"); err == nil { - testDecodeJson_TestType(t, fmt.Sprintf("%v", vString), &actual.StorageConfig.Connection.Region) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - t.Run("Test_storage.connection.disable-ssl", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := "1" - - cmdFlags.Set("storage.connection.disable-ssl", testValue) - if vBool, err := cmdFlags.GetBool("storage.connection.disable-ssl"); err == nil { - testDecodeJson_TestType(t, fmt.Sprintf("%v", vBool), &actual.StorageConfig.Connection.DisableSSL) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - t.Run("Test_storage.stow.kind", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := "1" - - cmdFlags.Set("storage.stow.kind", testValue) - if vString, err := cmdFlags.GetString("storage.stow.kind"); err == nil { - testDecodeJson_TestType(t, fmt.Sprintf("%v", vString), &actual.StorageConfig.Stow.Kind) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - t.Run("Test_storage.stow.config", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := "a=1,b=2" - - cmdFlags.Set("storage.stow.config", testValue) - if vStringToString, err := cmdFlags.GetStringToString("storage.stow.config"); err == nil { - testDecodeRaw_TestType(t, vStringToString, &actual.StorageConfig.Stow.Config) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - t.Run("Test_storage.container", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := "1" - - cmdFlags.Set("storage.container", testValue) - if vString, err := 
cmdFlags.GetString("storage.container"); err == nil { - testDecodeJson_TestType(t, fmt.Sprintf("%v", vString), &actual.StorageConfig.InitContainer) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - t.Run("Test_storage.enable-multicontainer", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := "1" - - cmdFlags.Set("storage.enable-multicontainer", testValue) - if vBool, err := cmdFlags.GetBool("storage.enable-multicontainer"); err == nil { - testDecodeJson_TestType(t, fmt.Sprintf("%v", vBool), &actual.StorageConfig.MultiContainerEnabled) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - t.Run("Test_storage.cache.max_size_mbs", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := "1" - - cmdFlags.Set("storage.cache.max_size_mbs", testValue) - if vInt, err := cmdFlags.GetInt("storage.cache.max_size_mbs"); err == nil { - testDecodeJson_TestType(t, fmt.Sprintf("%v", vInt), &actual.StorageConfig.Cache.MaxSizeMegabytes) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - t.Run("Test_storage.cache.target_gc_percent", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := "1" - - cmdFlags.Set("storage.cache.target_gc_percent", testValue) - if vInt, err := cmdFlags.GetInt("storage.cache.target_gc_percent"); err == nil { - testDecodeJson_TestType(t, fmt.Sprintf("%v", vInt), &actual.StorageConfig.Cache.TargetGCPercent) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - t.Run("Test_storage.limits.maxDownloadMBs", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := "1" - - cmdFlags.Set("storage.limits.maxDownloadMBs", testValue) - if vInt64, err := cmdFlags.GetInt64("storage.limits.maxDownloadMBs"); err == nil { - testDecodeJson_TestType(t, fmt.Sprintf("%v", vInt64), &actual.StorageConfig.Limits.GetLimitMegabytes) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - t.Run("Test_storage.defaultHttpClient.timeout", func(t 
*testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := DefaultTestType.StorageConfig.DefaultHTTPClient.Timeout.String() - - cmdFlags.Set("storage.defaultHttpClient.timeout", testValue) - if v := cmdFlags.Lookup("storage.defaultHttpClient.timeout"); v != nil { - testDecodeJson_TestType(t, fmt.Sprintf("%v", v.Value.String()), &actual.StorageConfig.DefaultHTTPClient.Timeout) - - } - }) - }) - t.Run("Test_i", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := "1" - - cmdFlags.Set("i", testValue) - if vInt, err := cmdFlags.GetInt("i"); err == nil { - testDecodeJson_TestType(t, fmt.Sprintf("%v", vInt), &actual.IntValue) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - t.Run("Test_m", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := "a=1,b=2" - - cmdFlags.Set("m", testValue) - if vStringToString, err := cmdFlags.GetStringToString("m"); err == nil { - testDecodeRaw_TestType(t, vStringToString, &actual.StringMap) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - t.Run("Test_constType", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := "1" - - cmdFlags.Set("constType", testValue) - if vString, err := cmdFlags.GetString("constType"); err == nil { - testDecodeJson_TestType(t, fmt.Sprintf("%v", vString), &actual.ConstType) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - t.Run("Test_aliasType", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := "1" - - cmdFlags.Set("aliasType", testValue) - if vInt, err := cmdFlags.GetInt("aliasType"); err == nil { - testDecodeJson_TestType(t, fmt.Sprintf("%v", vInt), &actual.AliasType) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) -} diff --git a/flytestdlib/cli/pflags/api/testdata/testtype_test.go b/flytestdlib/cli/pflags/api/testdata/testtype_test.go deleted file mode 100755 index f59d0a2454..0000000000 --- a/flytestdlib/cli/pflags/api/testdata/testtype_test.go 
+++ /dev/null @@ -1,466 +0,0 @@ -// Code generated by go generate; DO NOT EDIT. -// This file was generated by robots. - -package api - -import ( - "encoding/json" - "fmt" - "reflect" - "strings" - "testing" - - "github.com/mitchellh/mapstructure" - "github.com/stretchr/testify/assert" -) - -var dereferencableKindsTestType = map[reflect.Kind]struct{}{ - reflect.Array: {}, reflect.Chan: {}, reflect.Map: {}, reflect.Ptr: {}, reflect.Slice: {}, -} - -// Checks if t is a kind that can be dereferenced to get its underlying type. -func canGetElementTestType(t reflect.Kind) bool { - _, exists := dereferencableKindsTestType[t] - return exists -} - -// This decoder hook tests types for json unmarshaling capability. If implemented, it uses json unmarshal to build the -// object. Otherwise, it'll just pass on the original data. -func jsonUnmarshalerHookTestType(_, to reflect.Type, data interface{}) (interface{}, error) { - unmarshalerType := reflect.TypeOf((*json.Unmarshaler)(nil)).Elem() - if to.Implements(unmarshalerType) || reflect.PtrTo(to).Implements(unmarshalerType) || - (canGetElementTestType(to.Kind()) && to.Elem().Implements(unmarshalerType)) { - - raw, err := json.Marshal(data) - if err != nil { - fmt.Printf("Failed to marshal Data: %v. Error: %v. Skipping jsonUnmarshalHook", data, err) - return data, nil - } - - res := reflect.New(to).Interface() - err = json.Unmarshal(raw, &res) - if err != nil { - fmt.Printf("Failed to umarshal Data: %v. Error: %v. 
Skipping jsonUnmarshalHook", data, err) - return data, nil - } - - return res, nil - } - - return data, nil -} - -func decode_TestType(input, result interface{}) error { - config := &mapstructure.DecoderConfig{ - TagName: "json", - WeaklyTypedInput: true, - Result: result, - DecodeHook: mapstructure.ComposeDecodeHookFunc( - mapstructure.StringToTimeDurationHookFunc(), - mapstructure.StringToSliceHookFunc(","), - jsonUnmarshalerHookTestType, - ), - } - - decoder, err := mapstructure.NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(input) -} - -func join_TestType(arr interface{}, sep string) string { - listValue := reflect.ValueOf(arr) - strs := make([]string, 0, listValue.Len()) - for i := 0; i < listValue.Len(); i++ { - strs = append(strs, fmt.Sprintf("%v", listValue.Index(i))) - } - - return strings.Join(strs, sep) -} - -func testDecodeJson_TestType(t *testing.T, val, result interface{}) { - assert.NoError(t, decode_TestType(val, result)) -} - -func testDecodeRaw_TestType(t *testing.T, vStringSlice, result interface{}) { - assert.NoError(t, decode_TestType(vStringSlice, result)) -} - -func TestTestType_GetPFlagSet(t *testing.T) { - val := TestType{} - cmdFlags := val.GetPFlagSet("") - assert.True(t, cmdFlags.HasFlags()) -} - -func TestTestType_SetFlags(t *testing.T) { - actual := TestType{} - cmdFlags := actual.GetPFlagSet("") - assert.True(t, cmdFlags.HasFlags()) - - t.Run("Test_str", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := "1" - - cmdFlags.Set("str", testValue) - if vString, err := cmdFlags.GetString("str"); err == nil { - testDecodeJson_TestType(t, fmt.Sprintf("%v", vString), &actual.StringValue) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - t.Run("Test_bl", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := "1" - - cmdFlags.Set("bl", testValue) - if vBool, err := cmdFlags.GetBool("bl"); err == nil { - testDecodeJson_TestType(t, fmt.Sprintf("%v", 
vBool), &actual.BoolValue) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - t.Run("Test_nested.i", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := "1" - - cmdFlags.Set("nested.i", testValue) - if vInt, err := cmdFlags.GetInt("nested.i"); err == nil { - testDecodeJson_TestType(t, fmt.Sprintf("%v", vInt), &actual.NestedType.IntValue) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - t.Run("Test_ints", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := join_TestType(DefaultTestType.IntArray, ",") - - cmdFlags.Set("ints", testValue) - if vIntSlice, err := cmdFlags.GetIntSlice("ints"); err == nil { - testDecodeRaw_TestType(t, join_TestType(vIntSlice, ","), &actual.IntArray) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - t.Run("Test_strs", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := join_TestType(DefaultTestType.StringArray, ",") - - cmdFlags.Set("strs", testValue) - if vStringSlice, err := cmdFlags.GetStringSlice("strs"); err == nil { - testDecodeRaw_TestType(t, join_TestType(vStringSlice, ","), &actual.StringArray) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - t.Run("Test_complexArr", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := DefaultTestType.ComplexJSONArray - - cmdFlags.Set("complexArr", testValue) - if vStringSlice, err := cmdFlags.GetStringSlice("complexArr"); err == nil { - testDecodeRaw_TestType(t, vStringSlice, &actual.ComplexJSONArray) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - t.Run("Test_c", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := DefaultTestType.mustJsonMarshal(DefaultTestType.StringToJSON) - - cmdFlags.Set("c", testValue) - if vString, err := cmdFlags.GetString("c"); err == nil { - testDecodeJson_TestType(t, fmt.Sprintf("%v", vString), &actual.StringToJSON) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - 
t.Run("Test_storage.type", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := "1" - - cmdFlags.Set("storage.type", testValue) - if vString, err := cmdFlags.GetString("storage.type"); err == nil { - testDecodeJson_TestType(t, fmt.Sprintf("%v", vString), &actual.StorageConfig.Type) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - t.Run("Test_storage.connection.endpoint", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := DefaultTestType.StorageConfig.Connection.Endpoint.String() - - cmdFlags.Set("storage.connection.endpoint", testValue) - if vString, err := cmdFlags.GetString("storage.connection.endpoint"); err == nil { - testDecodeJson_TestType(t, fmt.Sprintf("%v", vString), &actual.StorageConfig.Connection.Endpoint) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - t.Run("Test_storage.connection.auth-type", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := "1" - - cmdFlags.Set("storage.connection.auth-type", testValue) - if vString, err := cmdFlags.GetString("storage.connection.auth-type"); err == nil { - testDecodeJson_TestType(t, fmt.Sprintf("%v", vString), &actual.StorageConfig.Connection.AuthType) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - t.Run("Test_storage.connection.access-key", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := "1" - - cmdFlags.Set("storage.connection.access-key", testValue) - if vString, err := cmdFlags.GetString("storage.connection.access-key"); err == nil { - testDecodeJson_TestType(t, fmt.Sprintf("%v", vString), &actual.StorageConfig.Connection.AccessKey) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - t.Run("Test_storage.connection.secret-key", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := "1" - - cmdFlags.Set("storage.connection.secret-key", testValue) - if vString, err := cmdFlags.GetString("storage.connection.secret-key"); err == 
nil { - testDecodeJson_TestType(t, fmt.Sprintf("%v", vString), &actual.StorageConfig.Connection.SecretKey) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - t.Run("Test_storage.connection.region", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := "1" - - cmdFlags.Set("storage.connection.region", testValue) - if vString, err := cmdFlags.GetString("storage.connection.region"); err == nil { - testDecodeJson_TestType(t, fmt.Sprintf("%v", vString), &actual.StorageConfig.Connection.Region) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - t.Run("Test_storage.connection.disable-ssl", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := "1" - - cmdFlags.Set("storage.connection.disable-ssl", testValue) - if vBool, err := cmdFlags.GetBool("storage.connection.disable-ssl"); err == nil { - testDecodeJson_TestType(t, fmt.Sprintf("%v", vBool), &actual.StorageConfig.Connection.DisableSSL) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - t.Run("Test_storage.stow.kind", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := "1" - - cmdFlags.Set("storage.stow.kind", testValue) - if vString, err := cmdFlags.GetString("storage.stow.kind"); err == nil { - testDecodeJson_TestType(t, fmt.Sprintf("%v", vString), &actual.StorageConfig.Stow.Kind) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - t.Run("Test_storage.stow.config", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := "a=1,b=2" - - cmdFlags.Set("storage.stow.config", testValue) - if vStringToString, err := cmdFlags.GetStringToString("storage.stow.config"); err == nil { - testDecodeRaw_TestType(t, vStringToString, &actual.StorageConfig.Stow.Config) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - t.Run("Test_storage.container", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := "1" - - cmdFlags.Set("storage.container", testValue) - if 
vString, err := cmdFlags.GetString("storage.container"); err == nil { - testDecodeJson_TestType(t, fmt.Sprintf("%v", vString), &actual.StorageConfig.InitContainer) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - t.Run("Test_storage.enable-multicontainer", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := "1" - - cmdFlags.Set("storage.enable-multicontainer", testValue) - if vBool, err := cmdFlags.GetBool("storage.enable-multicontainer"); err == nil { - testDecodeJson_TestType(t, fmt.Sprintf("%v", vBool), &actual.StorageConfig.MultiContainerEnabled) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - t.Run("Test_storage.cache.max_size_mbs", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := "1" - - cmdFlags.Set("storage.cache.max_size_mbs", testValue) - if vInt, err := cmdFlags.GetInt("storage.cache.max_size_mbs"); err == nil { - testDecodeJson_TestType(t, fmt.Sprintf("%v", vInt), &actual.StorageConfig.Cache.MaxSizeMegabytes) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - t.Run("Test_storage.cache.target_gc_percent", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := "1" - - cmdFlags.Set("storage.cache.target_gc_percent", testValue) - if vInt, err := cmdFlags.GetInt("storage.cache.target_gc_percent"); err == nil { - testDecodeJson_TestType(t, fmt.Sprintf("%v", vInt), &actual.StorageConfig.Cache.TargetGCPercent) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - t.Run("Test_storage.limits.maxDownloadMBs", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := "1" - - cmdFlags.Set("storage.limits.maxDownloadMBs", testValue) - if vInt64, err := cmdFlags.GetInt64("storage.limits.maxDownloadMBs"); err == nil { - testDecodeJson_TestType(t, fmt.Sprintf("%v", vInt64), &actual.StorageConfig.Limits.GetLimitMegabytes) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - 
t.Run("Test_storage.defaultHttpClient.timeout", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := DefaultTestType.StorageConfig.DefaultHTTPClient.Timeout.String() - - cmdFlags.Set("storage.defaultHttpClient.timeout", testValue) - if vString, err := cmdFlags.GetString("storage.defaultHttpClient.timeout"); err == nil { - testDecodeJson_TestType(t, fmt.Sprintf("%v", vString), &actual.StorageConfig.DefaultHTTPClient.Timeout) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - t.Run("Test_i", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := "1" - - cmdFlags.Set("i", testValue) - if vInt, err := cmdFlags.GetInt("i"); err == nil { - testDecodeJson_TestType(t, fmt.Sprintf("%v", vInt), &actual.IntValue) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - t.Run("Test_m", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := "a=1,b=2" - - cmdFlags.Set("m", testValue) - if vStringToString, err := cmdFlags.GetStringToString("m"); err == nil { - testDecodeRaw_TestType(t, vStringToString, &actual.StringMap) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - t.Run("Test_constType", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := "1" - - cmdFlags.Set("constType", testValue) - if vString, err := cmdFlags.GetString("constType"); err == nil { - testDecodeJson_TestType(t, fmt.Sprintf("%v", vString), &actual.ConstType) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) - t.Run("Test_aliasType", func(t *testing.T) { - - t.Run("Override", func(t *testing.T) { - testValue := "1" - - cmdFlags.Set("aliasType", testValue) - if vInt, err := cmdFlags.GetInt("aliasType"); err == nil { - testDecodeJson_TestType(t, fmt.Sprintf("%v", vInt), &actual.AliasType) - - } else { - assert.FailNow(t, err.Error()) - } - }) - }) -} diff --git a/flytestdlib/cli/pflags/api/types.go b/flytestdlib/cli/pflags/api/types.go deleted file mode 100644 index 
051433b0cf..0000000000 --- a/flytestdlib/cli/pflags/api/types.go +++ /dev/null @@ -1,46 +0,0 @@ -package api - -import ( - "go/types" - "time" -) - -// TestStrategy determines how tests should be generated. -type TestStrategy string - -const ( - JSON TestStrategy = "Json" - SliceJoined TestStrategy = "SliceJoined" - Raw TestStrategy = "Raw" -) - -type FieldInfo struct { - Name string - GoName string - Typ types.Type - LocalTypeName string - DefaultValue string - UsageString string - FlagMethodName string - TestFlagMethodName string - TestValue string - TestStrategy TestStrategy - ShouldBindDefault bool - ShouldTestDefault bool -} - -// TypeInfo holds the finalized information passed to the template for evaluation. -type TypeInfo struct { - Timestamp time.Time - Fields []FieldInfo - PFlagValueTypes []PFlagValueType - Package string - Name string - TypeRef string - Imports map[string]string -} - -type PFlagValueType struct { - Name string - ShouldGenerateSetAndType bool -} diff --git a/flytestdlib/cli/pflags/api/utils.go b/flytestdlib/cli/pflags/api/utils.go deleted file mode 100644 index 2cb6674579..0000000000 --- a/flytestdlib/cli/pflags/api/utils.go +++ /dev/null @@ -1,82 +0,0 @@ -package api - -import ( - "bytes" - "fmt" - "go/types" - "unicode" - - "k8s.io/apimachinery/pkg/util/sets" -) - -func camelCase(str string) string { - if len(str) == 0 { - return str - } - - firstRune := bytes.Runes([]byte(str))[0] - if unicode.IsLower(firstRune) { - return fmt.Sprintf("%v%v", string(unicode.ToUpper(firstRune)), str[1:]) - } - - return str -} - -func isJSONUnmarshaler(t types.Type) bool { - return implementsAnyOfMethods(t, "UnmarshalJSON") -} - -func isJSONMarshaler(t types.Type) bool { - return implementsAnyOfMethods(t, "MarshalJSON") -} - -func isStringer(t types.Type) bool { - return implementsAnyOfMethods(t, "String") -} - -func isPFlagValue(t types.Type) bool { - return implementsAllOfMethods(t, "String", "Set", "Type") -} - -func hasStringConstructor(t interface { 
- Obj() *types.TypeName -}) bool { - return t.Obj().Parent().Lookup(fmt.Sprintf("%sString", t.Obj().Name())) != nil -} - -func implementsAnyOfMethods(t types.Type, methodNames ...string) (found bool) { - mset := types.NewMethodSet(t) - for _, name := range methodNames { - if mset.Lookup(nil, name) != nil { - return true - } - } - - mset = types.NewMethodSet(types.NewPointer(t)) - for _, name := range methodNames { - if mset.Lookup(nil, name) != nil { - return true - } - } - - return false -} - -func implementsAllOfMethods(t types.Type, methodNames ...string) (found bool) { - mset := types.NewMethodSet(t) - foundMethods := sets.NewString() - for _, name := range methodNames { - if foundMethod := mset.Lookup(nil, name); foundMethod != nil { - foundMethods.Insert(name) - } - } - - mset = types.NewMethodSet(types.NewPointer(t)) - for _, name := range methodNames { - if mset.Lookup(nil, name) != nil { - foundMethods.Insert(name) - } - } - - return foundMethods.Len() == len(methodNames) -} diff --git a/flytestdlib/cli/pflags/cmd/root.go b/flytestdlib/cli/pflags/cmd/root.go deleted file mode 100644 index f7d9e31b7c..0000000000 --- a/flytestdlib/cli/pflags/cmd/root.go +++ /dev/null @@ -1,75 +0,0 @@ -package cmd - -import ( - "bytes" - "context" - "fmt" - "strings" - - "github.com/spf13/cobra" - - "github.com/flyteorg/flyte/v2/flytestdlib/cli/pflags/api" - "github.com/flyteorg/flyte/v2/flytestdlib/logger" -) - -var ( - pkg string - defaultValuesVariable string - shouldBindDefaultVariable bool -) - -var root = cobra.Command{ - Use: "pflags MyStructName --package myproject/mypackage", - Args: cobra.ExactArgs(1), - RunE: generatePflagsProvider, - Example: ` -// go:generate pflags MyStruct -type MyStruct struct { - BoolValue bool ` + "`json:\"bl\" pflag:\"true\"`" + ` - NestedType NestedType ` + "`json:\"nested\"`" + ` - IntArray []int ` + "`json:\"ints\" pflag:\"[]int{12%2C1}\"`" + ` -} - `, -} - -func init() { - root.Flags().StringVarP(&pkg, "package", "p", ".", "Determines 
the source/destination package.") - root.Flags().StringVar(&defaultValuesVariable, "default-var", "defaultConfig", "Points to a variable to use to load default configs. If specified & found, it'll be used instead of the values specified in the tag.") - root.Flags().BoolVar(&shouldBindDefaultVariable, "bind-default-var", false, "The generated PFlags Set will bind fields to the default variable.") -} - -func Execute() error { - return root.Execute() -} - -func generatePflagsProvider(cmd *cobra.Command, args []string) error { - structName := args[0] - if structName == "" { - return fmt.Errorf("need to specify a struct name") - } - - ctx := context.Background() - gen, err := api.NewGenerator(pkg, structName, defaultValuesVariable, shouldBindDefaultVariable) - if err != nil { - return err - } - - provider, err := gen.Generate(ctx) - if err != nil { - return err - } - - var buf bytes.Buffer - defer buf.Reset() - - logger.Infof(ctx, "Generating PFlags for type [%v.%v.%v]\n", gen.GetTargetPackage().Path(), gen.GetTargetPackage().Name(), structName) - - outFilePath := fmt.Sprintf("%s_flags.go", strings.ToLower(structName)) - err = provider.WriteCodeFile(outFilePath) - if err != nil { - return err - } - - tOutFilePath := fmt.Sprintf("%s_flags_test.go", strings.ToLower(structName)) - return provider.WriteTestFile(tOutFilePath) -} diff --git a/flytestdlib/cli/pflags/cmd/version.go b/flytestdlib/cli/pflags/cmd/version.go deleted file mode 100644 index b490a23eea..0000000000 --- a/flytestdlib/cli/pflags/cmd/version.go +++ /dev/null @@ -1,18 +0,0 @@ -package cmd - -import ( - "github.com/spf13/cobra" - - "github.com/flyteorg/flyte/v2/flytestdlib/version" -) - -var versionCmd = &cobra.Command{ - Aliases: []string{"version", "ver"}, - Run: func(cmd *cobra.Command, args []string) { - version.LogBuildInformation("pflags") - }, -} - -func init() { - root.AddCommand(versionCmd) -} diff --git a/flytestdlib/cli/pflags/main.go b/flytestdlib/cli/pflags/main.go deleted file mode 100644 
index c2be45eac0..0000000000 --- a/flytestdlib/cli/pflags/main.go +++ /dev/null @@ -1,15 +0,0 @@ -// Generates a Register method to automatically add pflags to a pflagSet for all fields in a given type. -package main - -import ( - "log" - - "github.com/flyteorg/flyte/v2/flytestdlib/cli/pflags/cmd" -) - -func main() { - err := cmd.Execute() - if err != nil { - log.Fatal(err) - } -} diff --git a/flytestdlib/cli/pflags/readme.rst b/flytestdlib/cli/pflags/readme.rst deleted file mode 100644 index 84d6d105d8..0000000000 --- a/flytestdlib/cli/pflags/readme.rst +++ /dev/null @@ -1,24 +0,0 @@ -================ -Pflags Generator -================ - -This tool enables you to generate code to add pflags for all fields in a struct (recursively). In conjunction with the config package, this can be useful to generate cli flags that overrides configs while maintaining type safety and not having to deal with string typos. - -Getting Started -^^^^^^^^^^^^^^^ - - ``go get github.com/lyft/flytestdlib/cli/pflags`` - - call ``pflags --package `` OR - - add ``//go:generate pflags `` to the top of the file where the struct is declared. - has to be a struct type (it can't be, for instance, a slice type). - Supported fields' types within the struct: basic types (string, int8, int16, int32, int64, bool), json-unmarshalable types and other structs that conform to the same rules or slices of these types. - -This generates two files (struct_name_pflags.go and struct_name_pflags_test.go). If you open those, you will notice that all generated flags default to empty/zero values and no usage strings. That behavior can be customized using ``pflag`` tag. - -.. code-block:: - - type TestType struct { - StringValue string `json:"str" pflag:"\"hello world\",\"life is short\""` - BoolValue bool `json:"bl" pflag:",This is a bool value that will default to false."` - } - -``pflag`` tag is a comma-separated list. First item represents default value. Second value is usage. 
diff --git a/flytestdlib/storage/copy_impl_test.go b/flytestdlib/storage/copy_impl_test.go index 4397321c37..78103ceb4d 100644 --- a/flytestdlib/storage/copy_impl_test.go +++ b/flytestdlib/storage/copy_impl_test.go @@ -98,11 +98,11 @@ func TestCopyRaw_CachingErrorHandling(t *testing.T) { ReadRawCb: func(ctx context.Context, reference DataReference) (closer io.ReadCloser, e error) { readerCalled = true //nolint:govet,staticcheck - return ioutils.NewBytesReadCloser(bigD), errors.Wrapf(ErrFailedToWriteCache, fmt.Errorf(dummyErrorMsg), "Failed to Cache the metadata") + return ioutils.NewBytesReadCloser(bigD), errors.Wrapf(ErrFailedToWriteCache, fmt.Errorf("%s", dummyErrorMsg), "Failed to Cache the metadata") }, WriteRawCb: func(ctx context.Context, reference DataReference, size int64, opts Options, raw io.Reader) error { writerCalled = true - return errors.Wrapf(ErrFailedToWriteCache, fmt.Errorf(dummyErrorMsg), "Failed to Cache the metadata") //nolint:govet,staticcheck + return errors.Wrapf(ErrFailedToWriteCache, fmt.Errorf("%s", dummyErrorMsg), "Failed to Cache the metadata") //nolint:govet,staticcheck }, } @@ -124,11 +124,11 @@ func TestCopyRaw_CachingErrorHandling(t *testing.T) { store := dummyStore{ ReadRawCb: func(ctx context.Context, reference DataReference) (closer io.ReadCloser, e error) { readerCalled = true - return ioutils.NewBytesReadCloser(bigD), fmt.Errorf(dummyErrorMsg) //nolint:govet,staticcheck + return ioutils.NewBytesReadCloser(bigD), fmt.Errorf("%s", dummyErrorMsg) //nolint:govet,staticcheck }, WriteRawCb: func(ctx context.Context, reference DataReference, size int64, opts Options, raw io.Reader) error { writerCalled = true - return fmt.Errorf(dummyErrorMsg) //nolint:govet,staticcheck + return fmt.Errorf("%s", dummyErrorMsg) //nolint:govet,staticcheck }, } diff --git a/flytestdlib/storage/protobuf_store_test.go b/flytestdlib/storage/protobuf_store_test.go index f0292c4172..019c61e7d4 100644 --- a/flytestdlib/storage/protobuf_store_test.go +++ 
b/flytestdlib/storage/protobuf_store_test.go @@ -148,13 +148,13 @@ func TestDefaultProtobufStore_HardErrors(t *testing.T) { dummyReadErrorMsg := "Dummy read error" store := &dummyStore{ HeadCb: func(ctx context.Context, reference DataReference) (Metadata, error) { - return MemoryMetadata{}, fmt.Errorf(dummyHeadErrorMsg) //nolint:govet,staticcheck + return MemoryMetadata{}, fmt.Errorf("%s", dummyHeadErrorMsg) //nolint:govet,staticcheck }, WriteRawCb: func(ctx context.Context, reference DataReference, size int64, opts Options, raw io.Reader) error { - return fmt.Errorf(dummyWriteErrorMsg) //nolint:govet,staticcheck + return fmt.Errorf("%s", dummyWriteErrorMsg) //nolint:govet,staticcheck }, ReadRawCb: func(ctx context.Context, reference DataReference) (io.ReadCloser, error) { - return nil, fmt.Errorf(dummyReadErrorMsg) //nolint:govet,staticcheck + return nil, fmt.Errorf("%s", dummyReadErrorMsg) //nolint:govet,staticcheck }, } pbErroneousStore := NewDefaultProtobufStoreWithMetrics(store, metrics.protoMetrics) diff --git a/flytestdlib/utils/constants.go b/flytestdlib/utils/constants.go new file mode 100644 index 0000000000..3c604abcb5 --- /dev/null +++ b/flytestdlib/utils/constants.go @@ -0,0 +1,3 @@ +package utils + +const MaxUniqueIDLength = 30 diff --git a/gen/go/flyteidl2/cacheservice/cacheservice.pb.go b/gen/go/flyteidl2/cacheservice/cacheservice.pb.go new file mode 100644 index 0000000000..0704287268 --- /dev/null +++ b/gen/go/flyteidl2/cacheservice/cacheservice.pb.go @@ -0,0 +1,1314 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc (unknown) +// source: flyteidl2/cacheservice/cacheservice.proto + +package cacheservice + +import ( + core "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Additional metadata as key-value pairs +type KeyMapMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Values map[string]string `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // Additional metadata as key-value pairs +} + +func (x *KeyMapMetadata) Reset() { + *x = KeyMapMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeyMapMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeyMapMetadata) ProtoMessage() {} + +func (x *KeyMapMetadata) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeyMapMetadata.ProtoReflect.Descriptor instead. 
+func (*KeyMapMetadata) Descriptor() ([]byte, []int) { + return file_flyteidl2_cacheservice_cacheservice_proto_rawDescGZIP(), []int{0} +} + +func (x *KeyMapMetadata) GetValues() map[string]string { + if x != nil { + return x.Values + } + return nil +} + +// Metadata for cached outputs, including the source identifier and timestamps. +type Metadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + SourceIdentifier *core.Identifier `protobuf:"bytes,1,opt,name=source_identifier,json=sourceIdentifier,proto3" json:"source_identifier,omitempty"` // Source task or workflow identifier + KeyMap *KeyMapMetadata `protobuf:"bytes,2,opt,name=key_map,json=keyMap,proto3" json:"key_map,omitempty"` // Additional metadata as key-value pairs + CreatedAt *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` // Creation timestamp + LastUpdatedAt *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=last_updated_at,json=lastUpdatedAt,proto3" json:"last_updated_at,omitempty"` // Last update timestamp +} + +func (x *Metadata) Reset() { + *x = Metadata{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Metadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Metadata) ProtoMessage() {} + +func (x *Metadata) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Metadata.ProtoReflect.Descriptor instead. 
+func (*Metadata) Descriptor() ([]byte, []int) { + return file_flyteidl2_cacheservice_cacheservice_proto_rawDescGZIP(), []int{1} +} + +func (x *Metadata) GetSourceIdentifier() *core.Identifier { + if x != nil { + return x.SourceIdentifier + } + return nil +} + +func (x *Metadata) GetKeyMap() *KeyMapMetadata { + if x != nil { + return x.KeyMap + } + return nil +} + +func (x *Metadata) GetCreatedAt() *timestamppb.Timestamp { + if x != nil { + return x.CreatedAt + } + return nil +} + +func (x *Metadata) GetLastUpdatedAt() *timestamppb.Timestamp { + if x != nil { + return x.LastUpdatedAt + } + return nil +} + +// Represents cached output, either as literals or an URI, with associated metadata. +type CachedOutput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Output: + // + // *CachedOutput_OutputLiterals + // *CachedOutput_OutputUri + Output isCachedOutput_Output `protobuf_oneof:"output"` + Metadata *Metadata `protobuf:"bytes,3,opt,name=metadata,proto3" json:"metadata,omitempty"` // Associated metadata +} + +func (x *CachedOutput) Reset() { + *x = CachedOutput{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CachedOutput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CachedOutput) ProtoMessage() {} + +func (x *CachedOutput) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CachedOutput.ProtoReflect.Descriptor instead. 
+func (*CachedOutput) Descriptor() ([]byte, []int) { + return file_flyteidl2_cacheservice_cacheservice_proto_rawDescGZIP(), []int{2} +} + +func (m *CachedOutput) GetOutput() isCachedOutput_Output { + if m != nil { + return m.Output + } + return nil +} + +func (x *CachedOutput) GetOutputLiterals() *core.LiteralMap { + if x, ok := x.GetOutput().(*CachedOutput_OutputLiterals); ok { + return x.OutputLiterals + } + return nil +} + +func (x *CachedOutput) GetOutputUri() string { + if x, ok := x.GetOutput().(*CachedOutput_OutputUri); ok { + return x.OutputUri + } + return "" +} + +func (x *CachedOutput) GetMetadata() *Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +type isCachedOutput_Output interface { + isCachedOutput_Output() +} + +type CachedOutput_OutputLiterals struct { + OutputLiterals *core.LiteralMap `protobuf:"bytes,1,opt,name=output_literals,json=outputLiterals,proto3,oneof"` // Output literals +} + +type CachedOutput_OutputUri struct { + OutputUri string `protobuf:"bytes,2,opt,name=output_uri,json=outputUri,proto3,oneof"` // URI to output data +} + +func (*CachedOutput_OutputLiterals) isCachedOutput_Output() {} + +func (*CachedOutput_OutputUri) isCachedOutput_Output() {} + +// Request to retrieve cached data by key. 
+type GetCacheRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` // Cache key +} + +func (x *GetCacheRequest) Reset() { + *x = GetCacheRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetCacheRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetCacheRequest) ProtoMessage() {} + +func (x *GetCacheRequest) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetCacheRequest.ProtoReflect.Descriptor instead. +func (*GetCacheRequest) Descriptor() ([]byte, []int) { + return file_flyteidl2_cacheservice_cacheservice_proto_rawDescGZIP(), []int{3} +} + +func (x *GetCacheRequest) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +// Response with cached data for a given key. 
+type GetCacheResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Output *CachedOutput `protobuf:"bytes,1,opt,name=output,proto3" json:"output,omitempty"` // Cached output +} + +func (x *GetCacheResponse) Reset() { + *x = GetCacheResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetCacheResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetCacheResponse) ProtoMessage() {} + +func (x *GetCacheResponse) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetCacheResponse.ProtoReflect.Descriptor instead. 
+func (*GetCacheResponse) Descriptor() ([]byte, []int) { + return file_flyteidl2_cacheservice_cacheservice_proto_rawDescGZIP(), []int{4} +} + +func (x *GetCacheResponse) GetOutput() *CachedOutput { + if x != nil { + return x.Output + } + return nil +} + +type OverwriteOutput struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Overwrite bool `protobuf:"varint,1,opt,name=overwrite,proto3" json:"overwrite,omitempty"` // Overwrite flag + DeleteBlob bool `protobuf:"varint,2,opt,name=delete_blob,json=deleteBlob,proto3" json:"delete_blob,omitempty"` // Delete existing blob + MaxAge *durationpb.Duration `protobuf:"bytes,3,opt,name=max_age,json=maxAge,proto3" json:"max_age,omitempty"` // Maximum age of the cached output since last update +} + +func (x *OverwriteOutput) Reset() { + *x = OverwriteOutput{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *OverwriteOutput) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*OverwriteOutput) ProtoMessage() {} + +func (x *OverwriteOutput) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use OverwriteOutput.ProtoReflect.Descriptor instead. 
+func (*OverwriteOutput) Descriptor() ([]byte, []int) { + return file_flyteidl2_cacheservice_cacheservice_proto_rawDescGZIP(), []int{5} +} + +func (x *OverwriteOutput) GetOverwrite() bool { + if x != nil { + return x.Overwrite + } + return false +} + +func (x *OverwriteOutput) GetDeleteBlob() bool { + if x != nil { + return x.DeleteBlob + } + return false +} + +func (x *OverwriteOutput) GetMaxAge() *durationpb.Duration { + if x != nil { + return x.MaxAge + } + return nil +} + +// Request to store/update cached data by key. +type PutCacheRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` // Cache key + Output *CachedOutput `protobuf:"bytes,2,opt,name=output,proto3" json:"output,omitempty"` // Output to cache + Overwrite *OverwriteOutput `protobuf:"bytes,3,opt,name=overwrite,proto3" json:"overwrite,omitempty"` // Overwrite flag if exists +} + +func (x *PutCacheRequest) Reset() { + *x = PutCacheRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PutCacheRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PutCacheRequest) ProtoMessage() {} + +func (x *PutCacheRequest) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PutCacheRequest.ProtoReflect.Descriptor instead. 
+func (*PutCacheRequest) Descriptor() ([]byte, []int) { + return file_flyteidl2_cacheservice_cacheservice_proto_rawDescGZIP(), []int{6} +} + +func (x *PutCacheRequest) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *PutCacheRequest) GetOutput() *CachedOutput { + if x != nil { + return x.Output + } + return nil +} + +func (x *PutCacheRequest) GetOverwrite() *OverwriteOutput { + if x != nil { + return x.Overwrite + } + return nil +} + +// Response message of cache store/update operation. +type PutCacheResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *PutCacheResponse) Reset() { + *x = PutCacheResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PutCacheResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PutCacheResponse) ProtoMessage() {} + +func (x *PutCacheResponse) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PutCacheResponse.ProtoReflect.Descriptor instead. +func (*PutCacheResponse) Descriptor() ([]byte, []int) { + return file_flyteidl2_cacheservice_cacheservice_proto_rawDescGZIP(), []int{7} +} + +// Request to delete cached data by key. 
+type DeleteCacheRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` // Cache key +} + +func (x *DeleteCacheRequest) Reset() { + *x = DeleteCacheRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteCacheRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteCacheRequest) ProtoMessage() {} + +func (x *DeleteCacheRequest) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteCacheRequest.ProtoReflect.Descriptor instead. +func (*DeleteCacheRequest) Descriptor() ([]byte, []int) { + return file_flyteidl2_cacheservice_cacheservice_proto_rawDescGZIP(), []int{8} +} + +func (x *DeleteCacheRequest) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +// Response message of cache deletion operation. 
+type DeleteCacheResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *DeleteCacheResponse) Reset() { + *x = DeleteCacheResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteCacheResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteCacheResponse) ProtoMessage() {} + +func (x *DeleteCacheResponse) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteCacheResponse.ProtoReflect.Descriptor instead. +func (*DeleteCacheResponse) Descriptor() ([]byte, []int) { + return file_flyteidl2_cacheservice_cacheservice_proto_rawDescGZIP(), []int{9} +} + +// A reservation including owner, heartbeat interval, expiration timestamp, and various metadata. 
+type Reservation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` // The unique ID for the reservation - same as the cache key + OwnerId string `protobuf:"bytes,2,opt,name=owner_id,json=ownerId,proto3" json:"owner_id,omitempty"` // The unique ID of the owner for the reservation + HeartbeatInterval *durationpb.Duration `protobuf:"bytes,3,opt,name=heartbeat_interval,json=heartbeatInterval,proto3" json:"heartbeat_interval,omitempty"` // Requested reservation extension heartbeat interval + ExpiresAt *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=expires_at,json=expiresAt,proto3" json:"expires_at,omitempty"` // Expiration timestamp of this reservation +} + +func (x *Reservation) Reset() { + *x = Reservation{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Reservation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Reservation) ProtoMessage() {} + +func (x *Reservation) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Reservation.ProtoReflect.Descriptor instead. 
+func (*Reservation) Descriptor() ([]byte, []int) { + return file_flyteidl2_cacheservice_cacheservice_proto_rawDescGZIP(), []int{10} +} + +func (x *Reservation) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *Reservation) GetOwnerId() string { + if x != nil { + return x.OwnerId + } + return "" +} + +func (x *Reservation) GetHeartbeatInterval() *durationpb.Duration { + if x != nil { + return x.HeartbeatInterval + } + return nil +} + +func (x *Reservation) GetExpiresAt() *timestamppb.Timestamp { + if x != nil { + return x.ExpiresAt + } + return nil +} + +// Request to get or extend a reservation for a cache key +type GetOrExtendReservationRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` // The unique ID for the reservation - same as the cache key + OwnerId string `protobuf:"bytes,2,opt,name=owner_id,json=ownerId,proto3" json:"owner_id,omitempty"` // The unique ID of the owner for the reservation + HeartbeatInterval *durationpb.Duration `protobuf:"bytes,3,opt,name=heartbeat_interval,json=heartbeatInterval,proto3" json:"heartbeat_interval,omitempty"` // Requested reservation extension heartbeat interval +} + +func (x *GetOrExtendReservationRequest) Reset() { + *x = GetOrExtendReservationRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetOrExtendReservationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetOrExtendReservationRequest) ProtoMessage() {} + +func (x *GetOrExtendReservationRequest) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if 
ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetOrExtendReservationRequest.ProtoReflect.Descriptor instead. +func (*GetOrExtendReservationRequest) Descriptor() ([]byte, []int) { + return file_flyteidl2_cacheservice_cacheservice_proto_rawDescGZIP(), []int{11} +} + +func (x *GetOrExtendReservationRequest) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *GetOrExtendReservationRequest) GetOwnerId() string { + if x != nil { + return x.OwnerId + } + return "" +} + +func (x *GetOrExtendReservationRequest) GetHeartbeatInterval() *durationpb.Duration { + if x != nil { + return x.HeartbeatInterval + } + return nil +} + +// Request to get or extend a reservation for a cache key +type GetOrExtendReservationResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Reservation *Reservation `protobuf:"bytes,1,opt,name=reservation,proto3" json:"reservation,omitempty"` // The reservation that was created or extended +} + +func (x *GetOrExtendReservationResponse) Reset() { + *x = GetOrExtendReservationResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetOrExtendReservationResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetOrExtendReservationResponse) ProtoMessage() {} + +func (x *GetOrExtendReservationResponse) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetOrExtendReservationResponse.ProtoReflect.Descriptor instead. 
+func (*GetOrExtendReservationResponse) Descriptor() ([]byte, []int) { + return file_flyteidl2_cacheservice_cacheservice_proto_rawDescGZIP(), []int{12} +} + +func (x *GetOrExtendReservationResponse) GetReservation() *Reservation { + if x != nil { + return x.Reservation + } + return nil +} + +// Request to release the reservation for a cache key +type ReleaseReservationRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` // The unique ID for the reservation - same as the cache key + OwnerId string `protobuf:"bytes,2,opt,name=owner_id,json=ownerId,proto3" json:"owner_id,omitempty"` // The unique ID of the owner for the reservation +} + +func (x *ReleaseReservationRequest) Reset() { + *x = ReleaseReservationRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReleaseReservationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReleaseReservationRequest) ProtoMessage() {} + +func (x *ReleaseReservationRequest) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReleaseReservationRequest.ProtoReflect.Descriptor instead. 
+func (*ReleaseReservationRequest) Descriptor() ([]byte, []int) { + return file_flyteidl2_cacheservice_cacheservice_proto_rawDescGZIP(), []int{13} +} + +func (x *ReleaseReservationRequest) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *ReleaseReservationRequest) GetOwnerId() string { + if x != nil { + return x.OwnerId + } + return "" +} + +// Response message of release reservation operation. +type ReleaseReservationResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ReleaseReservationResponse) Reset() { + *x = ReleaseReservationResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReleaseReservationResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReleaseReservationResponse) ProtoMessage() {} + +func (x *ReleaseReservationResponse) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReleaseReservationResponse.ProtoReflect.Descriptor instead. 
+func (*ReleaseReservationResponse) Descriptor() ([]byte, []int) { + return file_flyteidl2_cacheservice_cacheservice_proto_rawDescGZIP(), []int{14} +} + +var File_flyteidl2_cacheservice_cacheservice_proto protoreflect.FileDescriptor + +var file_flyteidl2_cacheservice_cacheservice_proto_rawDesc = []byte{ + 0x0a, 0x29, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, 0x61, 0x63, 0x68, + 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x16, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x1a, 0x1f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, + 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x6c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x97, 0x01, 0x0a, 0x0e, 0x4b, 0x65, 0x79, 0x4d, 0x61, 0x70, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x4a, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x2e, 0x4b, 0x65, 0x79, 0x4d, 0x61, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x56, 0x61, 0x6c, 0x75, 
0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x93, + 0x02, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x47, 0x0a, 0x11, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x52, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x66, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x07, 0x6b, 0x65, 0x79, 0x5f, 0x6d, 0x61, 0x70, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4b, + 0x65, 0x79, 0x4d, 0x61, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x06, 0x6b, + 0x65, 0x79, 0x4d, 0x61, 0x70, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, + 0x5f, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, + 0x12, 0x42, 0x0a, 0x0f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, + 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 
0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x64, 0x41, 0x74, 0x22, 0xbe, 0x01, 0x0a, 0x0c, 0x43, 0x61, 0x63, 0x68, 0x65, 0x64, 0x4f, + 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x45, 0x0a, 0x0f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, + 0x6c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x4d, 0x61, 0x70, 0x48, 0x00, 0x52, 0x0e, 0x6f, 0x75, + 0x74, 0x70, 0x75, 0x74, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x73, 0x12, 0x1f, 0x0a, 0x0a, + 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x48, 0x00, 0x52, 0x09, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x55, 0x72, 0x69, 0x12, 0x3c, 0x0a, + 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x20, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, + 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x08, 0x0a, 0x06, 0x6f, + 0x75, 0x74, 0x70, 0x75, 0x74, 0x22, 0x23, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x43, 0x61, 0x63, 0x68, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x50, 0x0a, 0x10, 0x47, 0x65, + 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, + 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, + 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x64, 0x4f, 0x75, + 
0x74, 0x70, 0x75, 0x74, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x22, 0x84, 0x01, 0x0a, + 0x0f, 0x4f, 0x76, 0x65, 0x72, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, + 0x12, 0x1c, 0x0a, 0x09, 0x6f, 0x76, 0x65, 0x72, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x09, 0x6f, 0x76, 0x65, 0x72, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, 0x1f, + 0x0a, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x62, 0x6c, 0x6f, 0x62, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0a, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x6c, 0x6f, 0x62, 0x12, + 0x32, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x6d, 0x61, 0x78, + 0x41, 0x67, 0x65, 0x22, 0xa8, 0x01, 0x0a, 0x0f, 0x50, 0x75, 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3c, 0x0a, 0x06, 0x6f, 0x75, 0x74, + 0x70, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x64, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x52, + 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x45, 0x0a, 0x09, 0x6f, 0x76, 0x65, 0x72, 0x77, + 0x72, 0x69, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x75, 0x74, + 0x70, 0x75, 0x74, 0x52, 0x09, 0x6f, 0x76, 0x65, 0x72, 0x77, 0x72, 0x69, 0x74, 0x65, 0x22, 0x12, + 0x0a, 0x10, 0x50, 0x75, 0x74, 0x43, 
0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x26, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x15, 0x0a, 0x13, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0xbf, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x48, + 0x0a, 0x12, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x5f, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x76, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, + 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x39, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, + 0x72, 0x65, 0x73, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, + 0x73, 0x41, 0x74, 0x22, 0x96, 0x01, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x4f, 0x72, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6f, 
0x77, 0x6e, 0x65, 0x72, + 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x77, 0x6e, 0x65, 0x72, + 0x49, 0x64, 0x12, 0x48, 0x0a, 0x12, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x5f, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x68, 0x65, 0x61, 0x72, 0x74, + 0x62, 0x65, 0x61, 0x74, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x22, 0x67, 0x0a, 0x1e, + 0x47, 0x65, 0x74, 0x4f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x65, 0x72, + 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x45, + 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, + 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x19, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, + 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x22, + 0x1c, 0x0a, 0x1a, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xac, 0x04, + 0x0a, 0x0c, 0x43, 0x61, 0x63, 0x68, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x58, + 0x0a, 0x03, 
0x47, 0x65, 0x74, 0x12, 0x27, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, + 0x65, 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, + 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, 0x0a, 0x03, 0x50, 0x75, 0x74, 0x12, + 0x27, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, + 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x50, 0x75, 0x74, 0x43, 0x61, 0x63, 0x68, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2e, 0x50, 0x75, 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x61, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x2a, 0x2e, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x87, 0x01, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x4f, 0x72, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x35, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, + 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 
0x65, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x72, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x65, + 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x7b, 0x0a, 0x12, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x31, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x52, + 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0xe6, 0x01, 0x0a, + 0x1a, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, + 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x42, 0x11, 0x43, 0x61, 0x63, + 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x02, + 0x50, 0x01, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x76, 0x32, + 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x32, 0x2f, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 
0xa2, 0x02, + 0x03, 0x46, 0x43, 0x58, 0xaa, 0x02, 0x16, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, + 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0xca, 0x02, 0x16, + 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x43, 0x61, 0x63, 0x68, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0xe2, 0x02, 0x22, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x32, 0x5c, 0x43, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5c, + 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x17, 0x46, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x3a, 0x3a, 0x43, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_flyteidl2_cacheservice_cacheservice_proto_rawDescOnce sync.Once + file_flyteidl2_cacheservice_cacheservice_proto_rawDescData = file_flyteidl2_cacheservice_cacheservice_proto_rawDesc +) + +func file_flyteidl2_cacheservice_cacheservice_proto_rawDescGZIP() []byte { + file_flyteidl2_cacheservice_cacheservice_proto_rawDescOnce.Do(func() { + file_flyteidl2_cacheservice_cacheservice_proto_rawDescData = protoimpl.X.CompressGZIP(file_flyteidl2_cacheservice_cacheservice_proto_rawDescData) + }) + return file_flyteidl2_cacheservice_cacheservice_proto_rawDescData +} + +var file_flyteidl2_cacheservice_cacheservice_proto_msgTypes = make([]protoimpl.MessageInfo, 16) +var file_flyteidl2_cacheservice_cacheservice_proto_goTypes = []interface{}{ + (*KeyMapMetadata)(nil), // 0: flyteidl2.cacheservice.KeyMapMetadata + (*Metadata)(nil), // 1: flyteidl2.cacheservice.Metadata + (*CachedOutput)(nil), // 2: flyteidl2.cacheservice.CachedOutput + (*GetCacheRequest)(nil), // 3: flyteidl2.cacheservice.GetCacheRequest + (*GetCacheResponse)(nil), // 4: flyteidl2.cacheservice.GetCacheResponse + (*OverwriteOutput)(nil), // 5: flyteidl2.cacheservice.OverwriteOutput + (*PutCacheRequest)(nil), // 6: 
flyteidl2.cacheservice.PutCacheRequest + (*PutCacheResponse)(nil), // 7: flyteidl2.cacheservice.PutCacheResponse + (*DeleteCacheRequest)(nil), // 8: flyteidl2.cacheservice.DeleteCacheRequest + (*DeleteCacheResponse)(nil), // 9: flyteidl2.cacheservice.DeleteCacheResponse + (*Reservation)(nil), // 10: flyteidl2.cacheservice.Reservation + (*GetOrExtendReservationRequest)(nil), // 11: flyteidl2.cacheservice.GetOrExtendReservationRequest + (*GetOrExtendReservationResponse)(nil), // 12: flyteidl2.cacheservice.GetOrExtendReservationResponse + (*ReleaseReservationRequest)(nil), // 13: flyteidl2.cacheservice.ReleaseReservationRequest + (*ReleaseReservationResponse)(nil), // 14: flyteidl2.cacheservice.ReleaseReservationResponse + nil, // 15: flyteidl2.cacheservice.KeyMapMetadata.ValuesEntry + (*core.Identifier)(nil), // 16: flyteidl2.core.Identifier + (*timestamppb.Timestamp)(nil), // 17: google.protobuf.Timestamp + (*core.LiteralMap)(nil), // 18: flyteidl2.core.LiteralMap + (*durationpb.Duration)(nil), // 19: google.protobuf.Duration +} +var file_flyteidl2_cacheservice_cacheservice_proto_depIdxs = []int32{ + 15, // 0: flyteidl2.cacheservice.KeyMapMetadata.values:type_name -> flyteidl2.cacheservice.KeyMapMetadata.ValuesEntry + 16, // 1: flyteidl2.cacheservice.Metadata.source_identifier:type_name -> flyteidl2.core.Identifier + 0, // 2: flyteidl2.cacheservice.Metadata.key_map:type_name -> flyteidl2.cacheservice.KeyMapMetadata + 17, // 3: flyteidl2.cacheservice.Metadata.created_at:type_name -> google.protobuf.Timestamp + 17, // 4: flyteidl2.cacheservice.Metadata.last_updated_at:type_name -> google.protobuf.Timestamp + 18, // 5: flyteidl2.cacheservice.CachedOutput.output_literals:type_name -> flyteidl2.core.LiteralMap + 1, // 6: flyteidl2.cacheservice.CachedOutput.metadata:type_name -> flyteidl2.cacheservice.Metadata + 2, // 7: flyteidl2.cacheservice.GetCacheResponse.output:type_name -> flyteidl2.cacheservice.CachedOutput + 19, // 8: 
flyteidl2.cacheservice.OverwriteOutput.max_age:type_name -> google.protobuf.Duration + 2, // 9: flyteidl2.cacheservice.PutCacheRequest.output:type_name -> flyteidl2.cacheservice.CachedOutput + 5, // 10: flyteidl2.cacheservice.PutCacheRequest.overwrite:type_name -> flyteidl2.cacheservice.OverwriteOutput + 19, // 11: flyteidl2.cacheservice.Reservation.heartbeat_interval:type_name -> google.protobuf.Duration + 17, // 12: flyteidl2.cacheservice.Reservation.expires_at:type_name -> google.protobuf.Timestamp + 19, // 13: flyteidl2.cacheservice.GetOrExtendReservationRequest.heartbeat_interval:type_name -> google.protobuf.Duration + 10, // 14: flyteidl2.cacheservice.GetOrExtendReservationResponse.reservation:type_name -> flyteidl2.cacheservice.Reservation + 3, // 15: flyteidl2.cacheservice.CacheService.Get:input_type -> flyteidl2.cacheservice.GetCacheRequest + 6, // 16: flyteidl2.cacheservice.CacheService.Put:input_type -> flyteidl2.cacheservice.PutCacheRequest + 8, // 17: flyteidl2.cacheservice.CacheService.Delete:input_type -> flyteidl2.cacheservice.DeleteCacheRequest + 11, // 18: flyteidl2.cacheservice.CacheService.GetOrExtendReservation:input_type -> flyteidl2.cacheservice.GetOrExtendReservationRequest + 13, // 19: flyteidl2.cacheservice.CacheService.ReleaseReservation:input_type -> flyteidl2.cacheservice.ReleaseReservationRequest + 4, // 20: flyteidl2.cacheservice.CacheService.Get:output_type -> flyteidl2.cacheservice.GetCacheResponse + 7, // 21: flyteidl2.cacheservice.CacheService.Put:output_type -> flyteidl2.cacheservice.PutCacheResponse + 9, // 22: flyteidl2.cacheservice.CacheService.Delete:output_type -> flyteidl2.cacheservice.DeleteCacheResponse + 12, // 23: flyteidl2.cacheservice.CacheService.GetOrExtendReservation:output_type -> flyteidl2.cacheservice.GetOrExtendReservationResponse + 14, // 24: flyteidl2.cacheservice.CacheService.ReleaseReservation:output_type -> flyteidl2.cacheservice.ReleaseReservationResponse + 20, // [20:25] is the sub-list for method 
output_type + 15, // [15:20] is the sub-list for method input_type + 15, // [15:15] is the sub-list for extension type_name + 15, // [15:15] is the sub-list for extension extendee + 0, // [0:15] is the sub-list for field type_name +} + +func init() { file_flyteidl2_cacheservice_cacheservice_proto_init() } +func file_flyteidl2_cacheservice_cacheservice_proto_init() { + if File_flyteidl2_cacheservice_cacheservice_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeyMapMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Metadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CachedOutput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetCacheRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetCacheResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*OverwriteOutput); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PutCacheRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PutCacheResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteCacheRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteCacheResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Reservation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetOrExtendReservationRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := 
v.(*GetOrExtendReservationResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReleaseReservationRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReleaseReservationResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_flyteidl2_cacheservice_cacheservice_proto_msgTypes[2].OneofWrappers = []interface{}{ + (*CachedOutput_OutputLiterals)(nil), + (*CachedOutput_OutputUri)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_flyteidl2_cacheservice_cacheservice_proto_rawDesc, + NumEnums: 0, + NumMessages: 16, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_flyteidl2_cacheservice_cacheservice_proto_goTypes, + DependencyIndexes: file_flyteidl2_cacheservice_cacheservice_proto_depIdxs, + MessageInfos: file_flyteidl2_cacheservice_cacheservice_proto_msgTypes, + }.Build() + File_flyteidl2_cacheservice_cacheservice_proto = out.File + file_flyteidl2_cacheservice_cacheservice_proto_rawDesc = nil + file_flyteidl2_cacheservice_cacheservice_proto_goTypes = nil + file_flyteidl2_cacheservice_cacheservice_proto_depIdxs = nil +} diff --git a/gen/go/flyteidl2/cacheservice/cacheservice.pb.validate.go b/gen/go/flyteidl2/cacheservice/cacheservice.pb.validate.go new file mode 100644 index 0000000000..f00005cad4 --- /dev/null +++ b/gen/go/flyteidl2/cacheservice/cacheservice.pb.validate.go @@ -0,0 +1,2006 @@ +// Code generated by 
protoc-gen-validate. DO NOT EDIT. +// source: flyteidl2/cacheservice/cacheservice.proto + +package cacheservice + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on KeyMapMetadata with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *KeyMapMetadata) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on KeyMapMetadata with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in KeyMapMetadataMultiError, +// or nil if none found. +func (m *KeyMapMetadata) ValidateAll() error { + return m.validate(true) +} + +func (m *KeyMapMetadata) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Values + + if len(errors) > 0 { + return KeyMapMetadataMultiError(errors) + } + + return nil +} + +// KeyMapMetadataMultiError is an error wrapping multiple validation errors +// returned by KeyMapMetadata.ValidateAll() if the designated constraints +// aren't met. +type KeyMapMetadataMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m KeyMapMetadataMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m KeyMapMetadataMultiError) AllErrors() []error { return m } + +// KeyMapMetadataValidationError is the validation error returned by +// KeyMapMetadata.Validate if the designated constraints aren't met. +type KeyMapMetadataValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e KeyMapMetadataValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e KeyMapMetadataValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e KeyMapMetadataValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e KeyMapMetadataValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e KeyMapMetadataValidationError) ErrorName() string { return "KeyMapMetadataValidationError" } + +// Error satisfies the builtin error interface +func (e KeyMapMetadataValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sKeyMapMetadata.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = KeyMapMetadataValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = KeyMapMetadataValidationError{} + +// Validate checks the field values on Metadata with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. 
+func (m *Metadata) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Metadata with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in MetadataMultiError, or nil +// if none found. +func (m *Metadata) ValidateAll() error { + return m.validate(true) +} + +func (m *Metadata) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetSourceIdentifier()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MetadataValidationError{ + field: "SourceIdentifier", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MetadataValidationError{ + field: "SourceIdentifier", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetSourceIdentifier()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MetadataValidationError{ + field: "SourceIdentifier", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetKeyMap()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MetadataValidationError{ + field: "KeyMap", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MetadataValidationError{ + field: "KeyMap", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetKeyMap()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MetadataValidationError{ + field: "KeyMap", + 
reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetCreatedAt()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MetadataValidationError{ + field: "CreatedAt", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MetadataValidationError{ + field: "CreatedAt", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCreatedAt()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MetadataValidationError{ + field: "CreatedAt", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLastUpdatedAt()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, MetadataValidationError{ + field: "LastUpdatedAt", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, MetadataValidationError{ + field: "LastUpdatedAt", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLastUpdatedAt()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return MetadataValidationError{ + field: "LastUpdatedAt", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return MetadataMultiError(errors) + } + + return nil +} + +// MetadataMultiError is an error wrapping multiple validation errors returned +// by Metadata.ValidateAll() if the designated constraints aren't met. +type MetadataMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m MetadataMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MetadataMultiError) AllErrors() []error { return m } + +// MetadataValidationError is the validation error returned by +// Metadata.Validate if the designated constraints aren't met. +type MetadataValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MetadataValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MetadataValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MetadataValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MetadataValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e MetadataValidationError) ErrorName() string { return "MetadataValidationError" } + +// Error satisfies the builtin error interface +func (e MetadataValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMetadata.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MetadataValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MetadataValidationError{} + +// Validate checks the field values on CachedOutput with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. 
+func (m *CachedOutput) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CachedOutput with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in CachedOutputMultiError, or +// nil if none found. +func (m *CachedOutput) ValidateAll() error { + return m.validate(true) +} + +func (m *CachedOutput) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CachedOutputValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CachedOutputValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CachedOutputValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + switch v := m.Output.(type) { + case *CachedOutput_OutputLiterals: + if v == nil { + err := CachedOutputValidationError{ + field: "Output", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetOutputLiterals()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CachedOutputValidationError{ + field: "OutputLiterals", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CachedOutputValidationError{ + 
field: "OutputLiterals", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOutputLiterals()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CachedOutputValidationError{ + field: "OutputLiterals", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *CachedOutput_OutputUri: + if v == nil { + err := CachedOutputValidationError{ + field: "Output", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for OutputUri + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return CachedOutputMultiError(errors) + } + + return nil +} + +// CachedOutputMultiError is an error wrapping multiple validation errors +// returned by CachedOutput.ValidateAll() if the designated constraints aren't met. +type CachedOutputMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CachedOutputMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CachedOutputMultiError) AllErrors() []error { return m } + +// CachedOutputValidationError is the validation error returned by +// CachedOutput.Validate if the designated constraints aren't met. +type CachedOutputValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CachedOutputValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CachedOutputValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CachedOutputValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e CachedOutputValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CachedOutputValidationError) ErrorName() string { return "CachedOutputValidationError" } + +// Error satisfies the builtin error interface +func (e CachedOutputValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCachedOutput.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CachedOutputValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CachedOutputValidationError{} + +// Validate checks the field values on GetCacheRequest with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *GetCacheRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GetCacheRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// GetCacheRequestMultiError, or nil if none found. +func (m *GetCacheRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *GetCacheRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Key + + if len(errors) > 0 { + return GetCacheRequestMultiError(errors) + } + + return nil +} + +// GetCacheRequestMultiError is an error wrapping multiple validation errors +// returned by GetCacheRequest.ValidateAll() if the designated constraints +// aren't met. +type GetCacheRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m GetCacheRequestMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GetCacheRequestMultiError) AllErrors() []error { return m } + +// GetCacheRequestValidationError is the validation error returned by +// GetCacheRequest.Validate if the designated constraints aren't met. +type GetCacheRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GetCacheRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GetCacheRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GetCacheRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GetCacheRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e GetCacheRequestValidationError) ErrorName() string { return "GetCacheRequestValidationError" } + +// Error satisfies the builtin error interface +func (e GetCacheRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGetCacheRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GetCacheRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GetCacheRequestValidationError{} + +// Validate checks the field values on GetCacheResponse with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. 
+func (m *GetCacheResponse) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GetCacheResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// GetCacheResponseMultiError, or nil if none found. +func (m *GetCacheResponse) ValidateAll() error { + return m.validate(true) +} + +func (m *GetCacheResponse) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetOutput()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GetCacheResponseValidationError{ + field: "Output", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GetCacheResponseValidationError{ + field: "Output", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOutput()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GetCacheResponseValidationError{ + field: "Output", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return GetCacheResponseMultiError(errors) + } + + return nil +} + +// GetCacheResponseMultiError is an error wrapping multiple validation errors +// returned by GetCacheResponse.ValidateAll() if the designated constraints +// aren't met. +type GetCacheResponseMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GetCacheResponseMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m GetCacheResponseMultiError) AllErrors() []error { return m } + +// GetCacheResponseValidationError is the validation error returned by +// GetCacheResponse.Validate if the designated constraints aren't met. +type GetCacheResponseValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GetCacheResponseValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GetCacheResponseValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GetCacheResponseValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GetCacheResponseValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e GetCacheResponseValidationError) ErrorName() string { return "GetCacheResponseValidationError" } + +// Error satisfies the builtin error interface +func (e GetCacheResponseValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGetCacheResponse.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GetCacheResponseValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GetCacheResponseValidationError{} + +// Validate checks the field values on OverwriteOutput with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *OverwriteOutput) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on OverwriteOutput with the rules +// defined in the proto definition for this message. 
If any rules are +// violated, the result is a list of violation errors wrapped in +// OverwriteOutputMultiError, or nil if none found. +func (m *OverwriteOutput) ValidateAll() error { + return m.validate(true) +} + +func (m *OverwriteOutput) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Overwrite + + // no validation rules for DeleteBlob + + if all { + switch v := interface{}(m.GetMaxAge()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, OverwriteOutputValidationError{ + field: "MaxAge", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, OverwriteOutputValidationError{ + field: "MaxAge", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMaxAge()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return OverwriteOutputValidationError{ + field: "MaxAge", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return OverwriteOutputMultiError(errors) + } + + return nil +} + +// OverwriteOutputMultiError is an error wrapping multiple validation errors +// returned by OverwriteOutput.ValidateAll() if the designated constraints +// aren't met. +type OverwriteOutputMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m OverwriteOutputMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m OverwriteOutputMultiError) AllErrors() []error { return m } + +// OverwriteOutputValidationError is the validation error returned by +// OverwriteOutput.Validate if the designated constraints aren't met. +type OverwriteOutputValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e OverwriteOutputValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e OverwriteOutputValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e OverwriteOutputValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e OverwriteOutputValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e OverwriteOutputValidationError) ErrorName() string { return "OverwriteOutputValidationError" } + +// Error satisfies the builtin error interface +func (e OverwriteOutputValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sOverwriteOutput.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = OverwriteOutputValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = OverwriteOutputValidationError{} + +// Validate checks the field values on PutCacheRequest with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *PutCacheRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on PutCacheRequest with the rules +// defined in the proto definition for this message. 
If any rules are +// violated, the result is a list of violation errors wrapped in +// PutCacheRequestMultiError, or nil if none found. +func (m *PutCacheRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *PutCacheRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Key + + if all { + switch v := interface{}(m.GetOutput()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PutCacheRequestValidationError{ + field: "Output", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PutCacheRequestValidationError{ + field: "Output", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOutput()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PutCacheRequestValidationError{ + field: "Output", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetOverwrite()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PutCacheRequestValidationError{ + field: "Overwrite", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PutCacheRequestValidationError{ + field: "Overwrite", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOverwrite()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PutCacheRequestValidationError{ + field: "Overwrite", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return 
PutCacheRequestMultiError(errors) + } + + return nil +} + +// PutCacheRequestMultiError is an error wrapping multiple validation errors +// returned by PutCacheRequest.ValidateAll() if the designated constraints +// aren't met. +type PutCacheRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m PutCacheRequestMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m PutCacheRequestMultiError) AllErrors() []error { return m } + +// PutCacheRequestValidationError is the validation error returned by +// PutCacheRequest.Validate if the designated constraints aren't met. +type PutCacheRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e PutCacheRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e PutCacheRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e PutCacheRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e PutCacheRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e PutCacheRequestValidationError) ErrorName() string { return "PutCacheRequestValidationError" } + +// Error satisfies the builtin error interface +func (e PutCacheRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sPutCacheRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = PutCacheRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = PutCacheRequestValidationError{} + +// Validate checks the field values on PutCacheResponse with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *PutCacheResponse) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on PutCacheResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// PutCacheResponseMultiError, or nil if none found. +func (m *PutCacheResponse) ValidateAll() error { + return m.validate(true) +} + +func (m *PutCacheResponse) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return PutCacheResponseMultiError(errors) + } + + return nil +} + +// PutCacheResponseMultiError is an error wrapping multiple validation errors +// returned by PutCacheResponse.ValidateAll() if the designated constraints +// aren't met. +type PutCacheResponseMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m PutCacheResponseMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m PutCacheResponseMultiError) AllErrors() []error { return m } + +// PutCacheResponseValidationError is the validation error returned by +// PutCacheResponse.Validate if the designated constraints aren't met. +type PutCacheResponseValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e PutCacheResponseValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e PutCacheResponseValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e PutCacheResponseValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e PutCacheResponseValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e PutCacheResponseValidationError) ErrorName() string { return "PutCacheResponseValidationError" } + +// Error satisfies the builtin error interface +func (e PutCacheResponseValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sPutCacheResponse.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = PutCacheResponseValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = PutCacheResponseValidationError{} + +// Validate checks the field values on DeleteCacheRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. 
+func (m *DeleteCacheRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DeleteCacheRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// DeleteCacheRequestMultiError, or nil if none found. +func (m *DeleteCacheRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *DeleteCacheRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Key + + if len(errors) > 0 { + return DeleteCacheRequestMultiError(errors) + } + + return nil +} + +// DeleteCacheRequestMultiError is an error wrapping multiple validation errors +// returned by DeleteCacheRequest.ValidateAll() if the designated constraints +// aren't met. +type DeleteCacheRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DeleteCacheRequestMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DeleteCacheRequestMultiError) AllErrors() []error { return m } + +// DeleteCacheRequestValidationError is the validation error returned by +// DeleteCacheRequest.Validate if the designated constraints aren't met. +type DeleteCacheRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DeleteCacheRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DeleteCacheRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DeleteCacheRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e DeleteCacheRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e DeleteCacheRequestValidationError) ErrorName() string { + return "DeleteCacheRequestValidationError" +} + +// Error satisfies the builtin error interface +func (e DeleteCacheRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDeleteCacheRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DeleteCacheRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DeleteCacheRequestValidationError{} + +// Validate checks the field values on DeleteCacheResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *DeleteCacheResponse) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DeleteCacheResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// DeleteCacheResponseMultiError, or nil if none found. +func (m *DeleteCacheResponse) ValidateAll() error { + return m.validate(true) +} + +func (m *DeleteCacheResponse) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return DeleteCacheResponseMultiError(errors) + } + + return nil +} + +// DeleteCacheResponseMultiError is an error wrapping multiple validation +// errors returned by DeleteCacheResponse.ValidateAll() if the designated +// constraints aren't met. +type DeleteCacheResponseMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m DeleteCacheResponseMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DeleteCacheResponseMultiError) AllErrors() []error { return m } + +// DeleteCacheResponseValidationError is the validation error returned by +// DeleteCacheResponse.Validate if the designated constraints aren't met. +type DeleteCacheResponseValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DeleteCacheResponseValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DeleteCacheResponseValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DeleteCacheResponseValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DeleteCacheResponseValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e DeleteCacheResponseValidationError) ErrorName() string { + return "DeleteCacheResponseValidationError" +} + +// Error satisfies the builtin error interface +func (e DeleteCacheResponseValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDeleteCacheResponse.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DeleteCacheResponseValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DeleteCacheResponseValidationError{} + +// Validate checks the field values on Reservation with the rules defined in +// the proto definition for this message. 
If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Reservation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Reservation with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ReservationMultiError, or +// nil if none found. +func (m *Reservation) ValidateAll() error { + return m.validate(true) +} + +func (m *Reservation) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Key + + // no validation rules for OwnerId + + if all { + switch v := interface{}(m.GetHeartbeatInterval()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ReservationValidationError{ + field: "HeartbeatInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ReservationValidationError{ + field: "HeartbeatInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHeartbeatInterval()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ReservationValidationError{ + field: "HeartbeatInterval", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetExpiresAt()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ReservationValidationError{ + field: "ExpiresAt", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ReservationValidationError{ + field: "ExpiresAt", + reason: "embedded message 
failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetExpiresAt()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ReservationValidationError{ + field: "ExpiresAt", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ReservationMultiError(errors) + } + + return nil +} + +// ReservationMultiError is an error wrapping multiple validation errors +// returned by Reservation.ValidateAll() if the designated constraints aren't met. +type ReservationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ReservationMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ReservationMultiError) AllErrors() []error { return m } + +// ReservationValidationError is the validation error returned by +// Reservation.Validate if the designated constraints aren't met. +type ReservationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ReservationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ReservationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ReservationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ReservationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ReservationValidationError) ErrorName() string { return "ReservationValidationError" } + +// Error satisfies the builtin error interface +func (e ReservationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sReservation.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ReservationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ReservationValidationError{} + +// Validate checks the field values on GetOrExtendReservationRequest with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *GetOrExtendReservationRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GetOrExtendReservationRequest with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// GetOrExtendReservationRequestMultiError, or nil if none found. 
+func (m *GetOrExtendReservationRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *GetOrExtendReservationRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Key + + // no validation rules for OwnerId + + if all { + switch v := interface{}(m.GetHeartbeatInterval()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GetOrExtendReservationRequestValidationError{ + field: "HeartbeatInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GetOrExtendReservationRequestValidationError{ + field: "HeartbeatInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHeartbeatInterval()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GetOrExtendReservationRequestValidationError{ + field: "HeartbeatInterval", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return GetOrExtendReservationRequestMultiError(errors) + } + + return nil +} + +// GetOrExtendReservationRequestMultiError is an error wrapping multiple +// validation errors returned by GetOrExtendReservationRequest.ValidateAll() +// if the designated constraints aren't met. +type GetOrExtendReservationRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GetOrExtendReservationRequestMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m GetOrExtendReservationRequestMultiError) AllErrors() []error { return m } + +// GetOrExtendReservationRequestValidationError is the validation error +// returned by GetOrExtendReservationRequest.Validate if the designated +// constraints aren't met. +type GetOrExtendReservationRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GetOrExtendReservationRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GetOrExtendReservationRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GetOrExtendReservationRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GetOrExtendReservationRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e GetOrExtendReservationRequestValidationError) ErrorName() string { + return "GetOrExtendReservationRequestValidationError" +} + +// Error satisfies the builtin error interface +func (e GetOrExtendReservationRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGetOrExtendReservationRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GetOrExtendReservationRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GetOrExtendReservationRequestValidationError{} + +// Validate checks the field values on GetOrExtendReservationResponse with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. 
+func (m *GetOrExtendReservationResponse) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GetOrExtendReservationResponse with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// GetOrExtendReservationResponseMultiError, or nil if none found. +func (m *GetOrExtendReservationResponse) ValidateAll() error { + return m.validate(true) +} + +func (m *GetOrExtendReservationResponse) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetReservation()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GetOrExtendReservationResponseValidationError{ + field: "Reservation", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GetOrExtendReservationResponseValidationError{ + field: "Reservation", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetReservation()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GetOrExtendReservationResponseValidationError{ + field: "Reservation", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return GetOrExtendReservationResponseMultiError(errors) + } + + return nil +} + +// GetOrExtendReservationResponseMultiError is an error wrapping multiple +// validation errors returned by GetOrExtendReservationResponse.ValidateAll() +// if the designated constraints aren't met. +type GetOrExtendReservationResponseMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m GetOrExtendReservationResponseMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GetOrExtendReservationResponseMultiError) AllErrors() []error { return m } + +// GetOrExtendReservationResponseValidationError is the validation error +// returned by GetOrExtendReservationResponse.Validate if the designated +// constraints aren't met. +type GetOrExtendReservationResponseValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GetOrExtendReservationResponseValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GetOrExtendReservationResponseValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GetOrExtendReservationResponseValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GetOrExtendReservationResponseValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e GetOrExtendReservationResponseValidationError) ErrorName() string { + return "GetOrExtendReservationResponseValidationError" +} + +// Error satisfies the builtin error interface +func (e GetOrExtendReservationResponseValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGetOrExtendReservationResponse.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GetOrExtendReservationResponseValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GetOrExtendReservationResponseValidationError{} + +// Validate checks the field values on ReleaseReservationRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ReleaseReservationRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ReleaseReservationRequest with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ReleaseReservationRequestMultiError, or nil if none found. +func (m *ReleaseReservationRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *ReleaseReservationRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Key + + // no validation rules for OwnerId + + if len(errors) > 0 { + return ReleaseReservationRequestMultiError(errors) + } + + return nil +} + +// ReleaseReservationRequestMultiError is an error wrapping multiple validation +// errors returned by ReleaseReservationRequest.ValidateAll() if the +// designated constraints aren't met. 
+type ReleaseReservationRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ReleaseReservationRequestMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ReleaseReservationRequestMultiError) AllErrors() []error { return m } + +// ReleaseReservationRequestValidationError is the validation error returned by +// ReleaseReservationRequest.Validate if the designated constraints aren't met. +type ReleaseReservationRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ReleaseReservationRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ReleaseReservationRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ReleaseReservationRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ReleaseReservationRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ReleaseReservationRequestValidationError) ErrorName() string { + return "ReleaseReservationRequestValidationError" +} + +// Error satisfies the builtin error interface +func (e ReleaseReservationRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sReleaseReservationRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ReleaseReservationRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ReleaseReservationRequestValidationError{} + +// Validate checks the field values on ReleaseReservationResponse with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ReleaseReservationResponse) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ReleaseReservationResponse with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ReleaseReservationResponseMultiError, or nil if none found. +func (m *ReleaseReservationResponse) ValidateAll() error { + return m.validate(true) +} + +func (m *ReleaseReservationResponse) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return ReleaseReservationResponseMultiError(errors) + } + + return nil +} + +// ReleaseReservationResponseMultiError is an error wrapping multiple +// validation errors returned by ReleaseReservationResponse.ValidateAll() if +// the designated constraints aren't met. +type ReleaseReservationResponseMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m ReleaseReservationResponseMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ReleaseReservationResponseMultiError) AllErrors() []error { return m } + +// ReleaseReservationResponseValidationError is the validation error returned +// by ReleaseReservationResponse.Validate if the designated constraints aren't met. +type ReleaseReservationResponseValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ReleaseReservationResponseValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ReleaseReservationResponseValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ReleaseReservationResponseValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ReleaseReservationResponseValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ReleaseReservationResponseValidationError) ErrorName() string { + return "ReleaseReservationResponseValidationError" +} + +// Error satisfies the builtin error interface +func (e ReleaseReservationResponseValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sReleaseReservationResponse.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ReleaseReservationResponseValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ReleaseReservationResponseValidationError{} diff --git a/gen/go/flyteidl2/cacheservice/cacheservice_grpc.pb.go b/gen/go/flyteidl2/cacheservice/cacheservice_grpc.pb.go new file mode 100644 index 0000000000..dcf1587e3e --- /dev/null +++ b/gen/go/flyteidl2/cacheservice/cacheservice_grpc.pb.go @@ -0,0 +1,265 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc (unknown) +// source: flyteidl2/cacheservice/cacheservice.proto + +package cacheservice + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. 
+const _ = grpc.SupportPackageIsVersion7 + +const ( + CacheService_Get_FullMethodName = "/flyteidl2.cacheservice.CacheService/Get" + CacheService_Put_FullMethodName = "/flyteidl2.cacheservice.CacheService/Put" + CacheService_Delete_FullMethodName = "/flyteidl2.cacheservice.CacheService/Delete" + CacheService_GetOrExtendReservation_FullMethodName = "/flyteidl2.cacheservice.CacheService/GetOrExtendReservation" + CacheService_ReleaseReservation_FullMethodName = "/flyteidl2.cacheservice.CacheService/ReleaseReservation" +) + +// CacheServiceClient is the client API for CacheService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type CacheServiceClient interface { + // Retrieves cached data by key. + Get(ctx context.Context, in *GetCacheRequest, opts ...grpc.CallOption) (*GetCacheResponse, error) + // Stores or updates cached data by key. + Put(ctx context.Context, in *PutCacheRequest, opts ...grpc.CallOption) (*PutCacheResponse, error) + // Deletes cached data by key. 
+ Delete(ctx context.Context, in *DeleteCacheRequest, opts ...grpc.CallOption) (*DeleteCacheResponse, error) + // Get or extend a reservation for a cache key + GetOrExtendReservation(ctx context.Context, in *GetOrExtendReservationRequest, opts ...grpc.CallOption) (*GetOrExtendReservationResponse, error) + // Release the reservation for a cache key + ReleaseReservation(ctx context.Context, in *ReleaseReservationRequest, opts ...grpc.CallOption) (*ReleaseReservationResponse, error) +} + +type cacheServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewCacheServiceClient(cc grpc.ClientConnInterface) CacheServiceClient { + return &cacheServiceClient{cc} +} + +func (c *cacheServiceClient) Get(ctx context.Context, in *GetCacheRequest, opts ...grpc.CallOption) (*GetCacheResponse, error) { + out := new(GetCacheResponse) + err := c.cc.Invoke(ctx, CacheService_Get_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *cacheServiceClient) Put(ctx context.Context, in *PutCacheRequest, opts ...grpc.CallOption) (*PutCacheResponse, error) { + out := new(PutCacheResponse) + err := c.cc.Invoke(ctx, CacheService_Put_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *cacheServiceClient) Delete(ctx context.Context, in *DeleteCacheRequest, opts ...grpc.CallOption) (*DeleteCacheResponse, error) { + out := new(DeleteCacheResponse) + err := c.cc.Invoke(ctx, CacheService_Delete_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *cacheServiceClient) GetOrExtendReservation(ctx context.Context, in *GetOrExtendReservationRequest, opts ...grpc.CallOption) (*GetOrExtendReservationResponse, error) { + out := new(GetOrExtendReservationResponse) + err := c.cc.Invoke(ctx, CacheService_GetOrExtendReservation_FullMethodName, in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *cacheServiceClient) ReleaseReservation(ctx context.Context, in *ReleaseReservationRequest, opts ...grpc.CallOption) (*ReleaseReservationResponse, error) { + out := new(ReleaseReservationResponse) + err := c.cc.Invoke(ctx, CacheService_ReleaseReservation_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// CacheServiceServer is the server API for CacheService service. +// All implementations should embed UnimplementedCacheServiceServer +// for forward compatibility +type CacheServiceServer interface { + // Retrieves cached data by key. + Get(context.Context, *GetCacheRequest) (*GetCacheResponse, error) + // Stores or updates cached data by key. + Put(context.Context, *PutCacheRequest) (*PutCacheResponse, error) + // Deletes cached data by key. + Delete(context.Context, *DeleteCacheRequest) (*DeleteCacheResponse, error) + // Get or extend a reservation for a cache key + GetOrExtendReservation(context.Context, *GetOrExtendReservationRequest) (*GetOrExtendReservationResponse, error) + // Release the reservation for a cache key + ReleaseReservation(context.Context, *ReleaseReservationRequest) (*ReleaseReservationResponse, error) +} + +// UnimplementedCacheServiceServer should be embedded to have forward compatible implementations. 
+type UnimplementedCacheServiceServer struct { +} + +func (UnimplementedCacheServiceServer) Get(context.Context, *GetCacheRequest) (*GetCacheResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") +} +func (UnimplementedCacheServiceServer) Put(context.Context, *PutCacheRequest) (*PutCacheResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Put not implemented") +} +func (UnimplementedCacheServiceServer) Delete(context.Context, *DeleteCacheRequest) (*DeleteCacheResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") +} +func (UnimplementedCacheServiceServer) GetOrExtendReservation(context.Context, *GetOrExtendReservationRequest) (*GetOrExtendReservationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetOrExtendReservation not implemented") +} +func (UnimplementedCacheServiceServer) ReleaseReservation(context.Context, *ReleaseReservationRequest) (*ReleaseReservationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReleaseReservation not implemented") +} + +// UnsafeCacheServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to CacheServiceServer will +// result in compilation errors. 
+type UnsafeCacheServiceServer interface { + mustEmbedUnimplementedCacheServiceServer() +} + +func RegisterCacheServiceServer(s grpc.ServiceRegistrar, srv CacheServiceServer) { + s.RegisterService(&CacheService_ServiceDesc, srv) +} + +func _CacheService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetCacheRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CacheServiceServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: CacheService_Get_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CacheServiceServer).Get(ctx, req.(*GetCacheRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CacheService_Put_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PutCacheRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CacheServiceServer).Put(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: CacheService_Put_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CacheServiceServer).Put(ctx, req.(*PutCacheRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CacheService_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteCacheRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CacheServiceServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: CacheService_Delete_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + 
return srv.(CacheServiceServer).Delete(ctx, req.(*DeleteCacheRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CacheService_GetOrExtendReservation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetOrExtendReservationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CacheServiceServer).GetOrExtendReservation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: CacheService_GetOrExtendReservation_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CacheServiceServer).GetOrExtendReservation(ctx, req.(*GetOrExtendReservationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CacheService_ReleaseReservation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReleaseReservationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CacheServiceServer).ReleaseReservation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: CacheService_ReleaseReservation_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CacheServiceServer).ReleaseReservation(ctx, req.(*ReleaseReservationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// CacheService_ServiceDesc is the grpc.ServiceDesc for CacheService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var CacheService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "flyteidl2.cacheservice.CacheService", + HandlerType: (*CacheServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + Handler: _CacheService_Get_Handler, + }, + { + MethodName: "Put", + Handler: _CacheService_Put_Handler, + }, + { + MethodName: "Delete", + Handler: _CacheService_Delete_Handler, + }, + { + MethodName: "GetOrExtendReservation", + Handler: _CacheService_GetOrExtendReservation_Handler, + }, + { + MethodName: "ReleaseReservation", + Handler: _CacheService_ReleaseReservation_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "flyteidl2/cacheservice/cacheservice.proto", +} diff --git a/gen/go/flyteidl2/cacheservice/cacheserviceconnect/cacheservice.connect.go b/gen/go/flyteidl2/cacheservice/cacheserviceconnect/cacheservice.connect.go new file mode 100644 index 0000000000..ca9ca381dc --- /dev/null +++ b/gen/go/flyteidl2/cacheservice/cacheserviceconnect/cacheservice.connect.go @@ -0,0 +1,240 @@ +// Code generated by protoc-gen-connect-go. DO NOT EDIT. +// +// Source: flyteidl2/cacheservice/cacheservice.proto + +package cacheserviceconnect + +import ( + connect "connectrpc.com/connect" + context "context" + errors "errors" + cacheservice "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/cacheservice" + http "net/http" + strings "strings" +) + +// This is a compile-time assertion to ensure that this generated file and the connect package are +// compatible. If you get a compiler error that this constant is not defined, this code was +// generated with a version of connect newer than the one compiled into your binary. You can fix the +// problem by either regenerating this code with an older version of connect or updating the connect +// version compiled into your binary. 
+const _ = connect.IsAtLeastVersion1_13_0 + +const ( + // CacheServiceName is the fully-qualified name of the CacheService service. + CacheServiceName = "flyteidl2.cacheservice.CacheService" +) + +// These constants are the fully-qualified names of the RPCs defined in this package. They're +// exposed at runtime as Spec.Procedure and as the final two segments of the HTTP route. +// +// Note that these are different from the fully-qualified method names used by +// google.golang.org/protobuf/reflect/protoreflect. To convert from these constants to +// reflection-formatted method names, remove the leading slash and convert the remaining slash to a +// period. +const ( + // CacheServiceGetProcedure is the fully-qualified name of the CacheService's Get RPC. + CacheServiceGetProcedure = "/flyteidl2.cacheservice.CacheService/Get" + // CacheServicePutProcedure is the fully-qualified name of the CacheService's Put RPC. + CacheServicePutProcedure = "/flyteidl2.cacheservice.CacheService/Put" + // CacheServiceDeleteProcedure is the fully-qualified name of the CacheService's Delete RPC. + CacheServiceDeleteProcedure = "/flyteidl2.cacheservice.CacheService/Delete" + // CacheServiceGetOrExtendReservationProcedure is the fully-qualified name of the CacheService's + // GetOrExtendReservation RPC. + CacheServiceGetOrExtendReservationProcedure = "/flyteidl2.cacheservice.CacheService/GetOrExtendReservation" + // CacheServiceReleaseReservationProcedure is the fully-qualified name of the CacheService's + // ReleaseReservation RPC. + CacheServiceReleaseReservationProcedure = "/flyteidl2.cacheservice.CacheService/ReleaseReservation" +) + +// These variables are the protoreflect.Descriptor objects for the RPCs defined in this package. 
+var ( + cacheServiceServiceDescriptor = cacheservice.File_flyteidl2_cacheservice_cacheservice_proto.Services().ByName("CacheService") + cacheServiceGetMethodDescriptor = cacheServiceServiceDescriptor.Methods().ByName("Get") + cacheServicePutMethodDescriptor = cacheServiceServiceDescriptor.Methods().ByName("Put") + cacheServiceDeleteMethodDescriptor = cacheServiceServiceDescriptor.Methods().ByName("Delete") + cacheServiceGetOrExtendReservationMethodDescriptor = cacheServiceServiceDescriptor.Methods().ByName("GetOrExtendReservation") + cacheServiceReleaseReservationMethodDescriptor = cacheServiceServiceDescriptor.Methods().ByName("ReleaseReservation") +) + +// CacheServiceClient is a client for the flyteidl2.cacheservice.CacheService service. +type CacheServiceClient interface { + // Retrieves cached data by key. + Get(context.Context, *connect.Request[cacheservice.GetCacheRequest]) (*connect.Response[cacheservice.GetCacheResponse], error) + // Stores or updates cached data by key. + Put(context.Context, *connect.Request[cacheservice.PutCacheRequest]) (*connect.Response[cacheservice.PutCacheResponse], error) + // Deletes cached data by key. + Delete(context.Context, *connect.Request[cacheservice.DeleteCacheRequest]) (*connect.Response[cacheservice.DeleteCacheResponse], error) + // Get or extend a reservation for a cache key + GetOrExtendReservation(context.Context, *connect.Request[cacheservice.GetOrExtendReservationRequest]) (*connect.Response[cacheservice.GetOrExtendReservationResponse], error) + // Release the reservation for a cache key + ReleaseReservation(context.Context, *connect.Request[cacheservice.ReleaseReservationRequest]) (*connect.Response[cacheservice.ReleaseReservationResponse], error) +} + +// NewCacheServiceClient constructs a client for the flyteidl2.cacheservice.CacheService service. By +// default, it uses the Connect protocol with the binary Protobuf Codec, asks for gzipped responses, +// and sends uncompressed requests. 
To use the gRPC or gRPC-Web protocols, supply the +// connect.WithGRPC() or connect.WithGRPCWeb() options. +// +// The URL supplied here should be the base URL for the Connect or gRPC server (for example, +// http://api.acme.com or https://acme.com/grpc). +func NewCacheServiceClient(httpClient connect.HTTPClient, baseURL string, opts ...connect.ClientOption) CacheServiceClient { + baseURL = strings.TrimRight(baseURL, "/") + return &cacheServiceClient{ + get: connect.NewClient[cacheservice.GetCacheRequest, cacheservice.GetCacheResponse]( + httpClient, + baseURL+CacheServiceGetProcedure, + connect.WithSchema(cacheServiceGetMethodDescriptor), + connect.WithClientOptions(opts...), + ), + put: connect.NewClient[cacheservice.PutCacheRequest, cacheservice.PutCacheResponse]( + httpClient, + baseURL+CacheServicePutProcedure, + connect.WithSchema(cacheServicePutMethodDescriptor), + connect.WithClientOptions(opts...), + ), + delete: connect.NewClient[cacheservice.DeleteCacheRequest, cacheservice.DeleteCacheResponse]( + httpClient, + baseURL+CacheServiceDeleteProcedure, + connect.WithSchema(cacheServiceDeleteMethodDescriptor), + connect.WithClientOptions(opts...), + ), + getOrExtendReservation: connect.NewClient[cacheservice.GetOrExtendReservationRequest, cacheservice.GetOrExtendReservationResponse]( + httpClient, + baseURL+CacheServiceGetOrExtendReservationProcedure, + connect.WithSchema(cacheServiceGetOrExtendReservationMethodDescriptor), + connect.WithClientOptions(opts...), + ), + releaseReservation: connect.NewClient[cacheservice.ReleaseReservationRequest, cacheservice.ReleaseReservationResponse]( + httpClient, + baseURL+CacheServiceReleaseReservationProcedure, + connect.WithSchema(cacheServiceReleaseReservationMethodDescriptor), + connect.WithClientOptions(opts...), + ), + } +} + +// cacheServiceClient implements CacheServiceClient. 
+type cacheServiceClient struct { + get *connect.Client[cacheservice.GetCacheRequest, cacheservice.GetCacheResponse] + put *connect.Client[cacheservice.PutCacheRequest, cacheservice.PutCacheResponse] + delete *connect.Client[cacheservice.DeleteCacheRequest, cacheservice.DeleteCacheResponse] + getOrExtendReservation *connect.Client[cacheservice.GetOrExtendReservationRequest, cacheservice.GetOrExtendReservationResponse] + releaseReservation *connect.Client[cacheservice.ReleaseReservationRequest, cacheservice.ReleaseReservationResponse] +} + +// Get calls flyteidl2.cacheservice.CacheService.Get. +func (c *cacheServiceClient) Get(ctx context.Context, req *connect.Request[cacheservice.GetCacheRequest]) (*connect.Response[cacheservice.GetCacheResponse], error) { + return c.get.CallUnary(ctx, req) +} + +// Put calls flyteidl2.cacheservice.CacheService.Put. +func (c *cacheServiceClient) Put(ctx context.Context, req *connect.Request[cacheservice.PutCacheRequest]) (*connect.Response[cacheservice.PutCacheResponse], error) { + return c.put.CallUnary(ctx, req) +} + +// Delete calls flyteidl2.cacheservice.CacheService.Delete. +func (c *cacheServiceClient) Delete(ctx context.Context, req *connect.Request[cacheservice.DeleteCacheRequest]) (*connect.Response[cacheservice.DeleteCacheResponse], error) { + return c.delete.CallUnary(ctx, req) +} + +// GetOrExtendReservation calls flyteidl2.cacheservice.CacheService.GetOrExtendReservation. +func (c *cacheServiceClient) GetOrExtendReservation(ctx context.Context, req *connect.Request[cacheservice.GetOrExtendReservationRequest]) (*connect.Response[cacheservice.GetOrExtendReservationResponse], error) { + return c.getOrExtendReservation.CallUnary(ctx, req) +} + +// ReleaseReservation calls flyteidl2.cacheservice.CacheService.ReleaseReservation. 
+func (c *cacheServiceClient) ReleaseReservation(ctx context.Context, req *connect.Request[cacheservice.ReleaseReservationRequest]) (*connect.Response[cacheservice.ReleaseReservationResponse], error) { + return c.releaseReservation.CallUnary(ctx, req) +} + +// CacheServiceHandler is an implementation of the flyteidl2.cacheservice.CacheService service. +type CacheServiceHandler interface { + // Retrieves cached data by key. + Get(context.Context, *connect.Request[cacheservice.GetCacheRequest]) (*connect.Response[cacheservice.GetCacheResponse], error) + // Stores or updates cached data by key. + Put(context.Context, *connect.Request[cacheservice.PutCacheRequest]) (*connect.Response[cacheservice.PutCacheResponse], error) + // Deletes cached data by key. + Delete(context.Context, *connect.Request[cacheservice.DeleteCacheRequest]) (*connect.Response[cacheservice.DeleteCacheResponse], error) + // Get or extend a reservation for a cache key + GetOrExtendReservation(context.Context, *connect.Request[cacheservice.GetOrExtendReservationRequest]) (*connect.Response[cacheservice.GetOrExtendReservationResponse], error) + // Release the reservation for a cache key + ReleaseReservation(context.Context, *connect.Request[cacheservice.ReleaseReservationRequest]) (*connect.Response[cacheservice.ReleaseReservationResponse], error) +} + +// NewCacheServiceHandler builds an HTTP handler from the service implementation. It returns the +// path on which to mount the handler and the handler itself. +// +// By default, handlers support the Connect, gRPC, and gRPC-Web protocols with the binary Protobuf +// and JSON codecs. They also support gzip compression. 
+func NewCacheServiceHandler(svc CacheServiceHandler, opts ...connect.HandlerOption) (string, http.Handler) { + cacheServiceGetHandler := connect.NewUnaryHandler( + CacheServiceGetProcedure, + svc.Get, + connect.WithSchema(cacheServiceGetMethodDescriptor), + connect.WithHandlerOptions(opts...), + ) + cacheServicePutHandler := connect.NewUnaryHandler( + CacheServicePutProcedure, + svc.Put, + connect.WithSchema(cacheServicePutMethodDescriptor), + connect.WithHandlerOptions(opts...), + ) + cacheServiceDeleteHandler := connect.NewUnaryHandler( + CacheServiceDeleteProcedure, + svc.Delete, + connect.WithSchema(cacheServiceDeleteMethodDescriptor), + connect.WithHandlerOptions(opts...), + ) + cacheServiceGetOrExtendReservationHandler := connect.NewUnaryHandler( + CacheServiceGetOrExtendReservationProcedure, + svc.GetOrExtendReservation, + connect.WithSchema(cacheServiceGetOrExtendReservationMethodDescriptor), + connect.WithHandlerOptions(opts...), + ) + cacheServiceReleaseReservationHandler := connect.NewUnaryHandler( + CacheServiceReleaseReservationProcedure, + svc.ReleaseReservation, + connect.WithSchema(cacheServiceReleaseReservationMethodDescriptor), + connect.WithHandlerOptions(opts...), + ) + return "/flyteidl2.cacheservice.CacheService/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case CacheServiceGetProcedure: + cacheServiceGetHandler.ServeHTTP(w, r) + case CacheServicePutProcedure: + cacheServicePutHandler.ServeHTTP(w, r) + case CacheServiceDeleteProcedure: + cacheServiceDeleteHandler.ServeHTTP(w, r) + case CacheServiceGetOrExtendReservationProcedure: + cacheServiceGetOrExtendReservationHandler.ServeHTTP(w, r) + case CacheServiceReleaseReservationProcedure: + cacheServiceReleaseReservationHandler.ServeHTTP(w, r) + default: + http.NotFound(w, r) + } + }) +} + +// UnimplementedCacheServiceHandler returns CodeUnimplemented from all methods. 
+type UnimplementedCacheServiceHandler struct{} + +func (UnimplementedCacheServiceHandler) Get(context.Context, *connect.Request[cacheservice.GetCacheRequest]) (*connect.Response[cacheservice.GetCacheResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("flyteidl2.cacheservice.CacheService.Get is not implemented")) +} + +func (UnimplementedCacheServiceHandler) Put(context.Context, *connect.Request[cacheservice.PutCacheRequest]) (*connect.Response[cacheservice.PutCacheResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("flyteidl2.cacheservice.CacheService.Put is not implemented")) +} + +func (UnimplementedCacheServiceHandler) Delete(context.Context, *connect.Request[cacheservice.DeleteCacheRequest]) (*connect.Response[cacheservice.DeleteCacheResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("flyteidl2.cacheservice.CacheService.Delete is not implemented")) +} + +func (UnimplementedCacheServiceHandler) GetOrExtendReservation(context.Context, *connect.Request[cacheservice.GetOrExtendReservationRequest]) (*connect.Response[cacheservice.GetOrExtendReservationResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("flyteidl2.cacheservice.CacheService.GetOrExtendReservation is not implemented")) +} + +func (UnimplementedCacheServiceHandler) ReleaseReservation(context.Context, *connect.Request[cacheservice.ReleaseReservationRequest]) (*connect.Response[cacheservice.ReleaseReservationResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("flyteidl2.cacheservice.CacheService.ReleaseReservation is not implemented")) +} diff --git a/gen/go/flyteidl2/cacheservice/v2/cacheservice.pb.go b/gen/go/flyteidl2/cacheservice/v2/cacheservice.pb.go new file mode 100644 index 0000000000..f64f140af1 --- /dev/null +++ b/gen/go/flyteidl2/cacheservice/v2/cacheservice.pb.go @@ -0,0 +1,658 @@ +// Code generated by 
protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc (unknown) +// source: flyteidl2/cacheservice/v2/cacheservice.proto + +package v2 + +import ( + _ "buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go/buf/validate" + cacheservice "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/cacheservice" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Identifier for cache operations, including org, project, and domain. +// This is used to scope cache operations to specific organizational contexts. +type Identifier struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Org string `protobuf:"bytes,1,opt,name=org,proto3" json:"org,omitempty"` // Organization identifier + Project string `protobuf:"bytes,2,opt,name=project,proto3" json:"project,omitempty"` // Project identifier + Domain string `protobuf:"bytes,3,opt,name=domain,proto3" json:"domain,omitempty"` // Domain identifier +} + +func (x *Identifier) Reset() { + *x = Identifier{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_cacheservice_v2_cacheservice_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Identifier) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Identifier) ProtoMessage() {} + +func (x *Identifier) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_cacheservice_v2_cacheservice_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + 
ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Identifier.ProtoReflect.Descriptor instead. +func (*Identifier) Descriptor() ([]byte, []int) { + return file_flyteidl2_cacheservice_v2_cacheservice_proto_rawDescGZIP(), []int{0} +} + +func (x *Identifier) GetOrg() string { + if x != nil { + return x.Org + } + return "" +} + +func (x *Identifier) GetProject() string { + if x != nil { + return x.Project + } + return "" +} + +func (x *Identifier) GetDomain() string { + if x != nil { + return x.Domain + } + return "" +} + +// Request to retrieve cached data by key. +type GetCacheRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + BaseRequest *cacheservice.GetCacheRequest `protobuf:"bytes,1,opt,name=base_request,json=baseRequest,proto3" json:"base_request,omitempty"` + Identifier *Identifier `protobuf:"bytes,2,opt,name=identifier,proto3" json:"identifier,omitempty"` // Identifier for the cache operation +} + +func (x *GetCacheRequest) Reset() { + *x = GetCacheRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_cacheservice_v2_cacheservice_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetCacheRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetCacheRequest) ProtoMessage() {} + +func (x *GetCacheRequest) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_cacheservice_v2_cacheservice_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetCacheRequest.ProtoReflect.Descriptor instead. 
+func (*GetCacheRequest) Descriptor() ([]byte, []int) { + return file_flyteidl2_cacheservice_v2_cacheservice_proto_rawDescGZIP(), []int{1} +} + +func (x *GetCacheRequest) GetBaseRequest() *cacheservice.GetCacheRequest { + if x != nil { + return x.BaseRequest + } + return nil +} + +func (x *GetCacheRequest) GetIdentifier() *Identifier { + if x != nil { + return x.Identifier + } + return nil +} + +// Request to store/update cached data by key. +type PutCacheRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + BaseRequest *cacheservice.PutCacheRequest `protobuf:"bytes,1,opt,name=base_request,json=baseRequest,proto3" json:"base_request,omitempty"` + Identifier *Identifier `protobuf:"bytes,2,opt,name=identifier,proto3" json:"identifier,omitempty"` // Identifier for the cache operation +} + +func (x *PutCacheRequest) Reset() { + *x = PutCacheRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_cacheservice_v2_cacheservice_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PutCacheRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PutCacheRequest) ProtoMessage() {} + +func (x *PutCacheRequest) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_cacheservice_v2_cacheservice_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PutCacheRequest.ProtoReflect.Descriptor instead. 
+func (*PutCacheRequest) Descriptor() ([]byte, []int) { + return file_flyteidl2_cacheservice_v2_cacheservice_proto_rawDescGZIP(), []int{2} +} + +func (x *PutCacheRequest) GetBaseRequest() *cacheservice.PutCacheRequest { + if x != nil { + return x.BaseRequest + } + return nil +} + +func (x *PutCacheRequest) GetIdentifier() *Identifier { + if x != nil { + return x.Identifier + } + return nil +} + +// Request to delete cached data by key. +type DeleteCacheRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + BaseRequest *cacheservice.DeleteCacheRequest `protobuf:"bytes,1,opt,name=base_request,json=baseRequest,proto3" json:"base_request,omitempty"` + Identifier *Identifier `protobuf:"bytes,2,opt,name=identifier,proto3" json:"identifier,omitempty"` // Identifier for the cache operation +} + +func (x *DeleteCacheRequest) Reset() { + *x = DeleteCacheRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_cacheservice_v2_cacheservice_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteCacheRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteCacheRequest) ProtoMessage() {} + +func (x *DeleteCacheRequest) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_cacheservice_v2_cacheservice_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteCacheRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteCacheRequest) Descriptor() ([]byte, []int) { + return file_flyteidl2_cacheservice_v2_cacheservice_proto_rawDescGZIP(), []int{3} +} + +func (x *DeleteCacheRequest) GetBaseRequest() *cacheservice.DeleteCacheRequest { + if x != nil { + return x.BaseRequest + } + return nil +} + +func (x *DeleteCacheRequest) GetIdentifier() *Identifier { + if x != nil { + return x.Identifier + } + return nil +} + +// Request to get or extend a reservation for a cache key +type GetOrExtendReservationRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + BaseRequest *cacheservice.GetOrExtendReservationRequest `protobuf:"bytes,1,opt,name=base_request,json=baseRequest,proto3" json:"base_request,omitempty"` + Identifier *Identifier `protobuf:"bytes,2,opt,name=identifier,proto3" json:"identifier,omitempty"` // Identifier for the cache operation +} + +func (x *GetOrExtendReservationRequest) Reset() { + *x = GetOrExtendReservationRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_cacheservice_v2_cacheservice_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetOrExtendReservationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetOrExtendReservationRequest) ProtoMessage() {} + +func (x *GetOrExtendReservationRequest) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_cacheservice_v2_cacheservice_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetOrExtendReservationRequest.ProtoReflect.Descriptor instead. 
+func (*GetOrExtendReservationRequest) Descriptor() ([]byte, []int) { + return file_flyteidl2_cacheservice_v2_cacheservice_proto_rawDescGZIP(), []int{4} +} + +func (x *GetOrExtendReservationRequest) GetBaseRequest() *cacheservice.GetOrExtendReservationRequest { + if x != nil { + return x.BaseRequest + } + return nil +} + +func (x *GetOrExtendReservationRequest) GetIdentifier() *Identifier { + if x != nil { + return x.Identifier + } + return nil +} + +// Request to release the reservation for a cache key +type ReleaseReservationRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + BaseRequest *cacheservice.ReleaseReservationRequest `protobuf:"bytes,1,opt,name=base_request,json=baseRequest,proto3" json:"base_request,omitempty"` + Identifier *Identifier `protobuf:"bytes,2,opt,name=identifier,proto3" json:"identifier,omitempty"` // Identifier for the cache operation +} + +func (x *ReleaseReservationRequest) Reset() { + *x = ReleaseReservationRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_cacheservice_v2_cacheservice_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReleaseReservationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReleaseReservationRequest) ProtoMessage() {} + +func (x *ReleaseReservationRequest) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_cacheservice_v2_cacheservice_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReleaseReservationRequest.ProtoReflect.Descriptor instead. 
+func (*ReleaseReservationRequest) Descriptor() ([]byte, []int) { + return file_flyteidl2_cacheservice_v2_cacheservice_proto_rawDescGZIP(), []int{5} +} + +func (x *ReleaseReservationRequest) GetBaseRequest() *cacheservice.ReleaseReservationRequest { + if x != nil { + return x.BaseRequest + } + return nil +} + +func (x *ReleaseReservationRequest) GetIdentifier() *Identifier { + if x != nil { + return x.Identifier + } + return nil +} + +var File_flyteidl2_cacheservice_v2_cacheservice_proto protoreflect.FileDescriptor + +var file_flyteidl2_cacheservice_v2_cacheservice_proto_rawDesc = []byte{ + 0x0a, 0x2c, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, 0x61, 0x63, 0x68, + 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x63, 0x61, 0x63, 0x68, + 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x19, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x1a, 0x1b, 0x62, 0x75, 0x66, 0x2f, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x29, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x32, 0x2f, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x63, + 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0x6b, 0x0a, 0x0a, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, + 0x19, 0x0a, 0x03, 0x6f, 0x72, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, + 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x03, 0x6f, 0x72, 0x67, 0x12, 0x21, 0x0a, 0x07, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, + 0x72, 0x02, 0x10, 0x01, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1f, 0x0a, + 0x06, 0x64, 0x6f, 0x6d, 0x61, 
0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, + 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x22, 0xac, + 0x01, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x4a, 0x0a, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x52, 0x0b, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, + 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, + 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x49, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x42, 0x06, 0xba, 0x48, 0x03, 0xc8, 0x01, + 0x01, 0x52, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x22, 0xac, 0x01, + 0x0a, 0x0f, 0x50, 0x75, 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x4a, 0x0a, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x2e, 0x50, 0x75, 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x52, 0x0b, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, + 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x25, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 
0x6c, 0x32, 0x2e, 0x63, 0x61, + 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x42, 0x06, 0xba, 0x48, 0x03, 0xc8, 0x01, 0x01, + 0x52, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x22, 0xb2, 0x01, 0x0a, + 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0b, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x42, 0x06, 0xba, + 0x48, 0x03, 0xc8, 0x01, 0x01, 0x52, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, + 0x72, 0x22, 0xc8, 0x01, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x4f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x64, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x58, 0x0a, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x52, 0x65, + 0x73, 
0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x52, 0x0b, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, + 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x25, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, + 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x42, 0x06, 0xba, 0x48, 0x03, 0xc8, 0x01, 0x01, + 0x52, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x22, 0xc0, 0x01, 0x0a, + 0x19, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x54, 0x0a, 0x0c, 0x62, 0x61, + 0x73, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x31, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, + 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, + 0x65, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x52, 0x0b, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x4d, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, + 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x42, 0x06, 0xba, 0x48, 0x03, + 0xc8, 0x01, 0x01, 0x52, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x32, + 0xbb, 0x04, 0x0a, 0x0c, 0x43, 0x61, 0x63, 0x68, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x12, 0x5b, 0x0a, 0x03, 0x47, 0x65, 0x74, 
0x12, 0x2a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, + 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x65, 0x74, + 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5b, 0x0a, + 0x03, 0x50, 0x75, 0x74, 0x12, 0x2a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, + 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x50, 0x75, 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x28, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, + 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x50, 0x75, 0x74, 0x43, 0x61, 0x63, + 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x64, 0x0a, 0x06, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x12, 0x2d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, + 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, + 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x8a, 0x01, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x4f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, + 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x38, 0x2e, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 
0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x72, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, + 0x65, 0x74, 0x4f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7e, 0x0a, + 0x12, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x34, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, + 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, + 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0xf9, 0x01, + 0x0a, 0x1d, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, + 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x42, + 0x11, 0x43, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x48, 0x02, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 
0x6c, 0x32, 0x2f, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x2f, 0x76, 0x32, 0xa2, 0x02, 0x03, 0x46, 0x43, 0x58, 0xaa, 0x02, 0x19, 0x46, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x19, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x5c, 0x43, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x5c, 0x56, 0x32, 0xe2, 0x02, 0x25, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, + 0x43, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5c, 0x56, 0x32, 0x5c, + 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x1b, 0x46, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x3a, 0x3a, 0x43, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var ( + file_flyteidl2_cacheservice_v2_cacheservice_proto_rawDescOnce sync.Once + file_flyteidl2_cacheservice_v2_cacheservice_proto_rawDescData = file_flyteidl2_cacheservice_v2_cacheservice_proto_rawDesc +) + +func file_flyteidl2_cacheservice_v2_cacheservice_proto_rawDescGZIP() []byte { + file_flyteidl2_cacheservice_v2_cacheservice_proto_rawDescOnce.Do(func() { + file_flyteidl2_cacheservice_v2_cacheservice_proto_rawDescData = protoimpl.X.CompressGZIP(file_flyteidl2_cacheservice_v2_cacheservice_proto_rawDescData) + }) + return file_flyteidl2_cacheservice_v2_cacheservice_proto_rawDescData +} + +var file_flyteidl2_cacheservice_v2_cacheservice_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_flyteidl2_cacheservice_v2_cacheservice_proto_goTypes = []interface{}{ + (*Identifier)(nil), // 0: flyteidl2.cacheservice.v2.Identifier + (*GetCacheRequest)(nil), // 1: flyteidl2.cacheservice.v2.GetCacheRequest + (*PutCacheRequest)(nil), // 2: flyteidl2.cacheservice.v2.PutCacheRequest + (*DeleteCacheRequest)(nil), // 
3: flyteidl2.cacheservice.v2.DeleteCacheRequest + (*GetOrExtendReservationRequest)(nil), // 4: flyteidl2.cacheservice.v2.GetOrExtendReservationRequest + (*ReleaseReservationRequest)(nil), // 5: flyteidl2.cacheservice.v2.ReleaseReservationRequest + (*cacheservice.GetCacheRequest)(nil), // 6: flyteidl2.cacheservice.GetCacheRequest + (*cacheservice.PutCacheRequest)(nil), // 7: flyteidl2.cacheservice.PutCacheRequest + (*cacheservice.DeleteCacheRequest)(nil), // 8: flyteidl2.cacheservice.DeleteCacheRequest + (*cacheservice.GetOrExtendReservationRequest)(nil), // 9: flyteidl2.cacheservice.GetOrExtendReservationRequest + (*cacheservice.ReleaseReservationRequest)(nil), // 10: flyteidl2.cacheservice.ReleaseReservationRequest + (*cacheservice.GetCacheResponse)(nil), // 11: flyteidl2.cacheservice.GetCacheResponse + (*cacheservice.PutCacheResponse)(nil), // 12: flyteidl2.cacheservice.PutCacheResponse + (*cacheservice.DeleteCacheResponse)(nil), // 13: flyteidl2.cacheservice.DeleteCacheResponse + (*cacheservice.GetOrExtendReservationResponse)(nil), // 14: flyteidl2.cacheservice.GetOrExtendReservationResponse + (*cacheservice.ReleaseReservationResponse)(nil), // 15: flyteidl2.cacheservice.ReleaseReservationResponse +} +var file_flyteidl2_cacheservice_v2_cacheservice_proto_depIdxs = []int32{ + 6, // 0: flyteidl2.cacheservice.v2.GetCacheRequest.base_request:type_name -> flyteidl2.cacheservice.GetCacheRequest + 0, // 1: flyteidl2.cacheservice.v2.GetCacheRequest.identifier:type_name -> flyteidl2.cacheservice.v2.Identifier + 7, // 2: flyteidl2.cacheservice.v2.PutCacheRequest.base_request:type_name -> flyteidl2.cacheservice.PutCacheRequest + 0, // 3: flyteidl2.cacheservice.v2.PutCacheRequest.identifier:type_name -> flyteidl2.cacheservice.v2.Identifier + 8, // 4: flyteidl2.cacheservice.v2.DeleteCacheRequest.base_request:type_name -> flyteidl2.cacheservice.DeleteCacheRequest + 0, // 5: flyteidl2.cacheservice.v2.DeleteCacheRequest.identifier:type_name -> 
flyteidl2.cacheservice.v2.Identifier + 9, // 6: flyteidl2.cacheservice.v2.GetOrExtendReservationRequest.base_request:type_name -> flyteidl2.cacheservice.GetOrExtendReservationRequest + 0, // 7: flyteidl2.cacheservice.v2.GetOrExtendReservationRequest.identifier:type_name -> flyteidl2.cacheservice.v2.Identifier + 10, // 8: flyteidl2.cacheservice.v2.ReleaseReservationRequest.base_request:type_name -> flyteidl2.cacheservice.ReleaseReservationRequest + 0, // 9: flyteidl2.cacheservice.v2.ReleaseReservationRequest.identifier:type_name -> flyteidl2.cacheservice.v2.Identifier + 1, // 10: flyteidl2.cacheservice.v2.CacheService.Get:input_type -> flyteidl2.cacheservice.v2.GetCacheRequest + 2, // 11: flyteidl2.cacheservice.v2.CacheService.Put:input_type -> flyteidl2.cacheservice.v2.PutCacheRequest + 3, // 12: flyteidl2.cacheservice.v2.CacheService.Delete:input_type -> flyteidl2.cacheservice.v2.DeleteCacheRequest + 4, // 13: flyteidl2.cacheservice.v2.CacheService.GetOrExtendReservation:input_type -> flyteidl2.cacheservice.v2.GetOrExtendReservationRequest + 5, // 14: flyteidl2.cacheservice.v2.CacheService.ReleaseReservation:input_type -> flyteidl2.cacheservice.v2.ReleaseReservationRequest + 11, // 15: flyteidl2.cacheservice.v2.CacheService.Get:output_type -> flyteidl2.cacheservice.GetCacheResponse + 12, // 16: flyteidl2.cacheservice.v2.CacheService.Put:output_type -> flyteidl2.cacheservice.PutCacheResponse + 13, // 17: flyteidl2.cacheservice.v2.CacheService.Delete:output_type -> flyteidl2.cacheservice.DeleteCacheResponse + 14, // 18: flyteidl2.cacheservice.v2.CacheService.GetOrExtendReservation:output_type -> flyteidl2.cacheservice.GetOrExtendReservationResponse + 15, // 19: flyteidl2.cacheservice.v2.CacheService.ReleaseReservation:output_type -> flyteidl2.cacheservice.ReleaseReservationResponse + 15, // [15:20] is the sub-list for method output_type + 10, // [10:15] is the sub-list for method input_type + 10, // [10:10] is the sub-list for extension type_name + 10, // [10:10] is 
the sub-list for extension extendee + 0, // [0:10] is the sub-list for field type_name +} + +func init() { file_flyteidl2_cacheservice_v2_cacheservice_proto_init() } +func file_flyteidl2_cacheservice_v2_cacheservice_proto_init() { + if File_flyteidl2_cacheservice_v2_cacheservice_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_flyteidl2_cacheservice_v2_cacheservice_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Identifier); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_cacheservice_v2_cacheservice_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetCacheRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_cacheservice_v2_cacheservice_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PutCacheRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_cacheservice_v2_cacheservice_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteCacheRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_cacheservice_v2_cacheservice_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetOrExtendReservationRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_cacheservice_v2_cacheservice_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReleaseReservationRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + 
case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_flyteidl2_cacheservice_v2_cacheservice_proto_rawDesc, + NumEnums: 0, + NumMessages: 6, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_flyteidl2_cacheservice_v2_cacheservice_proto_goTypes, + DependencyIndexes: file_flyteidl2_cacheservice_v2_cacheservice_proto_depIdxs, + MessageInfos: file_flyteidl2_cacheservice_v2_cacheservice_proto_msgTypes, + }.Build() + File_flyteidl2_cacheservice_v2_cacheservice_proto = out.File + file_flyteidl2_cacheservice_v2_cacheservice_proto_rawDesc = nil + file_flyteidl2_cacheservice_v2_cacheservice_proto_goTypes = nil + file_flyteidl2_cacheservice_v2_cacheservice_proto_depIdxs = nil +} diff --git a/gen/go/flyteidl2/cacheservice/v2/cacheservice.pb.validate.go b/gen/go/flyteidl2/cacheservice/v2/cacheservice.pb.validate.go new file mode 100644 index 0000000000..d0209a063b --- /dev/null +++ b/gen/go/flyteidl2/cacheservice/v2/cacheservice.pb.validate.go @@ -0,0 +1,938 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: flyteidl2/cacheservice/v2/cacheservice.proto + +package v2 + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on Identifier with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. 
+func (m *Identifier) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Identifier with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in IdentifierMultiError, or +// nil if none found. +func (m *Identifier) ValidateAll() error { + return m.validate(true) +} + +func (m *Identifier) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Org + + // no validation rules for Project + + // no validation rules for Domain + + if len(errors) > 0 { + return IdentifierMultiError(errors) + } + + return nil +} + +// IdentifierMultiError is an error wrapping multiple validation errors +// returned by Identifier.ValidateAll() if the designated constraints aren't met. +type IdentifierMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m IdentifierMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m IdentifierMultiError) AllErrors() []error { return m } + +// IdentifierValidationError is the validation error returned by +// Identifier.Validate if the designated constraints aren't met. +type IdentifierValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e IdentifierValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e IdentifierValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e IdentifierValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e IdentifierValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e IdentifierValidationError) ErrorName() string { return "IdentifierValidationError" } + +// Error satisfies the builtin error interface +func (e IdentifierValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sIdentifier.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = IdentifierValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = IdentifierValidationError{} + +// Validate checks the field values on GetCacheRequest with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *GetCacheRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GetCacheRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// GetCacheRequestMultiError, or nil if none found. 
+func (m *GetCacheRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *GetCacheRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetBaseRequest()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GetCacheRequestValidationError{ + field: "BaseRequest", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GetCacheRequestValidationError{ + field: "BaseRequest", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetBaseRequest()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GetCacheRequestValidationError{ + field: "BaseRequest", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetIdentifier()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GetCacheRequestValidationError{ + field: "Identifier", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GetCacheRequestValidationError{ + field: "Identifier", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetIdentifier()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GetCacheRequestValidationError{ + field: "Identifier", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return GetCacheRequestMultiError(errors) + } + + return nil +} + +// GetCacheRequestMultiError is an error wrapping multiple validation errors +// returned by 
GetCacheRequest.ValidateAll() if the designated constraints +// aren't met. +type GetCacheRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GetCacheRequestMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GetCacheRequestMultiError) AllErrors() []error { return m } + +// GetCacheRequestValidationError is the validation error returned by +// GetCacheRequest.Validate if the designated constraints aren't met. +type GetCacheRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GetCacheRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GetCacheRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GetCacheRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GetCacheRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e GetCacheRequestValidationError) ErrorName() string { return "GetCacheRequestValidationError" } + +// Error satisfies the builtin error interface +func (e GetCacheRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGetCacheRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GetCacheRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GetCacheRequestValidationError{} + +// Validate checks the field values on PutCacheRequest with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *PutCacheRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on PutCacheRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// PutCacheRequestMultiError, or nil if none found. 
+func (m *PutCacheRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *PutCacheRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetBaseRequest()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PutCacheRequestValidationError{ + field: "BaseRequest", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PutCacheRequestValidationError{ + field: "BaseRequest", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetBaseRequest()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PutCacheRequestValidationError{ + field: "BaseRequest", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetIdentifier()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PutCacheRequestValidationError{ + field: "Identifier", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PutCacheRequestValidationError{ + field: "Identifier", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetIdentifier()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PutCacheRequestValidationError{ + field: "Identifier", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return PutCacheRequestMultiError(errors) + } + + return nil +} + +// PutCacheRequestMultiError is an error wrapping multiple validation errors +// returned by 
PutCacheRequest.ValidateAll() if the designated constraints +// aren't met. +type PutCacheRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m PutCacheRequestMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m PutCacheRequestMultiError) AllErrors() []error { return m } + +// PutCacheRequestValidationError is the validation error returned by +// PutCacheRequest.Validate if the designated constraints aren't met. +type PutCacheRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e PutCacheRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e PutCacheRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e PutCacheRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e PutCacheRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e PutCacheRequestValidationError) ErrorName() string { return "PutCacheRequestValidationError" } + +// Error satisfies the builtin error interface +func (e PutCacheRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sPutCacheRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = PutCacheRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = PutCacheRequestValidationError{} + +// Validate checks the field values on DeleteCacheRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *DeleteCacheRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DeleteCacheRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// DeleteCacheRequestMultiError, or nil if none found. 
+func (m *DeleteCacheRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *DeleteCacheRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetBaseRequest()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DeleteCacheRequestValidationError{ + field: "BaseRequest", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DeleteCacheRequestValidationError{ + field: "BaseRequest", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetBaseRequest()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DeleteCacheRequestValidationError{ + field: "BaseRequest", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetIdentifier()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DeleteCacheRequestValidationError{ + field: "Identifier", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DeleteCacheRequestValidationError{ + field: "Identifier", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetIdentifier()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DeleteCacheRequestValidationError{ + field: "Identifier", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return DeleteCacheRequestMultiError(errors) + } + + return nil +} + +// DeleteCacheRequestMultiError is an error wrapping multiple validation errors 
+// returned by DeleteCacheRequest.ValidateAll() if the designated constraints +// aren't met. +type DeleteCacheRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DeleteCacheRequestMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DeleteCacheRequestMultiError) AllErrors() []error { return m } + +// DeleteCacheRequestValidationError is the validation error returned by +// DeleteCacheRequest.Validate if the designated constraints aren't met. +type DeleteCacheRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DeleteCacheRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DeleteCacheRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DeleteCacheRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DeleteCacheRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e DeleteCacheRequestValidationError) ErrorName() string { + return "DeleteCacheRequestValidationError" +} + +// Error satisfies the builtin error interface +func (e DeleteCacheRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDeleteCacheRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DeleteCacheRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DeleteCacheRequestValidationError{} + +// Validate checks the field values on GetOrExtendReservationRequest with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *GetOrExtendReservationRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GetOrExtendReservationRequest with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// GetOrExtendReservationRequestMultiError, or nil if none found. 
+func (m *GetOrExtendReservationRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *GetOrExtendReservationRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetBaseRequest()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GetOrExtendReservationRequestValidationError{ + field: "BaseRequest", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GetOrExtendReservationRequestValidationError{ + field: "BaseRequest", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetBaseRequest()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GetOrExtendReservationRequestValidationError{ + field: "BaseRequest", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetIdentifier()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GetOrExtendReservationRequestValidationError{ + field: "Identifier", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GetOrExtendReservationRequestValidationError{ + field: "Identifier", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetIdentifier()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GetOrExtendReservationRequestValidationError{ + field: "Identifier", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return GetOrExtendReservationRequestMultiError(errors) + } + 
+ return nil +} + +// GetOrExtendReservationRequestMultiError is an error wrapping multiple +// validation errors returned by GetOrExtendReservationRequest.ValidateAll() +// if the designated constraints aren't met. +type GetOrExtendReservationRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GetOrExtendReservationRequestMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GetOrExtendReservationRequestMultiError) AllErrors() []error { return m } + +// GetOrExtendReservationRequestValidationError is the validation error +// returned by GetOrExtendReservationRequest.Validate if the designated +// constraints aren't met. +type GetOrExtendReservationRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GetOrExtendReservationRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GetOrExtendReservationRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GetOrExtendReservationRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GetOrExtendReservationRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e GetOrExtendReservationRequestValidationError) ErrorName() string { + return "GetOrExtendReservationRequestValidationError" +} + +// Error satisfies the builtin error interface +func (e GetOrExtendReservationRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGetOrExtendReservationRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GetOrExtendReservationRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GetOrExtendReservationRequestValidationError{} + +// Validate checks the field values on ReleaseReservationRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ReleaseReservationRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ReleaseReservationRequest with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ReleaseReservationRequestMultiError, or nil if none found. 
+func (m *ReleaseReservationRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *ReleaseReservationRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetBaseRequest()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ReleaseReservationRequestValidationError{ + field: "BaseRequest", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ReleaseReservationRequestValidationError{ + field: "BaseRequest", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetBaseRequest()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ReleaseReservationRequestValidationError{ + field: "BaseRequest", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetIdentifier()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ReleaseReservationRequestValidationError{ + field: "Identifier", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ReleaseReservationRequestValidationError{ + field: "Identifier", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetIdentifier()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ReleaseReservationRequestValidationError{ + field: "Identifier", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ReleaseReservationRequestMultiError(errors) + } + + return nil +} + +// 
ReleaseReservationRequestMultiError is an error wrapping multiple validation +// errors returned by ReleaseReservationRequest.ValidateAll() if the +// designated constraints aren't met. +type ReleaseReservationRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ReleaseReservationRequestMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ReleaseReservationRequestMultiError) AllErrors() []error { return m } + +// ReleaseReservationRequestValidationError is the validation error returned by +// ReleaseReservationRequest.Validate if the designated constraints aren't met. +type ReleaseReservationRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ReleaseReservationRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ReleaseReservationRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ReleaseReservationRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ReleaseReservationRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ReleaseReservationRequestValidationError) ErrorName() string { + return "ReleaseReservationRequestValidationError" +} + +// Error satisfies the builtin error interface +func (e ReleaseReservationRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sReleaseReservationRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ReleaseReservationRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ReleaseReservationRequestValidationError{} diff --git a/gen/go/flyteidl2/cacheservice/v2/cacheservice_grpc.pb.go b/gen/go/flyteidl2/cacheservice/v2/cacheservice_grpc.pb.go new file mode 100644 index 0000000000..0396cdee0a --- /dev/null +++ b/gen/go/flyteidl2/cacheservice/v2/cacheservice_grpc.pb.go @@ -0,0 +1,266 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc (unknown) +// source: flyteidl2/cacheservice/v2/cacheservice.proto + +package v2 + +import ( + context "context" + cacheservice "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/cacheservice" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. 
+const _ = grpc.SupportPackageIsVersion7 + +const ( + CacheService_Get_FullMethodName = "/flyteidl2.cacheservice.v2.CacheService/Get" + CacheService_Put_FullMethodName = "/flyteidl2.cacheservice.v2.CacheService/Put" + CacheService_Delete_FullMethodName = "/flyteidl2.cacheservice.v2.CacheService/Delete" + CacheService_GetOrExtendReservation_FullMethodName = "/flyteidl2.cacheservice.v2.CacheService/GetOrExtendReservation" + CacheService_ReleaseReservation_FullMethodName = "/flyteidl2.cacheservice.v2.CacheService/ReleaseReservation" +) + +// CacheServiceClient is the client API for CacheService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type CacheServiceClient interface { + // Retrieves cached data by key. + Get(ctx context.Context, in *GetCacheRequest, opts ...grpc.CallOption) (*cacheservice.GetCacheResponse, error) + // Stores or updates cached data by key. + Put(ctx context.Context, in *PutCacheRequest, opts ...grpc.CallOption) (*cacheservice.PutCacheResponse, error) + // Deletes cached data by key. 
+ Delete(ctx context.Context, in *DeleteCacheRequest, opts ...grpc.CallOption) (*cacheservice.DeleteCacheResponse, error) + // Get or extend a reservation for a cache key + GetOrExtendReservation(ctx context.Context, in *GetOrExtendReservationRequest, opts ...grpc.CallOption) (*cacheservice.GetOrExtendReservationResponse, error) + // Release the reservation for a cache key + ReleaseReservation(ctx context.Context, in *ReleaseReservationRequest, opts ...grpc.CallOption) (*cacheservice.ReleaseReservationResponse, error) +} + +type cacheServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewCacheServiceClient(cc grpc.ClientConnInterface) CacheServiceClient { + return &cacheServiceClient{cc} +} + +func (c *cacheServiceClient) Get(ctx context.Context, in *GetCacheRequest, opts ...grpc.CallOption) (*cacheservice.GetCacheResponse, error) { + out := new(cacheservice.GetCacheResponse) + err := c.cc.Invoke(ctx, CacheService_Get_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *cacheServiceClient) Put(ctx context.Context, in *PutCacheRequest, opts ...grpc.CallOption) (*cacheservice.PutCacheResponse, error) { + out := new(cacheservice.PutCacheResponse) + err := c.cc.Invoke(ctx, CacheService_Put_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *cacheServiceClient) Delete(ctx context.Context, in *DeleteCacheRequest, opts ...grpc.CallOption) (*cacheservice.DeleteCacheResponse, error) { + out := new(cacheservice.DeleteCacheResponse) + err := c.cc.Invoke(ctx, CacheService_Delete_FullMethodName, in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *cacheServiceClient) GetOrExtendReservation(ctx context.Context, in *GetOrExtendReservationRequest, opts ...grpc.CallOption) (*cacheservice.GetOrExtendReservationResponse, error) { + out := new(cacheservice.GetOrExtendReservationResponse) + err := c.cc.Invoke(ctx, CacheService_GetOrExtendReservation_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *cacheServiceClient) ReleaseReservation(ctx context.Context, in *ReleaseReservationRequest, opts ...grpc.CallOption) (*cacheservice.ReleaseReservationResponse, error) { + out := new(cacheservice.ReleaseReservationResponse) + err := c.cc.Invoke(ctx, CacheService_ReleaseReservation_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// CacheServiceServer is the server API for CacheService service. +// All implementations should embed UnimplementedCacheServiceServer +// for forward compatibility +type CacheServiceServer interface { + // Retrieves cached data by key. + Get(context.Context, *GetCacheRequest) (*cacheservice.GetCacheResponse, error) + // Stores or updates cached data by key. + Put(context.Context, *PutCacheRequest) (*cacheservice.PutCacheResponse, error) + // Deletes cached data by key. + Delete(context.Context, *DeleteCacheRequest) (*cacheservice.DeleteCacheResponse, error) + // Get or extend a reservation for a cache key + GetOrExtendReservation(context.Context, *GetOrExtendReservationRequest) (*cacheservice.GetOrExtendReservationResponse, error) + // Release the reservation for a cache key + ReleaseReservation(context.Context, *ReleaseReservationRequest) (*cacheservice.ReleaseReservationResponse, error) +} + +// UnimplementedCacheServiceServer should be embedded to have forward compatible implementations. 
+type UnimplementedCacheServiceServer struct { +} + +func (UnimplementedCacheServiceServer) Get(context.Context, *GetCacheRequest) (*cacheservice.GetCacheResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") +} +func (UnimplementedCacheServiceServer) Put(context.Context, *PutCacheRequest) (*cacheservice.PutCacheResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Put not implemented") +} +func (UnimplementedCacheServiceServer) Delete(context.Context, *DeleteCacheRequest) (*cacheservice.DeleteCacheResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") +} +func (UnimplementedCacheServiceServer) GetOrExtendReservation(context.Context, *GetOrExtendReservationRequest) (*cacheservice.GetOrExtendReservationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetOrExtendReservation not implemented") +} +func (UnimplementedCacheServiceServer) ReleaseReservation(context.Context, *ReleaseReservationRequest) (*cacheservice.ReleaseReservationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReleaseReservation not implemented") +} + +// UnsafeCacheServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to CacheServiceServer will +// result in compilation errors. 
+type UnsafeCacheServiceServer interface { + mustEmbedUnimplementedCacheServiceServer() +} + +func RegisterCacheServiceServer(s grpc.ServiceRegistrar, srv CacheServiceServer) { + s.RegisterService(&CacheService_ServiceDesc, srv) +} + +func _CacheService_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetCacheRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CacheServiceServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: CacheService_Get_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CacheServiceServer).Get(ctx, req.(*GetCacheRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CacheService_Put_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PutCacheRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CacheServiceServer).Put(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: CacheService_Put_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CacheServiceServer).Put(ctx, req.(*PutCacheRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CacheService_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteCacheRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CacheServiceServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: CacheService_Delete_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + 
return srv.(CacheServiceServer).Delete(ctx, req.(*DeleteCacheRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CacheService_GetOrExtendReservation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetOrExtendReservationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CacheServiceServer).GetOrExtendReservation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: CacheService_GetOrExtendReservation_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CacheServiceServer).GetOrExtendReservation(ctx, req.(*GetOrExtendReservationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _CacheService_ReleaseReservation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReleaseReservationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(CacheServiceServer).ReleaseReservation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: CacheService_ReleaseReservation_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(CacheServiceServer).ReleaseReservation(ctx, req.(*ReleaseReservationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// CacheService_ServiceDesc is the grpc.ServiceDesc for CacheService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var CacheService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "flyteidl2.cacheservice.v2.CacheService", + HandlerType: (*CacheServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Get", + Handler: _CacheService_Get_Handler, + }, + { + MethodName: "Put", + Handler: _CacheService_Put_Handler, + }, + { + MethodName: "Delete", + Handler: _CacheService_Delete_Handler, + }, + { + MethodName: "GetOrExtendReservation", + Handler: _CacheService_GetOrExtendReservation_Handler, + }, + { + MethodName: "ReleaseReservation", + Handler: _CacheService_ReleaseReservation_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "flyteidl2/cacheservice/v2/cacheservice.proto", +} diff --git a/gen/go/flyteidl2/cacheservice/v2/v2connect/cacheservice.connect.go b/gen/go/flyteidl2/cacheservice/v2/v2connect/cacheservice.connect.go new file mode 100644 index 0000000000..32e46d5181 --- /dev/null +++ b/gen/go/flyteidl2/cacheservice/v2/v2connect/cacheservice.connect.go @@ -0,0 +1,241 @@ +// Code generated by protoc-gen-connect-go. DO NOT EDIT. +// +// Source: flyteidl2/cacheservice/v2/cacheservice.proto + +package v2connect + +import ( + connect "connectrpc.com/connect" + context "context" + errors "errors" + cacheservice "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/cacheservice" + v2 "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/cacheservice/v2" + http "net/http" + strings "strings" +) + +// This is a compile-time assertion to ensure that this generated file and the connect package are +// compatible. If you get a compiler error that this constant is not defined, this code was +// generated with a version of connect newer than the one compiled into your binary. You can fix the +// problem by either regenerating this code with an older version of connect or updating the connect +// version compiled into your binary. 
+const _ = connect.IsAtLeastVersion1_13_0 + +const ( + // CacheServiceName is the fully-qualified name of the CacheService service. + CacheServiceName = "flyteidl2.cacheservice.v2.CacheService" +) + +// These constants are the fully-qualified names of the RPCs defined in this package. They're +// exposed at runtime as Spec.Procedure and as the final two segments of the HTTP route. +// +// Note that these are different from the fully-qualified method names used by +// google.golang.org/protobuf/reflect/protoreflect. To convert from these constants to +// reflection-formatted method names, remove the leading slash and convert the remaining slash to a +// period. +const ( + // CacheServiceGetProcedure is the fully-qualified name of the CacheService's Get RPC. + CacheServiceGetProcedure = "/flyteidl2.cacheservice.v2.CacheService/Get" + // CacheServicePutProcedure is the fully-qualified name of the CacheService's Put RPC. + CacheServicePutProcedure = "/flyteidl2.cacheservice.v2.CacheService/Put" + // CacheServiceDeleteProcedure is the fully-qualified name of the CacheService's Delete RPC. + CacheServiceDeleteProcedure = "/flyteidl2.cacheservice.v2.CacheService/Delete" + // CacheServiceGetOrExtendReservationProcedure is the fully-qualified name of the CacheService's + // GetOrExtendReservation RPC. + CacheServiceGetOrExtendReservationProcedure = "/flyteidl2.cacheservice.v2.CacheService/GetOrExtendReservation" + // CacheServiceReleaseReservationProcedure is the fully-qualified name of the CacheService's + // ReleaseReservation RPC. + CacheServiceReleaseReservationProcedure = "/flyteidl2.cacheservice.v2.CacheService/ReleaseReservation" +) + +// These variables are the protoreflect.Descriptor objects for the RPCs defined in this package. 
+var ( + cacheServiceServiceDescriptor = v2.File_flyteidl2_cacheservice_v2_cacheservice_proto.Services().ByName("CacheService") + cacheServiceGetMethodDescriptor = cacheServiceServiceDescriptor.Methods().ByName("Get") + cacheServicePutMethodDescriptor = cacheServiceServiceDescriptor.Methods().ByName("Put") + cacheServiceDeleteMethodDescriptor = cacheServiceServiceDescriptor.Methods().ByName("Delete") + cacheServiceGetOrExtendReservationMethodDescriptor = cacheServiceServiceDescriptor.Methods().ByName("GetOrExtendReservation") + cacheServiceReleaseReservationMethodDescriptor = cacheServiceServiceDescriptor.Methods().ByName("ReleaseReservation") +) + +// CacheServiceClient is a client for the flyteidl2.cacheservice.v2.CacheService service. +type CacheServiceClient interface { + // Retrieves cached data by key. + Get(context.Context, *connect.Request[v2.GetCacheRequest]) (*connect.Response[cacheservice.GetCacheResponse], error) + // Stores or updates cached data by key. + Put(context.Context, *connect.Request[v2.PutCacheRequest]) (*connect.Response[cacheservice.PutCacheResponse], error) + // Deletes cached data by key. + Delete(context.Context, *connect.Request[v2.DeleteCacheRequest]) (*connect.Response[cacheservice.DeleteCacheResponse], error) + // Get or extend a reservation for a cache key + GetOrExtendReservation(context.Context, *connect.Request[v2.GetOrExtendReservationRequest]) (*connect.Response[cacheservice.GetOrExtendReservationResponse], error) + // Release the reservation for a cache key + ReleaseReservation(context.Context, *connect.Request[v2.ReleaseReservationRequest]) (*connect.Response[cacheservice.ReleaseReservationResponse], error) +} + +// NewCacheServiceClient constructs a client for the flyteidl2.cacheservice.v2.CacheService service. +// By default, it uses the Connect protocol with the binary Protobuf Codec, asks for gzipped +// responses, and sends uncompressed requests. 
To use the gRPC or gRPC-Web protocols, supply the +// connect.WithGRPC() or connect.WithGRPCWeb() options. +// +// The URL supplied here should be the base URL for the Connect or gRPC server (for example, +// http://api.acme.com or https://acme.com/grpc). +func NewCacheServiceClient(httpClient connect.HTTPClient, baseURL string, opts ...connect.ClientOption) CacheServiceClient { + baseURL = strings.TrimRight(baseURL, "/") + return &cacheServiceClient{ + get: connect.NewClient[v2.GetCacheRequest, cacheservice.GetCacheResponse]( + httpClient, + baseURL+CacheServiceGetProcedure, + connect.WithSchema(cacheServiceGetMethodDescriptor), + connect.WithClientOptions(opts...), + ), + put: connect.NewClient[v2.PutCacheRequest, cacheservice.PutCacheResponse]( + httpClient, + baseURL+CacheServicePutProcedure, + connect.WithSchema(cacheServicePutMethodDescriptor), + connect.WithClientOptions(opts...), + ), + delete: connect.NewClient[v2.DeleteCacheRequest, cacheservice.DeleteCacheResponse]( + httpClient, + baseURL+CacheServiceDeleteProcedure, + connect.WithSchema(cacheServiceDeleteMethodDescriptor), + connect.WithClientOptions(opts...), + ), + getOrExtendReservation: connect.NewClient[v2.GetOrExtendReservationRequest, cacheservice.GetOrExtendReservationResponse]( + httpClient, + baseURL+CacheServiceGetOrExtendReservationProcedure, + connect.WithSchema(cacheServiceGetOrExtendReservationMethodDescriptor), + connect.WithClientOptions(opts...), + ), + releaseReservation: connect.NewClient[v2.ReleaseReservationRequest, cacheservice.ReleaseReservationResponse]( + httpClient, + baseURL+CacheServiceReleaseReservationProcedure, + connect.WithSchema(cacheServiceReleaseReservationMethodDescriptor), + connect.WithClientOptions(opts...), + ), + } +} + +// cacheServiceClient implements CacheServiceClient. 
+type cacheServiceClient struct { + get *connect.Client[v2.GetCacheRequest, cacheservice.GetCacheResponse] + put *connect.Client[v2.PutCacheRequest, cacheservice.PutCacheResponse] + delete *connect.Client[v2.DeleteCacheRequest, cacheservice.DeleteCacheResponse] + getOrExtendReservation *connect.Client[v2.GetOrExtendReservationRequest, cacheservice.GetOrExtendReservationResponse] + releaseReservation *connect.Client[v2.ReleaseReservationRequest, cacheservice.ReleaseReservationResponse] +} + +// Get calls flyteidl2.cacheservice.v2.CacheService.Get. +func (c *cacheServiceClient) Get(ctx context.Context, req *connect.Request[v2.GetCacheRequest]) (*connect.Response[cacheservice.GetCacheResponse], error) { + return c.get.CallUnary(ctx, req) +} + +// Put calls flyteidl2.cacheservice.v2.CacheService.Put. +func (c *cacheServiceClient) Put(ctx context.Context, req *connect.Request[v2.PutCacheRequest]) (*connect.Response[cacheservice.PutCacheResponse], error) { + return c.put.CallUnary(ctx, req) +} + +// Delete calls flyteidl2.cacheservice.v2.CacheService.Delete. +func (c *cacheServiceClient) Delete(ctx context.Context, req *connect.Request[v2.DeleteCacheRequest]) (*connect.Response[cacheservice.DeleteCacheResponse], error) { + return c.delete.CallUnary(ctx, req) +} + +// GetOrExtendReservation calls flyteidl2.cacheservice.v2.CacheService.GetOrExtendReservation. +func (c *cacheServiceClient) GetOrExtendReservation(ctx context.Context, req *connect.Request[v2.GetOrExtendReservationRequest]) (*connect.Response[cacheservice.GetOrExtendReservationResponse], error) { + return c.getOrExtendReservation.CallUnary(ctx, req) +} + +// ReleaseReservation calls flyteidl2.cacheservice.v2.CacheService.ReleaseReservation. 
+func (c *cacheServiceClient) ReleaseReservation(ctx context.Context, req *connect.Request[v2.ReleaseReservationRequest]) (*connect.Response[cacheservice.ReleaseReservationResponse], error) { + return c.releaseReservation.CallUnary(ctx, req) +} + +// CacheServiceHandler is an implementation of the flyteidl2.cacheservice.v2.CacheService service. +type CacheServiceHandler interface { + // Retrieves cached data by key. + Get(context.Context, *connect.Request[v2.GetCacheRequest]) (*connect.Response[cacheservice.GetCacheResponse], error) + // Stores or updates cached data by key. + Put(context.Context, *connect.Request[v2.PutCacheRequest]) (*connect.Response[cacheservice.PutCacheResponse], error) + // Deletes cached data by key. + Delete(context.Context, *connect.Request[v2.DeleteCacheRequest]) (*connect.Response[cacheservice.DeleteCacheResponse], error) + // Get or extend a reservation for a cache key + GetOrExtendReservation(context.Context, *connect.Request[v2.GetOrExtendReservationRequest]) (*connect.Response[cacheservice.GetOrExtendReservationResponse], error) + // Release the reservation for a cache key + ReleaseReservation(context.Context, *connect.Request[v2.ReleaseReservationRequest]) (*connect.Response[cacheservice.ReleaseReservationResponse], error) +} + +// NewCacheServiceHandler builds an HTTP handler from the service implementation. It returns the +// path on which to mount the handler and the handler itself. +// +// By default, handlers support the Connect, gRPC, and gRPC-Web protocols with the binary Protobuf +// and JSON codecs. They also support gzip compression. 
+func NewCacheServiceHandler(svc CacheServiceHandler, opts ...connect.HandlerOption) (string, http.Handler) { + cacheServiceGetHandler := connect.NewUnaryHandler( + CacheServiceGetProcedure, + svc.Get, + connect.WithSchema(cacheServiceGetMethodDescriptor), + connect.WithHandlerOptions(opts...), + ) + cacheServicePutHandler := connect.NewUnaryHandler( + CacheServicePutProcedure, + svc.Put, + connect.WithSchema(cacheServicePutMethodDescriptor), + connect.WithHandlerOptions(opts...), + ) + cacheServiceDeleteHandler := connect.NewUnaryHandler( + CacheServiceDeleteProcedure, + svc.Delete, + connect.WithSchema(cacheServiceDeleteMethodDescriptor), + connect.WithHandlerOptions(opts...), + ) + cacheServiceGetOrExtendReservationHandler := connect.NewUnaryHandler( + CacheServiceGetOrExtendReservationProcedure, + svc.GetOrExtendReservation, + connect.WithSchema(cacheServiceGetOrExtendReservationMethodDescriptor), + connect.WithHandlerOptions(opts...), + ) + cacheServiceReleaseReservationHandler := connect.NewUnaryHandler( + CacheServiceReleaseReservationProcedure, + svc.ReleaseReservation, + connect.WithSchema(cacheServiceReleaseReservationMethodDescriptor), + connect.WithHandlerOptions(opts...), + ) + return "/flyteidl2.cacheservice.v2.CacheService/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case CacheServiceGetProcedure: + cacheServiceGetHandler.ServeHTTP(w, r) + case CacheServicePutProcedure: + cacheServicePutHandler.ServeHTTP(w, r) + case CacheServiceDeleteProcedure: + cacheServiceDeleteHandler.ServeHTTP(w, r) + case CacheServiceGetOrExtendReservationProcedure: + cacheServiceGetOrExtendReservationHandler.ServeHTTP(w, r) + case CacheServiceReleaseReservationProcedure: + cacheServiceReleaseReservationHandler.ServeHTTP(w, r) + default: + http.NotFound(w, r) + } + }) +} + +// UnimplementedCacheServiceHandler returns CodeUnimplemented from all methods. 
+type UnimplementedCacheServiceHandler struct{} + +func (UnimplementedCacheServiceHandler) Get(context.Context, *connect.Request[v2.GetCacheRequest]) (*connect.Response[cacheservice.GetCacheResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("flyteidl2.cacheservice.v2.CacheService.Get is not implemented")) +} + +func (UnimplementedCacheServiceHandler) Put(context.Context, *connect.Request[v2.PutCacheRequest]) (*connect.Response[cacheservice.PutCacheResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("flyteidl2.cacheservice.v2.CacheService.Put is not implemented")) +} + +func (UnimplementedCacheServiceHandler) Delete(context.Context, *connect.Request[v2.DeleteCacheRequest]) (*connect.Response[cacheservice.DeleteCacheResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("flyteidl2.cacheservice.v2.CacheService.Delete is not implemented")) +} + +func (UnimplementedCacheServiceHandler) GetOrExtendReservation(context.Context, *connect.Request[v2.GetOrExtendReservationRequest]) (*connect.Response[cacheservice.GetOrExtendReservationResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("flyteidl2.cacheservice.v2.CacheService.GetOrExtendReservation is not implemented")) +} + +func (UnimplementedCacheServiceHandler) ReleaseReservation(context.Context, *connect.Request[v2.ReleaseReservationRequest]) (*connect.Response[cacheservice.ReleaseReservationResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("flyteidl2.cacheservice.v2.CacheService.ReleaseReservation is not implemented")) +} diff --git a/gen/go/flyteidl2/common/configuration.pb.go b/gen/go/flyteidl2/common/configuration.pb.go new file mode 100644 index 0000000000..f7e5eec149 --- /dev/null +++ b/gen/go/flyteidl2/common/configuration.pb.go @@ -0,0 +1,164 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc (unknown) +// source: flyteidl2/common/configuration.proto + +package common + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// The source of an attribute. We may have other sources in the future. +type AttributesSource int32 + +const ( + // The source is unspecified. + AttributesSource_SOURCE_UNSPECIFIED AttributesSource = 0 + // The configuration is a global configuration. + AttributesSource_GLOBAL AttributesSource = 1 + // The configuration is a domain configuration. + AttributesSource_DOMAIN AttributesSource = 2 + // The configuration is a project configuration. + AttributesSource_PROJECT AttributesSource = 3 + // The configuration is a project-domain configuration. + AttributesSource_PROJECT_DOMAIN AttributesSource = 4 + // The configuration is a org configuration. + AttributesSource_ORG AttributesSource = 5 +) + +// Enum value maps for AttributesSource. 
+var ( + AttributesSource_name = map[int32]string{ + 0: "SOURCE_UNSPECIFIED", + 1: "GLOBAL", + 2: "DOMAIN", + 3: "PROJECT", + 4: "PROJECT_DOMAIN", + 5: "ORG", + } + AttributesSource_value = map[string]int32{ + "SOURCE_UNSPECIFIED": 0, + "GLOBAL": 1, + "DOMAIN": 2, + "PROJECT": 3, + "PROJECT_DOMAIN": 4, + "ORG": 5, + } +) + +func (x AttributesSource) Enum() *AttributesSource { + p := new(AttributesSource) + *p = x + return p +} + +func (x AttributesSource) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (AttributesSource) Descriptor() protoreflect.EnumDescriptor { + return file_flyteidl2_common_configuration_proto_enumTypes[0].Descriptor() +} + +func (AttributesSource) Type() protoreflect.EnumType { + return &file_flyteidl2_common_configuration_proto_enumTypes[0] +} + +func (x AttributesSource) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use AttributesSource.Descriptor instead. +func (AttributesSource) EnumDescriptor() ([]byte, []int) { + return file_flyteidl2_common_configuration_proto_rawDescGZIP(), []int{0} +} + +var File_flyteidl2_common_configuration_proto protoreflect.FileDescriptor + +var file_flyteidl2_common_configuration_proto_rawDesc = []byte{ + 0x0a, 0x24, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x10, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x32, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2a, 0x6c, 0x0a, 0x10, 0x41, 0x74, 0x74, 0x72, + 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x12, + 0x53, 0x4f, 0x55, 0x52, 0x43, 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, + 0x45, 0x44, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x47, 0x4c, 0x4f, 0x42, 0x41, 0x4c, 0x10, 0x01, + 0x12, 0x0a, 0x0a, 0x06, 0x44, 
0x4f, 0x4d, 0x41, 0x49, 0x4e, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, + 0x50, 0x52, 0x4f, 0x4a, 0x45, 0x43, 0x54, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x50, 0x52, 0x4f, + 0x4a, 0x45, 0x43, 0x54, 0x5f, 0x44, 0x4f, 0x4d, 0x41, 0x49, 0x4e, 0x10, 0x04, 0x12, 0x07, 0x0a, + 0x03, 0x4f, 0x52, 0x47, 0x10, 0x05, 0x42, 0xc3, 0x01, 0x0a, 0x14, 0x63, 0x6f, 0x6d, 0x2e, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x42, + 0x12, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x48, 0x02, 0x50, 0x01, 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0xa2, 0x02, 0x03, + 0x46, 0x43, 0x58, 0xaa, 0x02, 0x10, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, + 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0xca, 0x02, 0x10, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x32, 0x5c, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0xe2, 0x02, 0x1c, 0x46, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5c, 0x47, 0x50, 0x42, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x11, 0x46, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x3a, 0x3a, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_flyteidl2_common_configuration_proto_rawDescOnce sync.Once + file_flyteidl2_common_configuration_proto_rawDescData = file_flyteidl2_common_configuration_proto_rawDesc +) + +func file_flyteidl2_common_configuration_proto_rawDescGZIP() []byte { + file_flyteidl2_common_configuration_proto_rawDescOnce.Do(func() { + file_flyteidl2_common_configuration_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_flyteidl2_common_configuration_proto_rawDescData) + }) + return file_flyteidl2_common_configuration_proto_rawDescData +} + +var file_flyteidl2_common_configuration_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_flyteidl2_common_configuration_proto_goTypes = []interface{}{ + (AttributesSource)(0), // 0: flyteidl2.common.AttributesSource +} +var file_flyteidl2_common_configuration_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_flyteidl2_common_configuration_proto_init() } +func file_flyteidl2_common_configuration_proto_init() { + if File_flyteidl2_common_configuration_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_flyteidl2_common_configuration_proto_rawDesc, + NumEnums: 1, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_flyteidl2_common_configuration_proto_goTypes, + DependencyIndexes: file_flyteidl2_common_configuration_proto_depIdxs, + EnumInfos: file_flyteidl2_common_configuration_proto_enumTypes, + }.Build() + File_flyteidl2_common_configuration_proto = out.File + file_flyteidl2_common_configuration_proto_rawDesc = nil + file_flyteidl2_common_configuration_proto_goTypes = nil + file_flyteidl2_common_configuration_proto_depIdxs = nil +} diff --git a/gen/go/flyteidl2/common/configuration.pb.validate.go b/gen/go/flyteidl2/common/configuration.pb.validate.go new file mode 100644 index 0000000000..e0e600242c --- /dev/null +++ b/gen/go/flyteidl2/common/configuration.pb.validate.go @@ -0,0 +1,36 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: flyteidl2/common/configuration.proto + +package common + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) diff --git a/gen/go/flyteidl2/core/errors.pb.go b/gen/go/flyteidl2/core/errors.pb.go new file mode 100644 index 0000000000..a9eedf30a5 --- /dev/null +++ b/gen/go/flyteidl2/core/errors.pb.go @@ -0,0 +1,320 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc (unknown) +// source: flyteidl2/core/errors.proto + +package core + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Defines a generic error type that dictates the behavior of the retry strategy. +type ContainerError_Kind int32 + +const ( + ContainerError_NON_RECOVERABLE ContainerError_Kind = 0 + ContainerError_RECOVERABLE ContainerError_Kind = 1 +) + +// Enum value maps for ContainerError_Kind. 
+var ( + ContainerError_Kind_name = map[int32]string{ + 0: "NON_RECOVERABLE", + 1: "RECOVERABLE", + } + ContainerError_Kind_value = map[string]int32{ + "NON_RECOVERABLE": 0, + "RECOVERABLE": 1, + } +) + +func (x ContainerError_Kind) Enum() *ContainerError_Kind { + p := new(ContainerError_Kind) + *p = x + return p +} + +func (x ContainerError_Kind) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ContainerError_Kind) Descriptor() protoreflect.EnumDescriptor { + return file_flyteidl2_core_errors_proto_enumTypes[0].Descriptor() +} + +func (ContainerError_Kind) Type() protoreflect.EnumType { + return &file_flyteidl2_core_errors_proto_enumTypes[0] +} + +func (x ContainerError_Kind) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ContainerError_Kind.Descriptor instead. +func (ContainerError_Kind) EnumDescriptor() ([]byte, []int) { + return file_flyteidl2_core_errors_proto_rawDescGZIP(), []int{0, 0} +} + +// Error message to propagate detailed errors from container executions to the execution +// engine. +type ContainerError struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A simplified code for errors, so that we can provide a glossary of all possible errors. + Code string `protobuf:"bytes,1,opt,name=code,proto3" json:"code,omitempty"` + // A detailed error message. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // An abstract error kind for this error. Defaults to Non_Recoverable if not specified. + Kind ContainerError_Kind `protobuf:"varint,3,opt,name=kind,proto3,enum=flyteidl2.core.ContainerError_Kind" json:"kind,omitempty"` + // Defines the origin of the error (system, user, unknown). 
+ Origin ExecutionError_ErrorKind `protobuf:"varint,4,opt,name=origin,proto3,enum=flyteidl2.core.ExecutionError_ErrorKind" json:"origin,omitempty"` +} + +func (x *ContainerError) Reset() { + *x = ContainerError{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_core_errors_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ContainerError) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ContainerError) ProtoMessage() {} + +func (x *ContainerError) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_core_errors_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ContainerError.ProtoReflect.Descriptor instead. +func (*ContainerError) Descriptor() ([]byte, []int) { + return file_flyteidl2_core_errors_proto_rawDescGZIP(), []int{0} +} + +func (x *ContainerError) GetCode() string { + if x != nil { + return x.Code + } + return "" +} + +func (x *ContainerError) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *ContainerError) GetKind() ContainerError_Kind { + if x != nil { + return x.Kind + } + return ContainerError_NON_RECOVERABLE +} + +func (x *ContainerError) GetOrigin() ExecutionError_ErrorKind { + if x != nil { + return x.Origin + } + return ExecutionError_UNKNOWN +} + +// Defines the errors.pb file format the container can produce to communicate +// failure reasons to the execution engine. +type ErrorDocument struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The error raised during execution. 
+ Error *ContainerError `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` +} + +func (x *ErrorDocument) Reset() { + *x = ErrorDocument{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_core_errors_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ErrorDocument) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ErrorDocument) ProtoMessage() {} + +func (x *ErrorDocument) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_core_errors_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ErrorDocument.ProtoReflect.Descriptor instead. +func (*ErrorDocument) Descriptor() ([]byte, []int) { + return file_flyteidl2_core_errors_proto_rawDescGZIP(), []int{1} +} + +func (x *ErrorDocument) GetError() *ContainerError { + if x != nil { + return x.Error + } + return nil +} + +var File_flyteidl2_core_errors_proto protoreflect.FileDescriptor + +var file_flyteidl2_core_errors_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, + 0x2f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x1a, 0x1e, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x65, 0x78, + 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe7, 0x01, + 0x0a, 0x0e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x45, 0x72, 0x72, 0x6f, 0x72, + 0x12, 0x12, 0x0a, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x63, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, + 
0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x37, + 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x6f, + 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x4b, 0x69, 0x6e, + 0x64, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, + 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, + 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4b, 0x69, 0x6e, + 0x64, 0x52, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x22, 0x2c, 0x0a, 0x04, 0x4b, 0x69, 0x6e, + 0x64, 0x12, 0x13, 0x0a, 0x0f, 0x4e, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x43, 0x4f, 0x56, 0x45, 0x52, + 0x41, 0x42, 0x4c, 0x45, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45, 0x43, 0x4f, 0x56, 0x45, + 0x52, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x22, 0x45, 0x0a, 0x0d, 0x45, 0x72, 0x72, 0x6f, 0x72, + 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x34, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x42, 0xb0, + 0x01, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x42, 0x0b, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x48, 0x02, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 
0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0xa2, 0x02, 0x03, 0x46, 0x43, 0x58, + 0xaa, 0x02, 0x0e, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x43, 0x6f, 0x72, + 0x65, 0xca, 0x02, 0x0e, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x43, 0x6f, + 0x72, 0x65, 0xe2, 0x02, 0x1a, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x43, + 0x6f, 0x72, 0x65, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, + 0x02, 0x0f, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x3a, 0x3a, 0x43, 0x6f, 0x72, + 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_flyteidl2_core_errors_proto_rawDescOnce sync.Once + file_flyteidl2_core_errors_proto_rawDescData = file_flyteidl2_core_errors_proto_rawDesc +) + +func file_flyteidl2_core_errors_proto_rawDescGZIP() []byte { + file_flyteidl2_core_errors_proto_rawDescOnce.Do(func() { + file_flyteidl2_core_errors_proto_rawDescData = protoimpl.X.CompressGZIP(file_flyteidl2_core_errors_proto_rawDescData) + }) + return file_flyteidl2_core_errors_proto_rawDescData +} + +var file_flyteidl2_core_errors_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_flyteidl2_core_errors_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_flyteidl2_core_errors_proto_goTypes = []interface{}{ + (ContainerError_Kind)(0), // 0: flyteidl2.core.ContainerError.Kind + (*ContainerError)(nil), // 1: flyteidl2.core.ContainerError + (*ErrorDocument)(nil), // 2: flyteidl2.core.ErrorDocument + (ExecutionError_ErrorKind)(0), // 3: flyteidl2.core.ExecutionError.ErrorKind +} +var file_flyteidl2_core_errors_proto_depIdxs = []int32{ + 0, // 0: flyteidl2.core.ContainerError.kind:type_name -> flyteidl2.core.ContainerError.Kind + 3, // 1: flyteidl2.core.ContainerError.origin:type_name -> flyteidl2.core.ExecutionError.ErrorKind + 1, // 2: flyteidl2.core.ErrorDocument.error:type_name -> 
flyteidl2.core.ContainerError + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_flyteidl2_core_errors_proto_init() } +func file_flyteidl2_core_errors_proto_init() { + if File_flyteidl2_core_errors_proto != nil { + return + } + file_flyteidl2_core_execution_proto_init() + if !protoimpl.UnsafeEnabled { + file_flyteidl2_core_errors_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ContainerError); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_core_errors_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ErrorDocument); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_flyteidl2_core_errors_proto_rawDesc, + NumEnums: 1, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_flyteidl2_core_errors_proto_goTypes, + DependencyIndexes: file_flyteidl2_core_errors_proto_depIdxs, + EnumInfos: file_flyteidl2_core_errors_proto_enumTypes, + MessageInfos: file_flyteidl2_core_errors_proto_msgTypes, + }.Build() + File_flyteidl2_core_errors_proto = out.File + file_flyteidl2_core_errors_proto_rawDesc = nil + file_flyteidl2_core_errors_proto_goTypes = nil + file_flyteidl2_core_errors_proto_depIdxs = nil +} diff --git a/gen/go/flyteidl2/core/errors.pb.validate.go b/gen/go/flyteidl2/core/errors.pb.validate.go new file mode 100644 index 0000000000..2b525ca42c --- /dev/null +++ b/gen/go/flyteidl2/core/errors.pb.validate.go @@ 
-0,0 +1,273 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: flyteidl2/core/errors.proto + +package core + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on ContainerError with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ContainerError) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ContainerError with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ContainerErrorMultiError, +// or nil if none found. +func (m *ContainerError) ValidateAll() error { + return m.validate(true) +} + +func (m *ContainerError) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Code + + // no validation rules for Message + + // no validation rules for Kind + + // no validation rules for Origin + + if len(errors) > 0 { + return ContainerErrorMultiError(errors) + } + + return nil +} + +// ContainerErrorMultiError is an error wrapping multiple validation errors +// returned by ContainerError.ValidateAll() if the designated constraints +// aren't met. +type ContainerErrorMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m ContainerErrorMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ContainerErrorMultiError) AllErrors() []error { return m } + +// ContainerErrorValidationError is the validation error returned by +// ContainerError.Validate if the designated constraints aren't met. +type ContainerErrorValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ContainerErrorValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ContainerErrorValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ContainerErrorValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ContainerErrorValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ContainerErrorValidationError) ErrorName() string { return "ContainerErrorValidationError" } + +// Error satisfies the builtin error interface +func (e ContainerErrorValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sContainerError.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ContainerErrorValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ContainerErrorValidationError{} + +// Validate checks the field values on ErrorDocument with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. 
+func (m *ErrorDocument) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ErrorDocument with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ErrorDocumentMultiError, or +// nil if none found. +func (m *ErrorDocument) ValidateAll() error { + return m.validate(true) +} + +func (m *ErrorDocument) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetError()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ErrorDocumentValidationError{ + field: "Error", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ErrorDocumentValidationError{ + field: "Error", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetError()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ErrorDocumentValidationError{ + field: "Error", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ErrorDocumentMultiError(errors) + } + + return nil +} + +// ErrorDocumentMultiError is an error wrapping multiple validation errors +// returned by ErrorDocument.ValidateAll() if the designated constraints +// aren't met. +type ErrorDocumentMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ErrorDocumentMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m ErrorDocumentMultiError) AllErrors() []error { return m } + +// ErrorDocumentValidationError is the validation error returned by +// ErrorDocument.Validate if the designated constraints aren't met. +type ErrorDocumentValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ErrorDocumentValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ErrorDocumentValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ErrorDocumentValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ErrorDocumentValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ErrorDocumentValidationError) ErrorName() string { return "ErrorDocumentValidationError" } + +// Error satisfies the builtin error interface +func (e ErrorDocumentValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sErrorDocument.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ErrorDocumentValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ErrorDocumentValidationError{} diff --git a/gen/go/flyteidl2/datacatalog/datacatalog.pb.go b/gen/go/flyteidl2/datacatalog/datacatalog.pb.go new file mode 100644 index 0000000000..853444e7ca --- /dev/null +++ b/gen/go/flyteidl2/datacatalog/datacatalog.pb.go @@ -0,0 +1,3566 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc (unknown) +// source: flyteidl2/datacatalog/datacatalog.proto + +package datacatalog + +import ( + core "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// as use-cases come up we can add more operators, ex: gte, like, not eq etc. +type SinglePropertyFilter_ComparisonOperator int32 + +const ( + SinglePropertyFilter_EQUALS SinglePropertyFilter_ComparisonOperator = 0 +) + +// Enum value maps for SinglePropertyFilter_ComparisonOperator. 
+var ( + SinglePropertyFilter_ComparisonOperator_name = map[int32]string{ + 0: "EQUALS", + } + SinglePropertyFilter_ComparisonOperator_value = map[string]int32{ + "EQUALS": 0, + } +) + +func (x SinglePropertyFilter_ComparisonOperator) Enum() *SinglePropertyFilter_ComparisonOperator { + p := new(SinglePropertyFilter_ComparisonOperator) + *p = x + return p +} + +func (x SinglePropertyFilter_ComparisonOperator) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (SinglePropertyFilter_ComparisonOperator) Descriptor() protoreflect.EnumDescriptor { + return file_flyteidl2_datacatalog_datacatalog_proto_enumTypes[0].Descriptor() +} + +func (SinglePropertyFilter_ComparisonOperator) Type() protoreflect.EnumType { + return &file_flyteidl2_datacatalog_datacatalog_proto_enumTypes[0] +} + +func (x SinglePropertyFilter_ComparisonOperator) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use SinglePropertyFilter_ComparisonOperator.Descriptor instead. +func (SinglePropertyFilter_ComparisonOperator) EnumDescriptor() ([]byte, []int) { + return file_flyteidl2_datacatalog_datacatalog_proto_rawDescGZIP(), []int{30, 0} +} + +type PaginationOptions_SortOrder int32 + +const ( + PaginationOptions_DESCENDING PaginationOptions_SortOrder = 0 + PaginationOptions_ASCENDING PaginationOptions_SortOrder = 1 +) + +// Enum value maps for PaginationOptions_SortOrder. 
+var ( + PaginationOptions_SortOrder_name = map[int32]string{ + 0: "DESCENDING", + 1: "ASCENDING", + } + PaginationOptions_SortOrder_value = map[string]int32{ + "DESCENDING": 0, + "ASCENDING": 1, + } +) + +func (x PaginationOptions_SortOrder) Enum() *PaginationOptions_SortOrder { + p := new(PaginationOptions_SortOrder) + *p = x + return p +} + +func (x PaginationOptions_SortOrder) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (PaginationOptions_SortOrder) Descriptor() protoreflect.EnumDescriptor { + return file_flyteidl2_datacatalog_datacatalog_proto_enumTypes[1].Descriptor() +} + +func (PaginationOptions_SortOrder) Type() protoreflect.EnumType { + return &file_flyteidl2_datacatalog_datacatalog_proto_enumTypes[1] +} + +func (x PaginationOptions_SortOrder) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use PaginationOptions_SortOrder.Descriptor instead. +func (PaginationOptions_SortOrder) EnumDescriptor() ([]byte, []int) { + return file_flyteidl2_datacatalog_datacatalog_proto_rawDescGZIP(), []int{36, 0} +} + +type PaginationOptions_SortKey int32 + +const ( + PaginationOptions_CREATION_TIME PaginationOptions_SortKey = 0 +) + +// Enum value maps for PaginationOptions_SortKey. 
+var ( + PaginationOptions_SortKey_name = map[int32]string{ + 0: "CREATION_TIME", + } + PaginationOptions_SortKey_value = map[string]int32{ + "CREATION_TIME": 0, + } +) + +func (x PaginationOptions_SortKey) Enum() *PaginationOptions_SortKey { + p := new(PaginationOptions_SortKey) + *p = x + return p +} + +func (x PaginationOptions_SortKey) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (PaginationOptions_SortKey) Descriptor() protoreflect.EnumDescriptor { + return file_flyteidl2_datacatalog_datacatalog_proto_enumTypes[2].Descriptor() +} + +func (PaginationOptions_SortKey) Type() protoreflect.EnumType { + return &file_flyteidl2_datacatalog_datacatalog_proto_enumTypes[2] +} + +func (x PaginationOptions_SortKey) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use PaginationOptions_SortKey.Descriptor instead. +func (PaginationOptions_SortKey) EnumDescriptor() ([]byte, []int) { + return file_flyteidl2_datacatalog_datacatalog_proto_rawDescGZIP(), []int{36, 1} +} + +// Request message for creating a Dataset. 
+type CreateDatasetRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Dataset *Dataset `protobuf:"bytes,1,opt,name=dataset,proto3" json:"dataset,omitempty"` +} + +func (x *CreateDatasetRequest) Reset() { + *x = CreateDatasetRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateDatasetRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateDatasetRequest) ProtoMessage() {} + +func (x *CreateDatasetRequest) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateDatasetRequest.ProtoReflect.Descriptor instead. 
+func (*CreateDatasetRequest) Descriptor() ([]byte, []int) { + return file_flyteidl2_datacatalog_datacatalog_proto_rawDescGZIP(), []int{0} +} + +func (x *CreateDatasetRequest) GetDataset() *Dataset { + if x != nil { + return x.Dataset + } + return nil +} + +// Response message for creating a Dataset +type CreateDatasetResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *CreateDatasetResponse) Reset() { + *x = CreateDatasetResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateDatasetResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateDatasetResponse) ProtoMessage() {} + +func (x *CreateDatasetResponse) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateDatasetResponse.ProtoReflect.Descriptor instead. +func (*CreateDatasetResponse) Descriptor() ([]byte, []int) { + return file_flyteidl2_datacatalog_datacatalog_proto_rawDescGZIP(), []int{1} +} + +// Request message for retrieving a Dataset. The Dataset is retrieved by it's unique identifier +// which is a combination of several fields. 
+type GetDatasetRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Dataset *DatasetID `protobuf:"bytes,1,opt,name=dataset,proto3" json:"dataset,omitempty"` +} + +func (x *GetDatasetRequest) Reset() { + *x = GetDatasetRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetDatasetRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetDatasetRequest) ProtoMessage() {} + +func (x *GetDatasetRequest) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetDatasetRequest.ProtoReflect.Descriptor instead. +func (*GetDatasetRequest) Descriptor() ([]byte, []int) { + return file_flyteidl2_datacatalog_datacatalog_proto_rawDescGZIP(), []int{2} +} + +func (x *GetDatasetRequest) GetDataset() *DatasetID { + if x != nil { + return x.Dataset + } + return nil +} + +// Response message for retrieving a Dataset. The response will include the metadata for the +// Dataset. 
+type GetDatasetResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Dataset *Dataset `protobuf:"bytes,1,opt,name=dataset,proto3" json:"dataset,omitempty"` +} + +func (x *GetDatasetResponse) Reset() { + *x = GetDatasetResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetDatasetResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetDatasetResponse) ProtoMessage() {} + +func (x *GetDatasetResponse) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetDatasetResponse.ProtoReflect.Descriptor instead. +func (*GetDatasetResponse) Descriptor() ([]byte, []int) { + return file_flyteidl2_datacatalog_datacatalog_proto_rawDescGZIP(), []int{3} +} + +func (x *GetDatasetResponse) GetDataset() *Dataset { + if x != nil { + return x.Dataset + } + return nil +} + +// Request message for retrieving an Artifact. Retrieve an artifact based on a query handle that +// can be one of artifact_id or tag. The result returned will include the artifact data and metadata +// associated with the artifact. 
+type GetArtifactRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Dataset *DatasetID `protobuf:"bytes,1,opt,name=dataset,proto3" json:"dataset,omitempty"` + // Types that are assignable to QueryHandle: + // + // *GetArtifactRequest_ArtifactId + // *GetArtifactRequest_TagName + QueryHandle isGetArtifactRequest_QueryHandle `protobuf_oneof:"query_handle"` +} + +func (x *GetArtifactRequest) Reset() { + *x = GetArtifactRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetArtifactRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetArtifactRequest) ProtoMessage() {} + +func (x *GetArtifactRequest) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetArtifactRequest.ProtoReflect.Descriptor instead. 
+func (*GetArtifactRequest) Descriptor() ([]byte, []int) { + return file_flyteidl2_datacatalog_datacatalog_proto_rawDescGZIP(), []int{4} +} + +func (x *GetArtifactRequest) GetDataset() *DatasetID { + if x != nil { + return x.Dataset + } + return nil +} + +func (m *GetArtifactRequest) GetQueryHandle() isGetArtifactRequest_QueryHandle { + if m != nil { + return m.QueryHandle + } + return nil +} + +func (x *GetArtifactRequest) GetArtifactId() string { + if x, ok := x.GetQueryHandle().(*GetArtifactRequest_ArtifactId); ok { + return x.ArtifactId + } + return "" +} + +func (x *GetArtifactRequest) GetTagName() string { + if x, ok := x.GetQueryHandle().(*GetArtifactRequest_TagName); ok { + return x.TagName + } + return "" +} + +type isGetArtifactRequest_QueryHandle interface { + isGetArtifactRequest_QueryHandle() +} + +type GetArtifactRequest_ArtifactId struct { + ArtifactId string `protobuf:"bytes,2,opt,name=artifact_id,json=artifactId,proto3,oneof"` +} + +type GetArtifactRequest_TagName struct { + TagName string `protobuf:"bytes,3,opt,name=tag_name,json=tagName,proto3,oneof"` +} + +func (*GetArtifactRequest_ArtifactId) isGetArtifactRequest_QueryHandle() {} + +func (*GetArtifactRequest_TagName) isGetArtifactRequest_QueryHandle() {} + +// Response message for retrieving an Artifact. The result returned will include the artifact data +// and metadata associated with the artifact. 
+type GetArtifactResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Artifact *Artifact `protobuf:"bytes,1,opt,name=artifact,proto3" json:"artifact,omitempty"` +} + +func (x *GetArtifactResponse) Reset() { + *x = GetArtifactResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetArtifactResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetArtifactResponse) ProtoMessage() {} + +func (x *GetArtifactResponse) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetArtifactResponse.ProtoReflect.Descriptor instead. +func (*GetArtifactResponse) Descriptor() ([]byte, []int) { + return file_flyteidl2_datacatalog_datacatalog_proto_rawDescGZIP(), []int{5} +} + +func (x *GetArtifactResponse) GetArtifact() *Artifact { + if x != nil { + return x.Artifact + } + return nil +} + +// Request message for creating an Artifact and its associated artifact Data. 
+type CreateArtifactRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Artifact *Artifact `protobuf:"bytes,1,opt,name=artifact,proto3" json:"artifact,omitempty"` +} + +func (x *CreateArtifactRequest) Reset() { + *x = CreateArtifactRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateArtifactRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateArtifactRequest) ProtoMessage() {} + +func (x *CreateArtifactRequest) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateArtifactRequest.ProtoReflect.Descriptor instead. +func (*CreateArtifactRequest) Descriptor() ([]byte, []int) { + return file_flyteidl2_datacatalog_datacatalog_proto_rawDescGZIP(), []int{6} +} + +func (x *CreateArtifactRequest) GetArtifact() *Artifact { + if x != nil { + return x.Artifact + } + return nil +} + +// Response message for creating an Artifact. 
+type CreateArtifactResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *CreateArtifactResponse) Reset() { + *x = CreateArtifactResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateArtifactResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateArtifactResponse) ProtoMessage() {} + +func (x *CreateArtifactResponse) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateArtifactResponse.ProtoReflect.Descriptor instead. +func (*CreateArtifactResponse) Descriptor() ([]byte, []int) { + return file_flyteidl2_datacatalog_datacatalog_proto_rawDescGZIP(), []int{7} +} + +// Request message for tagging an Artifact. 
+type AddTagRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Tag *Tag `protobuf:"bytes,1,opt,name=tag,proto3" json:"tag,omitempty"` +} + +func (x *AddTagRequest) Reset() { + *x = AddTagRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AddTagRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AddTagRequest) ProtoMessage() {} + +func (x *AddTagRequest) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AddTagRequest.ProtoReflect.Descriptor instead. +func (*AddTagRequest) Descriptor() ([]byte, []int) { + return file_flyteidl2_datacatalog_datacatalog_proto_rawDescGZIP(), []int{8} +} + +func (x *AddTagRequest) GetTag() *Tag { + if x != nil { + return x.Tag + } + return nil +} + +// Response message for tagging an Artifact. 
+type AddTagResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *AddTagResponse) Reset() { + *x = AddTagResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AddTagResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AddTagResponse) ProtoMessage() {} + +func (x *AddTagResponse) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AddTagResponse.ProtoReflect.Descriptor instead. +func (*AddTagResponse) Descriptor() ([]byte, []int) { + return file_flyteidl2_datacatalog_datacatalog_proto_rawDescGZIP(), []int{9} +} + +// List the artifacts that belong to the Dataset, optionally filtered using filtered expression. 
+type ListArtifactsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Use a datasetID for which you want to retrieve the artifacts + Dataset *DatasetID `protobuf:"bytes,1,opt,name=dataset,proto3" json:"dataset,omitempty"` + // Apply the filter expression to this query + Filter *FilterExpression `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` + // Pagination options to get a page of artifacts + Pagination *PaginationOptions `protobuf:"bytes,3,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (x *ListArtifactsRequest) Reset() { + *x = ListArtifactsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListArtifactsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListArtifactsRequest) ProtoMessage() {} + +func (x *ListArtifactsRequest) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListArtifactsRequest.ProtoReflect.Descriptor instead. 
+func (*ListArtifactsRequest) Descriptor() ([]byte, []int) { + return file_flyteidl2_datacatalog_datacatalog_proto_rawDescGZIP(), []int{10} +} + +func (x *ListArtifactsRequest) GetDataset() *DatasetID { + if x != nil { + return x.Dataset + } + return nil +} + +func (x *ListArtifactsRequest) GetFilter() *FilterExpression { + if x != nil { + return x.Filter + } + return nil +} + +func (x *ListArtifactsRequest) GetPagination() *PaginationOptions { + if x != nil { + return x.Pagination + } + return nil +} + +// Response to list artifacts +type ListArtifactsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The list of artifacts + Artifacts []*Artifact `protobuf:"bytes,1,rep,name=artifacts,proto3" json:"artifacts,omitempty"` + // Token to use to request the next page, pass this into the next requests PaginationOptions + NextToken string `protobuf:"bytes,2,opt,name=next_token,json=nextToken,proto3" json:"next_token,omitempty"` +} + +func (x *ListArtifactsResponse) Reset() { + *x = ListArtifactsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListArtifactsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListArtifactsResponse) ProtoMessage() {} + +func (x *ListArtifactsResponse) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListArtifactsResponse.ProtoReflect.Descriptor instead. 
+func (*ListArtifactsResponse) Descriptor() ([]byte, []int) { + return file_flyteidl2_datacatalog_datacatalog_proto_rawDescGZIP(), []int{11} +} + +func (x *ListArtifactsResponse) GetArtifacts() []*Artifact { + if x != nil { + return x.Artifacts + } + return nil +} + +func (x *ListArtifactsResponse) GetNextToken() string { + if x != nil { + return x.NextToken + } + return "" +} + +// List the datasets for the given query +type ListDatasetsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Apply the filter expression to this query + Filter *FilterExpression `protobuf:"bytes,1,opt,name=filter,proto3" json:"filter,omitempty"` + // Pagination options to get a page of datasets + Pagination *PaginationOptions `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (x *ListDatasetsRequest) Reset() { + *x = ListDatasetsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListDatasetsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListDatasetsRequest) ProtoMessage() {} + +func (x *ListDatasetsRequest) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListDatasetsRequest.ProtoReflect.Descriptor instead. 
+func (*ListDatasetsRequest) Descriptor() ([]byte, []int) { + return file_flyteidl2_datacatalog_datacatalog_proto_rawDescGZIP(), []int{12} +} + +func (x *ListDatasetsRequest) GetFilter() *FilterExpression { + if x != nil { + return x.Filter + } + return nil +} + +func (x *ListDatasetsRequest) GetPagination() *PaginationOptions { + if x != nil { + return x.Pagination + } + return nil +} + +// List the datasets response with token for next pagination +type ListDatasetsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The list of datasets + Datasets []*Dataset `protobuf:"bytes,1,rep,name=datasets,proto3" json:"datasets,omitempty"` + // Token to use to request the next page, pass this into the next requests PaginationOptions + NextToken string `protobuf:"bytes,2,opt,name=next_token,json=nextToken,proto3" json:"next_token,omitempty"` +} + +func (x *ListDatasetsResponse) Reset() { + *x = ListDatasetsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListDatasetsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListDatasetsResponse) ProtoMessage() {} + +func (x *ListDatasetsResponse) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListDatasetsResponse.ProtoReflect.Descriptor instead. 
+func (*ListDatasetsResponse) Descriptor() ([]byte, []int) { + return file_flyteidl2_datacatalog_datacatalog_proto_rawDescGZIP(), []int{13} +} + +func (x *ListDatasetsResponse) GetDatasets() []*Dataset { + if x != nil { + return x.Datasets + } + return nil +} + +func (x *ListDatasetsResponse) GetNextToken() string { + if x != nil { + return x.NextToken + } + return "" +} + +// Request message for updating an Artifact and overwriting its associated ArtifactData. +type UpdateArtifactRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // ID of dataset the artifact is associated with + Dataset *DatasetID `protobuf:"bytes,1,opt,name=dataset,proto3" json:"dataset,omitempty"` + // Either ID of artifact or name of tag to retrieve existing artifact from + // + // Types that are assignable to QueryHandle: + // + // *UpdateArtifactRequest_ArtifactId + // *UpdateArtifactRequest_TagName + QueryHandle isUpdateArtifactRequest_QueryHandle `protobuf_oneof:"query_handle"` + // List of data to overwrite stored artifact data with. Must contain ALL data for updated Artifact as any missing + // ArtifactData entries will be removed from the underlying blob storage and database. 
+ Data []*ArtifactData `protobuf:"bytes,4,rep,name=data,proto3" json:"data,omitempty"` + // Update execution metadata(including execution domain, name, node, project data) when overwriting cache + Metadata *Metadata `protobuf:"bytes,5,opt,name=metadata,proto3" json:"metadata,omitempty"` +} + +func (x *UpdateArtifactRequest) Reset() { + *x = UpdateArtifactRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateArtifactRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateArtifactRequest) ProtoMessage() {} + +func (x *UpdateArtifactRequest) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateArtifactRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateArtifactRequest) Descriptor() ([]byte, []int) { + return file_flyteidl2_datacatalog_datacatalog_proto_rawDescGZIP(), []int{14} +} + +func (x *UpdateArtifactRequest) GetDataset() *DatasetID { + if x != nil { + return x.Dataset + } + return nil +} + +func (m *UpdateArtifactRequest) GetQueryHandle() isUpdateArtifactRequest_QueryHandle { + if m != nil { + return m.QueryHandle + } + return nil +} + +func (x *UpdateArtifactRequest) GetArtifactId() string { + if x, ok := x.GetQueryHandle().(*UpdateArtifactRequest_ArtifactId); ok { + return x.ArtifactId + } + return "" +} + +func (x *UpdateArtifactRequest) GetTagName() string { + if x, ok := x.GetQueryHandle().(*UpdateArtifactRequest_TagName); ok { + return x.TagName + } + return "" +} + +func (x *UpdateArtifactRequest) GetData() []*ArtifactData { + if x != nil { + return x.Data + } + return nil +} + +func (x *UpdateArtifactRequest) GetMetadata() *Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +type isUpdateArtifactRequest_QueryHandle interface { + isUpdateArtifactRequest_QueryHandle() +} + +type UpdateArtifactRequest_ArtifactId struct { + ArtifactId string `protobuf:"bytes,2,opt,name=artifact_id,json=artifactId,proto3,oneof"` +} + +type UpdateArtifactRequest_TagName struct { + TagName string `protobuf:"bytes,3,opt,name=tag_name,json=tagName,proto3,oneof"` +} + +func (*UpdateArtifactRequest_ArtifactId) isUpdateArtifactRequest_QueryHandle() {} + +func (*UpdateArtifactRequest_TagName) isUpdateArtifactRequest_QueryHandle() {} + +// Response message for updating an Artifact. 
+type UpdateArtifactResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The unique ID of the artifact updated + ArtifactId string `protobuf:"bytes,1,opt,name=artifact_id,json=artifactId,proto3" json:"artifact_id,omitempty"` +} + +func (x *UpdateArtifactResponse) Reset() { + *x = UpdateArtifactResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateArtifactResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateArtifactResponse) ProtoMessage() {} + +func (x *UpdateArtifactResponse) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateArtifactResponse.ProtoReflect.Descriptor instead. +func (*UpdateArtifactResponse) Descriptor() ([]byte, []int) { + return file_flyteidl2_datacatalog_datacatalog_proto_rawDescGZIP(), []int{15} +} + +func (x *UpdateArtifactResponse) GetArtifactId() string { + if x != nil { + return x.ArtifactId + } + return "" +} + +// ReservationID message that is composed of several string fields. 
+type ReservationID struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The unique ID for the reserved dataset + DatasetId *DatasetID `protobuf:"bytes,1,opt,name=dataset_id,json=datasetId,proto3" json:"dataset_id,omitempty"` + // The specific artifact tag for the reservation + TagName string `protobuf:"bytes,2,opt,name=tag_name,json=tagName,proto3" json:"tag_name,omitempty"` +} + +func (x *ReservationID) Reset() { + *x = ReservationID{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReservationID) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReservationID) ProtoMessage() {} + +func (x *ReservationID) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReservationID.ProtoReflect.Descriptor instead. +func (*ReservationID) Descriptor() ([]byte, []int) { + return file_flyteidl2_datacatalog_datacatalog_proto_rawDescGZIP(), []int{16} +} + +func (x *ReservationID) GetDatasetId() *DatasetID { + if x != nil { + return x.DatasetId + } + return nil +} + +func (x *ReservationID) GetTagName() string { + if x != nil { + return x.TagName + } + return "" +} + +// Try to acquire or extend an artifact reservation. If an active reservation exists, retrieve that instance. 
+type GetOrExtendReservationRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The unique ID for the reservation + ReservationId *ReservationID `protobuf:"bytes,1,opt,name=reservation_id,json=reservationId,proto3" json:"reservation_id,omitempty"` + // The unique ID of the owner for the reservation + OwnerId string `protobuf:"bytes,2,opt,name=owner_id,json=ownerId,proto3" json:"owner_id,omitempty"` + // Requested reservation extension heartbeat interval + HeartbeatInterval *durationpb.Duration `protobuf:"bytes,3,opt,name=heartbeat_interval,json=heartbeatInterval,proto3" json:"heartbeat_interval,omitempty"` +} + +func (x *GetOrExtendReservationRequest) Reset() { + *x = GetOrExtendReservationRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetOrExtendReservationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetOrExtendReservationRequest) ProtoMessage() {} + +func (x *GetOrExtendReservationRequest) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetOrExtendReservationRequest.ProtoReflect.Descriptor instead. 
+func (*GetOrExtendReservationRequest) Descriptor() ([]byte, []int) { + return file_flyteidl2_datacatalog_datacatalog_proto_rawDescGZIP(), []int{17} +} + +func (x *GetOrExtendReservationRequest) GetReservationId() *ReservationID { + if x != nil { + return x.ReservationId + } + return nil +} + +func (x *GetOrExtendReservationRequest) GetOwnerId() string { + if x != nil { + return x.OwnerId + } + return "" +} + +func (x *GetOrExtendReservationRequest) GetHeartbeatInterval() *durationpb.Duration { + if x != nil { + return x.HeartbeatInterval + } + return nil +} + +// A reservation including owner, heartbeat interval, expiration timestamp, and various metadata. +type Reservation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The unique ID for the reservation + ReservationId *ReservationID `protobuf:"bytes,1,opt,name=reservation_id,json=reservationId,proto3" json:"reservation_id,omitempty"` + // The unique ID of the owner for the reservation + OwnerId string `protobuf:"bytes,2,opt,name=owner_id,json=ownerId,proto3" json:"owner_id,omitempty"` + // Recommended heartbeat interval to extend reservation + HeartbeatInterval *durationpb.Duration `protobuf:"bytes,3,opt,name=heartbeat_interval,json=heartbeatInterval,proto3" json:"heartbeat_interval,omitempty"` + // Expiration timestamp of this reservation + ExpiresAt *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=expires_at,json=expiresAt,proto3" json:"expires_at,omitempty"` + // Free-form metadata associated with the artifact + Metadata *Metadata `protobuf:"bytes,6,opt,name=metadata,proto3" json:"metadata,omitempty"` +} + +func (x *Reservation) Reset() { + *x = Reservation{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Reservation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func 
(*Reservation) ProtoMessage() {} + +func (x *Reservation) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Reservation.ProtoReflect.Descriptor instead. +func (*Reservation) Descriptor() ([]byte, []int) { + return file_flyteidl2_datacatalog_datacatalog_proto_rawDescGZIP(), []int{18} +} + +func (x *Reservation) GetReservationId() *ReservationID { + if x != nil { + return x.ReservationId + } + return nil +} + +func (x *Reservation) GetOwnerId() string { + if x != nil { + return x.OwnerId + } + return "" +} + +func (x *Reservation) GetHeartbeatInterval() *durationpb.Duration { + if x != nil { + return x.HeartbeatInterval + } + return nil +} + +func (x *Reservation) GetExpiresAt() *timestamppb.Timestamp { + if x != nil { + return x.ExpiresAt + } + return nil +} + +func (x *Reservation) GetMetadata() *Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +// Response including either a newly minted reservation or the existing reservation +type GetOrExtendReservationResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The reservation to be acquired or extended + Reservation *Reservation `protobuf:"bytes,1,opt,name=reservation,proto3" json:"reservation,omitempty"` +} + +func (x *GetOrExtendReservationResponse) Reset() { + *x = GetOrExtendReservationResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetOrExtendReservationResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetOrExtendReservationResponse) ProtoMessage() 
{} + +func (x *GetOrExtendReservationResponse) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetOrExtendReservationResponse.ProtoReflect.Descriptor instead. +func (*GetOrExtendReservationResponse) Descriptor() ([]byte, []int) { + return file_flyteidl2_datacatalog_datacatalog_proto_rawDescGZIP(), []int{19} +} + +func (x *GetOrExtendReservationResponse) GetReservation() *Reservation { + if x != nil { + return x.Reservation + } + return nil +} + +// Request to release reservation +type ReleaseReservationRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The unique ID for the reservation + ReservationId *ReservationID `protobuf:"bytes,1,opt,name=reservation_id,json=reservationId,proto3" json:"reservation_id,omitempty"` + // The unique ID of the owner for the reservation + OwnerId string `protobuf:"bytes,2,opt,name=owner_id,json=ownerId,proto3" json:"owner_id,omitempty"` +} + +func (x *ReleaseReservationRequest) Reset() { + *x = ReleaseReservationRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReleaseReservationRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReleaseReservationRequest) ProtoMessage() {} + +func (x *ReleaseReservationRequest) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } 
+ return mi.MessageOf(x) +} + +// Deprecated: Use ReleaseReservationRequest.ProtoReflect.Descriptor instead. +func (*ReleaseReservationRequest) Descriptor() ([]byte, []int) { + return file_flyteidl2_datacatalog_datacatalog_proto_rawDescGZIP(), []int{20} +} + +func (x *ReleaseReservationRequest) GetReservationId() *ReservationID { + if x != nil { + return x.ReservationId + } + return nil +} + +func (x *ReleaseReservationRequest) GetOwnerId() string { + if x != nil { + return x.OwnerId + } + return "" +} + +// Response to release reservation +type ReleaseReservationResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ReleaseReservationResponse) Reset() { + *x = ReleaseReservationResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReleaseReservationResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReleaseReservationResponse) ProtoMessage() {} + +func (x *ReleaseReservationResponse) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReleaseReservationResponse.ProtoReflect.Descriptor instead. +func (*ReleaseReservationResponse) Descriptor() ([]byte, []int) { + return file_flyteidl2_datacatalog_datacatalog_proto_rawDescGZIP(), []int{21} +} + +// Dataset message. It is uniquely identified by DatasetID. 
+type Dataset struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id *DatasetID `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Metadata *Metadata `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` + PartitionKeys []string `protobuf:"bytes,3,rep,name=partitionKeys,proto3" json:"partitionKeys,omitempty"` +} + +func (x *Dataset) Reset() { + *x = Dataset{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Dataset) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Dataset) ProtoMessage() {} + +func (x *Dataset) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Dataset.ProtoReflect.Descriptor instead. 
+func (*Dataset) Descriptor() ([]byte, []int) { + return file_flyteidl2_datacatalog_datacatalog_proto_rawDescGZIP(), []int{22} +} + +func (x *Dataset) GetId() *DatasetID { + if x != nil { + return x.Id + } + return nil +} + +func (x *Dataset) GetMetadata() *Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *Dataset) GetPartitionKeys() []string { + if x != nil { + return x.PartitionKeys + } + return nil +} + +// An artifact could have multiple partitions and each partition can have an arbitrary string key/value pair +type Partition struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *Partition) Reset() { + *x = Partition{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Partition) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Partition) ProtoMessage() {} + +func (x *Partition) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Partition.ProtoReflect.Descriptor instead. +func (*Partition) Descriptor() ([]byte, []int) { + return file_flyteidl2_datacatalog_datacatalog_proto_rawDescGZIP(), []int{23} +} + +func (x *Partition) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *Partition) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +// DatasetID message that is composed of several string fields. 
+type DatasetID struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"` // The name of the project + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` // The name of the dataset + Domain string `protobuf:"bytes,3,opt,name=domain,proto3" json:"domain,omitempty"` // The domain (eg. environment) + Version string `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"` // Version of the data schema + UUID string `protobuf:"bytes,5,opt,name=UUID,proto3" json:"UUID,omitempty"` // UUID for the dataset (if set the above fields are optional) + // Optional, org key applied to the resource. + Org string `protobuf:"bytes,6,opt,name=org,proto3" json:"org,omitempty"` +} + +func (x *DatasetID) Reset() { + *x = DatasetID{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DatasetID) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DatasetID) ProtoMessage() {} + +func (x *DatasetID) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DatasetID.ProtoReflect.Descriptor instead. 
+func (*DatasetID) Descriptor() ([]byte, []int) { + return file_flyteidl2_datacatalog_datacatalog_proto_rawDescGZIP(), []int{24} +} + +func (x *DatasetID) GetProject() string { + if x != nil { + return x.Project + } + return "" +} + +func (x *DatasetID) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *DatasetID) GetDomain() string { + if x != nil { + return x.Domain + } + return "" +} + +func (x *DatasetID) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *DatasetID) GetUUID() string { + if x != nil { + return x.UUID + } + return "" +} + +func (x *DatasetID) GetOrg() string { + if x != nil { + return x.Org + } + return "" +} + +// Artifact message. It is composed of several string fields. +type Artifact struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` // The unique ID of the artifact + Dataset *DatasetID `protobuf:"bytes,2,opt,name=dataset,proto3" json:"dataset,omitempty"` // The Dataset that the artifact belongs to + Data []*ArtifactData `protobuf:"bytes,3,rep,name=data,proto3" json:"data,omitempty"` // A list of data that is associated with the artifact + Metadata *Metadata `protobuf:"bytes,4,opt,name=metadata,proto3" json:"metadata,omitempty"` // Free-form metadata associated with the artifact + Partitions []*Partition `protobuf:"bytes,5,rep,name=partitions,proto3" json:"partitions,omitempty"` + Tags []*Tag `protobuf:"bytes,6,rep,name=tags,proto3" json:"tags,omitempty"` + CreatedAt *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` // creation timestamp of artifact, autogenerated by service +} + +func (x *Artifact) Reset() { + *x = Artifact{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + 
ms.StoreMessageInfo(mi) + } +} + +func (x *Artifact) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Artifact) ProtoMessage() {} + +func (x *Artifact) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Artifact.ProtoReflect.Descriptor instead. +func (*Artifact) Descriptor() ([]byte, []int) { + return file_flyteidl2_datacatalog_datacatalog_proto_rawDescGZIP(), []int{25} +} + +func (x *Artifact) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *Artifact) GetDataset() *DatasetID { + if x != nil { + return x.Dataset + } + return nil +} + +func (x *Artifact) GetData() []*ArtifactData { + if x != nil { + return x.Data + } + return nil +} + +func (x *Artifact) GetMetadata() *Metadata { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *Artifact) GetPartitions() []*Partition { + if x != nil { + return x.Partitions + } + return nil +} + +func (x *Artifact) GetTags() []*Tag { + if x != nil { + return x.Tags + } + return nil +} + +func (x *Artifact) GetCreatedAt() *timestamppb.Timestamp { + if x != nil { + return x.CreatedAt + } + return nil +} + +// ArtifactData that belongs to an artifact +type ArtifactData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Value *core.Literal `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *ArtifactData) Reset() { + *x = ArtifactData{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + 
+func (x *ArtifactData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ArtifactData) ProtoMessage() {} + +func (x *ArtifactData) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ArtifactData.ProtoReflect.Descriptor instead. +func (*ArtifactData) Descriptor() ([]byte, []int) { + return file_flyteidl2_datacatalog_datacatalog_proto_rawDescGZIP(), []int{26} +} + +func (x *ArtifactData) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ArtifactData) GetValue() *core.Literal { + if x != nil { + return x.Value + } + return nil +} + +// Tag message that is unique to a Dataset. It is associated to a single artifact and +// can be retrieved by name later. +type Tag struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Name of tag + ArtifactId string `protobuf:"bytes,2,opt,name=artifact_id,json=artifactId,proto3" json:"artifact_id,omitempty"` // The tagged artifact + Dataset *DatasetID `protobuf:"bytes,3,opt,name=dataset,proto3" json:"dataset,omitempty"` // The Dataset that this tag belongs to +} + +func (x *Tag) Reset() { + *x = Tag{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Tag) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Tag) ProtoMessage() {} + +func (x *Tag) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Tag.ProtoReflect.Descriptor instead. +func (*Tag) Descriptor() ([]byte, []int) { + return file_flyteidl2_datacatalog_datacatalog_proto_rawDescGZIP(), []int{27} +} + +func (x *Tag) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Tag) GetArtifactId() string { + if x != nil { + return x.ArtifactId + } + return "" +} + +func (x *Tag) GetDataset() *DatasetID { + if x != nil { + return x.Dataset + } + return nil +} + +// Metadata representation for artifacts and datasets +type Metadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + KeyMap map[string]string `protobuf:"bytes,1,rep,name=key_map,json=keyMap,proto3" json:"key_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // key map is a dictionary of key/val strings that represent metadata +} + +func (x *Metadata) Reset() { + *x = Metadata{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Metadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Metadata) ProtoMessage() {} + +func (x *Metadata) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Metadata.ProtoReflect.Descriptor instead. 
+func (*Metadata) Descriptor() ([]byte, []int) { + return file_flyteidl2_datacatalog_datacatalog_proto_rawDescGZIP(), []int{28} +} + +func (x *Metadata) GetKeyMap() map[string]string { + if x != nil { + return x.KeyMap + } + return nil +} + +// Filter expression that is composed of a combination of single filters +type FilterExpression struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Filters []*SinglePropertyFilter `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"` +} + +func (x *FilterExpression) Reset() { + *x = FilterExpression{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FilterExpression) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FilterExpression) ProtoMessage() {} + +func (x *FilterExpression) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FilterExpression.ProtoReflect.Descriptor instead. +func (*FilterExpression) Descriptor() ([]byte, []int) { + return file_flyteidl2_datacatalog_datacatalog_proto_rawDescGZIP(), []int{29} +} + +func (x *FilterExpression) GetFilters() []*SinglePropertyFilter { + if x != nil { + return x.Filters + } + return nil +} + +// A single property to filter on. 
+type SinglePropertyFilter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to PropertyFilter: + // + // *SinglePropertyFilter_TagFilter + // *SinglePropertyFilter_PartitionFilter + // *SinglePropertyFilter_ArtifactFilter + // *SinglePropertyFilter_DatasetFilter + PropertyFilter isSinglePropertyFilter_PropertyFilter `protobuf_oneof:"property_filter"` + Operator SinglePropertyFilter_ComparisonOperator `protobuf:"varint,10,opt,name=operator,proto3,enum=flyteidl2.datacatalog.SinglePropertyFilter_ComparisonOperator" json:"operator,omitempty"` // field 10 in case we add more entities to query +} + +func (x *SinglePropertyFilter) Reset() { + *x = SinglePropertyFilter{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SinglePropertyFilter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SinglePropertyFilter) ProtoMessage() {} + +func (x *SinglePropertyFilter) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[30] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SinglePropertyFilter.ProtoReflect.Descriptor instead. 
+func (*SinglePropertyFilter) Descriptor() ([]byte, []int) { + return file_flyteidl2_datacatalog_datacatalog_proto_rawDescGZIP(), []int{30} +} + +func (m *SinglePropertyFilter) GetPropertyFilter() isSinglePropertyFilter_PropertyFilter { + if m != nil { + return m.PropertyFilter + } + return nil +} + +func (x *SinglePropertyFilter) GetTagFilter() *TagPropertyFilter { + if x, ok := x.GetPropertyFilter().(*SinglePropertyFilter_TagFilter); ok { + return x.TagFilter + } + return nil +} + +func (x *SinglePropertyFilter) GetPartitionFilter() *PartitionPropertyFilter { + if x, ok := x.GetPropertyFilter().(*SinglePropertyFilter_PartitionFilter); ok { + return x.PartitionFilter + } + return nil +} + +func (x *SinglePropertyFilter) GetArtifactFilter() *ArtifactPropertyFilter { + if x, ok := x.GetPropertyFilter().(*SinglePropertyFilter_ArtifactFilter); ok { + return x.ArtifactFilter + } + return nil +} + +func (x *SinglePropertyFilter) GetDatasetFilter() *DatasetPropertyFilter { + if x, ok := x.GetPropertyFilter().(*SinglePropertyFilter_DatasetFilter); ok { + return x.DatasetFilter + } + return nil +} + +func (x *SinglePropertyFilter) GetOperator() SinglePropertyFilter_ComparisonOperator { + if x != nil { + return x.Operator + } + return SinglePropertyFilter_EQUALS +} + +type isSinglePropertyFilter_PropertyFilter interface { + isSinglePropertyFilter_PropertyFilter() +} + +type SinglePropertyFilter_TagFilter struct { + TagFilter *TagPropertyFilter `protobuf:"bytes,1,opt,name=tag_filter,json=tagFilter,proto3,oneof"` +} + +type SinglePropertyFilter_PartitionFilter struct { + PartitionFilter *PartitionPropertyFilter `protobuf:"bytes,2,opt,name=partition_filter,json=partitionFilter,proto3,oneof"` +} + +type SinglePropertyFilter_ArtifactFilter struct { + ArtifactFilter *ArtifactPropertyFilter `protobuf:"bytes,3,opt,name=artifact_filter,json=artifactFilter,proto3,oneof"` +} + +type SinglePropertyFilter_DatasetFilter struct { + DatasetFilter *DatasetPropertyFilter 
`protobuf:"bytes,4,opt,name=dataset_filter,json=datasetFilter,proto3,oneof"` +} + +func (*SinglePropertyFilter_TagFilter) isSinglePropertyFilter_PropertyFilter() {} + +func (*SinglePropertyFilter_PartitionFilter) isSinglePropertyFilter_PropertyFilter() {} + +func (*SinglePropertyFilter_ArtifactFilter) isSinglePropertyFilter_PropertyFilter() {} + +func (*SinglePropertyFilter_DatasetFilter) isSinglePropertyFilter_PropertyFilter() {} + +// Artifact properties we can filter by +type ArtifactPropertyFilter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // oneof because we can add more properties in the future + // + // Types that are assignable to Property: + // + // *ArtifactPropertyFilter_ArtifactId + Property isArtifactPropertyFilter_Property `protobuf_oneof:"property"` +} + +func (x *ArtifactPropertyFilter) Reset() { + *x = ArtifactPropertyFilter{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ArtifactPropertyFilter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ArtifactPropertyFilter) ProtoMessage() {} + +func (x *ArtifactPropertyFilter) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[31] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ArtifactPropertyFilter.ProtoReflect.Descriptor instead. 
+func (*ArtifactPropertyFilter) Descriptor() ([]byte, []int) { + return file_flyteidl2_datacatalog_datacatalog_proto_rawDescGZIP(), []int{31} +} + +func (m *ArtifactPropertyFilter) GetProperty() isArtifactPropertyFilter_Property { + if m != nil { + return m.Property + } + return nil +} + +func (x *ArtifactPropertyFilter) GetArtifactId() string { + if x, ok := x.GetProperty().(*ArtifactPropertyFilter_ArtifactId); ok { + return x.ArtifactId + } + return "" +} + +type isArtifactPropertyFilter_Property interface { + isArtifactPropertyFilter_Property() +} + +type ArtifactPropertyFilter_ArtifactId struct { + ArtifactId string `protobuf:"bytes,1,opt,name=artifact_id,json=artifactId,proto3,oneof"` +} + +func (*ArtifactPropertyFilter_ArtifactId) isArtifactPropertyFilter_Property() {} + +// Tag properties we can filter by +type TagPropertyFilter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Property: + // + // *TagPropertyFilter_TagName + Property isTagPropertyFilter_Property `protobuf_oneof:"property"` +} + +func (x *TagPropertyFilter) Reset() { + *x = TagPropertyFilter{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TagPropertyFilter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TagPropertyFilter) ProtoMessage() {} + +func (x *TagPropertyFilter) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[32] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TagPropertyFilter.ProtoReflect.Descriptor instead. 
+func (*TagPropertyFilter) Descriptor() ([]byte, []int) { + return file_flyteidl2_datacatalog_datacatalog_proto_rawDescGZIP(), []int{32} +} + +func (m *TagPropertyFilter) GetProperty() isTagPropertyFilter_Property { + if m != nil { + return m.Property + } + return nil +} + +func (x *TagPropertyFilter) GetTagName() string { + if x, ok := x.GetProperty().(*TagPropertyFilter_TagName); ok { + return x.TagName + } + return "" +} + +type isTagPropertyFilter_Property interface { + isTagPropertyFilter_Property() +} + +type TagPropertyFilter_TagName struct { + TagName string `protobuf:"bytes,1,opt,name=tag_name,json=tagName,proto3,oneof"` +} + +func (*TagPropertyFilter_TagName) isTagPropertyFilter_Property() {} + +// Partition properties we can filter by +type PartitionPropertyFilter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Property: + // + // *PartitionPropertyFilter_KeyVal + Property isPartitionPropertyFilter_Property `protobuf_oneof:"property"` +} + +func (x *PartitionPropertyFilter) Reset() { + *x = PartitionPropertyFilter{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PartitionPropertyFilter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PartitionPropertyFilter) ProtoMessage() {} + +func (x *PartitionPropertyFilter) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[33] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PartitionPropertyFilter.ProtoReflect.Descriptor instead. 
+func (*PartitionPropertyFilter) Descriptor() ([]byte, []int) { + return file_flyteidl2_datacatalog_datacatalog_proto_rawDescGZIP(), []int{33} +} + +func (m *PartitionPropertyFilter) GetProperty() isPartitionPropertyFilter_Property { + if m != nil { + return m.Property + } + return nil +} + +func (x *PartitionPropertyFilter) GetKeyVal() *KeyValuePair { + if x, ok := x.GetProperty().(*PartitionPropertyFilter_KeyVal); ok { + return x.KeyVal + } + return nil +} + +type isPartitionPropertyFilter_Property interface { + isPartitionPropertyFilter_Property() +} + +type PartitionPropertyFilter_KeyVal struct { + KeyVal *KeyValuePair `protobuf:"bytes,1,opt,name=key_val,json=keyVal,proto3,oneof"` +} + +func (*PartitionPropertyFilter_KeyVal) isPartitionPropertyFilter_Property() {} + +type KeyValuePair struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *KeyValuePair) Reset() { + *x = KeyValuePair{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeyValuePair) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeyValuePair) ProtoMessage() {} + +func (x *KeyValuePair) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeyValuePair.ProtoReflect.Descriptor instead. 
+func (*KeyValuePair) Descriptor() ([]byte, []int) { + return file_flyteidl2_datacatalog_datacatalog_proto_rawDescGZIP(), []int{34} +} + +func (x *KeyValuePair) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *KeyValuePair) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +// Dataset properties we can filter by +type DatasetPropertyFilter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Property: + // + // *DatasetPropertyFilter_Project + // *DatasetPropertyFilter_Name + // *DatasetPropertyFilter_Domain + // *DatasetPropertyFilter_Version + // *DatasetPropertyFilter_Org + Property isDatasetPropertyFilter_Property `protobuf_oneof:"property"` +} + +func (x *DatasetPropertyFilter) Reset() { + *x = DatasetPropertyFilter{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DatasetPropertyFilter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DatasetPropertyFilter) ProtoMessage() {} + +func (x *DatasetPropertyFilter) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[35] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DatasetPropertyFilter.ProtoReflect.Descriptor instead. 
+func (*DatasetPropertyFilter) Descriptor() ([]byte, []int) { + return file_flyteidl2_datacatalog_datacatalog_proto_rawDescGZIP(), []int{35} +} + +func (m *DatasetPropertyFilter) GetProperty() isDatasetPropertyFilter_Property { + if m != nil { + return m.Property + } + return nil +} + +func (x *DatasetPropertyFilter) GetProject() string { + if x, ok := x.GetProperty().(*DatasetPropertyFilter_Project); ok { + return x.Project + } + return "" +} + +func (x *DatasetPropertyFilter) GetName() string { + if x, ok := x.GetProperty().(*DatasetPropertyFilter_Name); ok { + return x.Name + } + return "" +} + +func (x *DatasetPropertyFilter) GetDomain() string { + if x, ok := x.GetProperty().(*DatasetPropertyFilter_Domain); ok { + return x.Domain + } + return "" +} + +func (x *DatasetPropertyFilter) GetVersion() string { + if x, ok := x.GetProperty().(*DatasetPropertyFilter_Version); ok { + return x.Version + } + return "" +} + +func (x *DatasetPropertyFilter) GetOrg() string { + if x, ok := x.GetProperty().(*DatasetPropertyFilter_Org); ok { + return x.Org + } + return "" +} + +type isDatasetPropertyFilter_Property interface { + isDatasetPropertyFilter_Property() +} + +type DatasetPropertyFilter_Project struct { + Project string `protobuf:"bytes,1,opt,name=project,proto3,oneof"` +} + +type DatasetPropertyFilter_Name struct { + Name string `protobuf:"bytes,2,opt,name=name,proto3,oneof"` +} + +type DatasetPropertyFilter_Domain struct { + Domain string `protobuf:"bytes,3,opt,name=domain,proto3,oneof"` +} + +type DatasetPropertyFilter_Version struct { + Version string `protobuf:"bytes,4,opt,name=version,proto3,oneof"` +} + +type DatasetPropertyFilter_Org struct { + // Optional, org key applied to the dataset. 
+ Org string `protobuf:"bytes,5,opt,name=org,proto3,oneof"` +} + +func (*DatasetPropertyFilter_Project) isDatasetPropertyFilter_Property() {} + +func (*DatasetPropertyFilter_Name) isDatasetPropertyFilter_Property() {} + +func (*DatasetPropertyFilter_Domain) isDatasetPropertyFilter_Property() {} + +func (*DatasetPropertyFilter_Version) isDatasetPropertyFilter_Property() {} + +func (*DatasetPropertyFilter_Org) isDatasetPropertyFilter_Property() {} + +// Pagination options for making list requests +type PaginationOptions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // the max number of results to return + Limit uint32 `protobuf:"varint,1,opt,name=limit,proto3" json:"limit,omitempty"` + // the token to pass to fetch the next page + Token string `protobuf:"bytes,2,opt,name=token,proto3" json:"token,omitempty"` + // the property that we want to sort the results by + SortKey PaginationOptions_SortKey `protobuf:"varint,3,opt,name=sortKey,proto3,enum=flyteidl2.datacatalog.PaginationOptions_SortKey" json:"sortKey,omitempty"` + // the sort order of the results + SortOrder PaginationOptions_SortOrder `protobuf:"varint,4,opt,name=sortOrder,proto3,enum=flyteidl2.datacatalog.PaginationOptions_SortOrder" json:"sortOrder,omitempty"` +} + +func (x *PaginationOptions) Reset() { + *x = PaginationOptions{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PaginationOptions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PaginationOptions) ProtoMessage() {} + +func (x *PaginationOptions) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[36] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + 
ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PaginationOptions.ProtoReflect.Descriptor instead. +func (*PaginationOptions) Descriptor() ([]byte, []int) { + return file_flyteidl2_datacatalog_datacatalog_proto_rawDescGZIP(), []int{36} +} + +func (x *PaginationOptions) GetLimit() uint32 { + if x != nil { + return x.Limit + } + return 0 +} + +func (x *PaginationOptions) GetToken() string { + if x != nil { + return x.Token + } + return "" +} + +func (x *PaginationOptions) GetSortKey() PaginationOptions_SortKey { + if x != nil { + return x.SortKey + } + return PaginationOptions_CREATION_TIME +} + +func (x *PaginationOptions) GetSortOrder() PaginationOptions_SortOrder { + if x != nil { + return x.SortOrder + } + return PaginationOptions_DESCENDING +} + +var File_flyteidl2_datacatalog_datacatalog_proto protoreflect.FileDescriptor + +var file_flyteidl2_datacatalog_datacatalog_proto_rawDesc = []byte{ + 0x0a, 0x27, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x64, 0x61, 0x74, 0x61, + 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, + 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, + 0x1a, 0x1d, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, + 0x2f, 0x6c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0x50, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 
0x44, 0x61, 0x74, 0x61, 0x73, 0x65, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x07, 0x64, 0x61, 0x74, 0x61, + 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, + 0x67, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x52, 0x07, 0x64, 0x61, 0x74, 0x61, 0x73, + 0x65, 0x74, 0x22, 0x17, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, + 0x73, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x4f, 0x0a, 0x11, 0x47, + 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x3a, 0x0a, 0x07, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x20, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, + 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, + 0x74, 0x49, 0x44, 0x52, 0x07, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x22, 0x4e, 0x0a, 0x12, + 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, + 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x44, 0x61, 0x74, 0x61, + 0x73, 0x65, 0x74, 0x52, 0x07, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x22, 0xa0, 0x01, 0x0a, + 0x12, 0x47, 0x65, 0x74, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x3a, 0x0a, 0x07, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, + 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x44, 0x61, 0x74, + 
0x61, 0x73, 0x65, 0x74, 0x49, 0x44, 0x52, 0x07, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x12, + 0x21, 0x0a, 0x0b, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0a, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, + 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x08, 0x74, 0x61, 0x67, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x07, 0x74, 0x61, 0x67, 0x4e, 0x61, 0x6d, 0x65, 0x42, + 0x0e, 0x0a, 0x0c, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x22, + 0x52, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x08, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, + 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, + 0x2e, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x52, 0x08, 0x61, 0x72, 0x74, 0x69, 0x66, + 0x61, 0x63, 0x74, 0x22, 0x54, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x72, 0x74, + 0x69, 0x66, 0x61, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x08, + 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, + 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, + 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x52, + 0x08, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x22, 0x18, 0x0a, 0x16, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x3d, 0x0a, 0x0d, 0x41, 0x64, 0x64, 0x54, 0x61, 0x67, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x03, 0x74, 0x61, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x66, 0x6c, 
0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, + 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x54, 0x61, 0x67, 0x52, 0x03, 0x74, + 0x61, 0x67, 0x22, 0x10, 0x0a, 0x0e, 0x41, 0x64, 0x64, 0x54, 0x61, 0x67, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xdd, 0x01, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x72, 0x74, + 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3a, 0x0a, + 0x07, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, + 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, + 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x49, 0x44, + 0x52, 0x07, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x12, 0x3f, 0x0a, 0x06, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, + 0x67, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x48, 0x0a, 0x0a, 0x70, 0x61, + 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, + 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, + 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x50, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0a, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x75, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x72, 0x74, 0x69, + 0x66, 0x61, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3d, 0x0a, + 0x09, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x1f, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 
0x2e, 0x64, 0x61, 0x74, + 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, + 0x74, 0x52, 0x09, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x12, 0x1d, 0x0a, 0x0a, + 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x6e, 0x65, 0x78, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xa0, 0x01, 0x0a, 0x13, + 0x4c, 0x69, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, + 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x46, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x12, 0x48, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, + 0x2e, 0x50, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x52, 0x0a, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x71, + 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, + 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, + 0x2e, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x52, 0x08, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, + 0x74, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x18, 0x02, 
0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x65, 0x78, 0x74, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x22, 0x99, 0x02, 0x0a, 0x15, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x72, 0x74, 0x69, + 0x66, 0x61, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3a, 0x0a, 0x07, 0x64, + 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, + 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x49, 0x44, 0x52, 0x07, + 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x12, 0x21, 0x0a, 0x0b, 0x61, 0x72, 0x74, 0x69, 0x66, + 0x61, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0a, + 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x08, 0x74, 0x61, + 0x67, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x07, + 0x74, 0x61, 0x67, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x41, 0x72, + 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x44, 0x61, 0x74, 0x61, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x3b, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, + 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x42, 0x0e, 0x0a, + 0x0c, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x22, 0x39, 0x0a, + 0x16, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 
0x1f, 0x0a, 0x0b, 0x61, 0x72, 0x74, 0x69, 0x66, + 0x61, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x72, + 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x49, 0x64, 0x22, 0x6b, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x65, + 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x3f, 0x0a, 0x0a, 0x64, 0x61, 0x74, + 0x61, 0x73, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, + 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x49, 0x44, 0x52, + 0x09, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x61, + 0x67, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x61, + 0x67, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xd1, 0x01, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x4f, 0x72, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, + 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x24, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, + 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, + 0x48, 0x0a, 0x12, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x5f, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 
0x44, 0x75, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, + 0x74, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x22, 0xb7, 0x02, 0x0a, 0x0b, 0x52, 0x65, + 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4b, 0x0a, 0x0e, 0x72, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, + 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x49, + 0x64, 0x12, 0x48, 0x0a, 0x12, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x5f, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, + 0x65, 0x61, 0x74, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, 0x39, 0x0a, 0x0a, 0x65, + 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x65, 0x78, 0x70, + 0x69, 0x72, 0x65, 0x73, 0x41, 0x74, 0x12, 0x3b, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, + 0x2e, 0x4d, 0x65, 0x74, 
0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x22, 0x66, 0x0a, 0x1e, 0x47, 0x65, 0x74, 0x4f, 0x72, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x64, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, + 0x6f, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, + 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x83, 0x01, 0x0a, 0x19, + 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x0e, 0x72, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, + 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x49, + 0x64, 0x22, 0x1c, 0x0a, 0x1a, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x65, + 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x9e, 0x01, 0x0a, 0x07, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x12, 0x30, 0x0a, 0x02, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 
0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, + 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x49, 0x44, 0x52, 0x02, 0x69, 0x64, 0x12, 0x3b, 0x0a, + 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1f, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, + 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x24, 0x0a, 0x0d, 0x70, 0x61, + 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x0d, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, + 0x22, 0x33, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x91, 0x01, 0x0a, 0x09, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, + 0x74, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x55, 0x55, 0x49, 0x44, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x55, 0x55, 0x49, 0x44, 0x12, 0x10, 0x0a, 0x03, 0x6f, 0x72, 0x67, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6f, 0x72, 0x67, 0x22, 0xf9, 0x02, 0x0a, 0x08, 0x41, 0x72, + 
0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x3a, 0x0a, 0x07, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, + 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, + 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x49, 0x44, 0x52, 0x07, 0x64, 0x61, 0x74, 0x61, 0x73, + 0x65, 0x74, 0x12, 0x37, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x23, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, + 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, + 0x74, 0x44, 0x61, 0x74, 0x61, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x3b, 0x0a, 0x08, 0x6d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, + 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x40, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x74, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, + 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, + 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2e, 0x0a, 0x04, 0x74, 0x61, + 0x67, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, + 0x2e, 0x54, 0x61, 0x67, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 
0x61, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x64, 0x41, 0x74, 0x22, 0x51, 0x0a, 0x0c, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, + 0x74, 0x44, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, + 0x6c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x76, 0x0a, 0x03, 0x54, 0x61, 0x67, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x5f, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, + 0x63, 0x74, 0x49, 0x64, 0x12, 0x3a, 0x0a, 0x07, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x44, 0x61, + 0x74, 0x61, 0x73, 0x65, 0x74, 0x49, 0x44, 0x52, 0x07, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, + 0x22, 0x8b, 0x01, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x44, 0x0a, + 0x07, 0x6b, 0x65, 0x79, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, + 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, + 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, + 0x4b, 0x65, 0x79, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 
0x06, 0x6b, 0x65, 0x79, + 0x4d, 0x61, 0x70, 0x1a, 0x39, 0x0a, 0x0b, 0x4b, 0x65, 0x79, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x59, + 0x0a, 0x10, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x45, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, + 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x53, 0x69, 0x6e, 0x67, + 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x22, 0x80, 0x04, 0x0a, 0x14, 0x53, 0x69, + 0x6e, 0x67, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x46, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x12, 0x49, 0x0a, 0x0a, 0x74, 0x61, 0x67, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x54, + 0x61, 0x67, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x48, 0x00, 0x52, 0x09, 0x74, 0x61, 0x67, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x5b, 0x0a, + 0x10, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, + 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, + 0x79, 0x46, 
0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0f, 0x70, 0x61, 0x72, 0x74, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x58, 0x0a, 0x0f, 0x61, 0x72, + 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, + 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x41, 0x72, 0x74, 0x69, + 0x66, 0x61, 0x63, 0x74, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x46, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x48, 0x00, 0x52, 0x0e, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x46, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x12, 0x55, 0x0a, 0x0e, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x5f, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, + 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x50, 0x72, 0x6f, 0x70, + 0x65, 0x72, 0x74, 0x79, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0d, 0x64, 0x61, + 0x74, 0x61, 0x73, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x5a, 0x0a, 0x08, 0x6f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, + 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x70, + 0x65, 0x72, 0x74, 0x79, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x61, + 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x08, 0x6f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x22, 0x20, 0x0a, 0x12, 0x43, 0x6f, 0x6d, 0x70, 0x61, + 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x0a, 0x0a, + 0x06, 0x45, 0x51, 0x55, 0x41, 0x4c, 0x53, 0x10, 
0x00, 0x42, 0x11, 0x0a, 0x0f, 0x70, 0x72, 0x6f, + 0x70, 0x65, 0x72, 0x74, 0x79, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x22, 0x47, 0x0a, 0x16, + 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0b, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, + 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0a, 0x61, + 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x49, 0x64, 0x42, 0x0a, 0x0a, 0x08, 0x70, 0x72, 0x6f, + 0x70, 0x65, 0x72, 0x74, 0x79, 0x22, 0x3c, 0x0a, 0x11, 0x54, 0x61, 0x67, 0x50, 0x72, 0x6f, 0x70, + 0x65, 0x72, 0x74, 0x79, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x08, 0x74, 0x61, + 0x67, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x07, + 0x74, 0x61, 0x67, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0x0a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x79, 0x22, 0x65, 0x0a, 0x17, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x3e, + 0x0a, 0x07, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x23, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, + 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x50, 0x61, 0x69, 0x72, 0x48, 0x00, 0x52, 0x06, 0x6b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x42, 0x0a, + 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x22, 0x36, 0x0a, 0x0c, 0x4b, 0x65, + 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x50, 0x61, 0x69, 0x72, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x22, 0x9f, 0x01, 0x0a, 0x15, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 
0x50, 0x72, + 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1a, 0x0a, 0x07, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x14, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, + 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, + 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x1a, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x07, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x03, 0x6f, 0x72, 0x67, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x09, 0x48, 0x00, 0x52, 0x03, 0x6f, 0x72, 0x67, 0x42, 0x0a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x70, + 0x65, 0x72, 0x74, 0x79, 0x22, 0xa7, 0x02, 0x0a, 0x11, 0x50, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, + 0x6d, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, + 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x4a, 0x0a, 0x07, 0x73, 0x6f, 0x72, 0x74, 0x4b, 0x65, + 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x30, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, + 0x50, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x53, 0x6f, 0x72, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x07, 0x73, 0x6f, 0x72, 0x74, 0x4b, + 0x65, 0x79, 0x12, 0x50, 0x0a, 0x09, 0x73, 0x6f, 0x72, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x32, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x32, 0x2e, 0x64, 0x61, 
0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x50, 0x61, + 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, + 0x53, 0x6f, 0x72, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x09, 0x73, 0x6f, 0x72, 0x74, 0x4f, + 0x72, 0x64, 0x65, 0x72, 0x22, 0x2a, 0x0a, 0x09, 0x53, 0x6f, 0x72, 0x74, 0x4f, 0x72, 0x64, 0x65, + 0x72, 0x12, 0x0e, 0x0a, 0x0a, 0x44, 0x45, 0x53, 0x43, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, + 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x41, 0x53, 0x43, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x01, + 0x22, 0x1c, 0x0a, 0x07, 0x53, 0x6f, 0x72, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x11, 0x0a, 0x0d, 0x43, + 0x52, 0x45, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x49, 0x4d, 0x45, 0x10, 0x00, 0x32, 0xcf, + 0x08, 0x0a, 0x0b, 0x44, 0x61, 0x74, 0x61, 0x43, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x12, 0x6a, + 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x12, + 0x2b, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, + 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x44, 0x61, + 0x74, 0x61, 0x73, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, + 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x73, + 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x61, 0x0a, 0x0a, 0x47, 0x65, + 0x74, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x12, 0x28, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, + 0x2e, 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, + 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 
0x2e, 0x47, 0x65, 0x74, 0x44, 0x61, + 0x74, 0x61, 0x73, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6d, 0x0a, + 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x12, + 0x2c, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, + 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x72, + 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, + 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x72, 0x74, 0x69, + 0x66, 0x61, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x64, 0x0a, 0x0b, + 0x47, 0x65, 0x74, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x12, 0x29, 0x2e, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, + 0x6c, 0x6f, 0x67, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x47, + 0x65, 0x74, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x55, 0x0a, 0x06, 0x41, 0x64, 0x64, 0x54, 0x61, 0x67, 0x12, 0x24, 0x2e, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, + 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x41, 0x64, 0x64, 0x54, 0x61, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, + 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x41, 0x64, 0x64, 0x54, 0x61, + 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6a, 0x0a, 0x0d, 0x4c, 0x69, 0x73, + 
0x74, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x12, 0x2b, 0x2e, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, + 0x6f, 0x67, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x67, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x61, 0x74, + 0x61, 0x73, 0x65, 0x74, 0x73, 0x12, 0x2a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x2b, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, + 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x61, + 0x74, 0x61, 0x73, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6d, + 0x0a, 0x0e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, + 0x12, 0x2c, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, + 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, + 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, + 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, + 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x72, 0x74, + 0x69, 0x66, 0x61, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x85, 0x01, + 0x0a, 0x16, 0x47, 0x65, 0x74, 0x4f, 
0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x34, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, + 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x65, + 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x35, + 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, + 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x72, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x64, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x79, 0x0a, 0x12, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, + 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x2e, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, + 0x6c, 0x6f, 0x67, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x65, 0x72, + 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, + 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x42, 0xdf, 0x01, 0x0a, 0x19, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x42, 0x10, + 0x44, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x48, 0x02, 0x50, 0x01, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, 0x6c, 
0x79, 0x74, 0x65, 0x2f, + 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0xa2, + 0x02, 0x03, 0x46, 0x44, 0x58, 0xaa, 0x02, 0x15, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x32, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0xca, 0x02, 0x15, + 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x44, 0x61, 0x74, 0x61, 0x63, 0x61, + 0x74, 0x61, 0x6c, 0x6f, 0x67, 0xe2, 0x02, 0x21, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x32, 0x5c, 0x44, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x5c, 0x47, 0x50, + 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x16, 0x46, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x3a, 0x3a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, + 0x6f, 0x67, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_flyteidl2_datacatalog_datacatalog_proto_rawDescOnce sync.Once + file_flyteidl2_datacatalog_datacatalog_proto_rawDescData = file_flyteidl2_datacatalog_datacatalog_proto_rawDesc +) + +func file_flyteidl2_datacatalog_datacatalog_proto_rawDescGZIP() []byte { + file_flyteidl2_datacatalog_datacatalog_proto_rawDescOnce.Do(func() { + file_flyteidl2_datacatalog_datacatalog_proto_rawDescData = protoimpl.X.CompressGZIP(file_flyteidl2_datacatalog_datacatalog_proto_rawDescData) + }) + return file_flyteidl2_datacatalog_datacatalog_proto_rawDescData +} + +var file_flyteidl2_datacatalog_datacatalog_proto_enumTypes = make([]protoimpl.EnumInfo, 3) +var file_flyteidl2_datacatalog_datacatalog_proto_msgTypes = make([]protoimpl.MessageInfo, 38) +var file_flyteidl2_datacatalog_datacatalog_proto_goTypes = []interface{}{ + (SinglePropertyFilter_ComparisonOperator)(0), // 0: flyteidl2.datacatalog.SinglePropertyFilter.ComparisonOperator + (PaginationOptions_SortOrder)(0), // 1: 
flyteidl2.datacatalog.PaginationOptions.SortOrder + (PaginationOptions_SortKey)(0), // 2: flyteidl2.datacatalog.PaginationOptions.SortKey + (*CreateDatasetRequest)(nil), // 3: flyteidl2.datacatalog.CreateDatasetRequest + (*CreateDatasetResponse)(nil), // 4: flyteidl2.datacatalog.CreateDatasetResponse + (*GetDatasetRequest)(nil), // 5: flyteidl2.datacatalog.GetDatasetRequest + (*GetDatasetResponse)(nil), // 6: flyteidl2.datacatalog.GetDatasetResponse + (*GetArtifactRequest)(nil), // 7: flyteidl2.datacatalog.GetArtifactRequest + (*GetArtifactResponse)(nil), // 8: flyteidl2.datacatalog.GetArtifactResponse + (*CreateArtifactRequest)(nil), // 9: flyteidl2.datacatalog.CreateArtifactRequest + (*CreateArtifactResponse)(nil), // 10: flyteidl2.datacatalog.CreateArtifactResponse + (*AddTagRequest)(nil), // 11: flyteidl2.datacatalog.AddTagRequest + (*AddTagResponse)(nil), // 12: flyteidl2.datacatalog.AddTagResponse + (*ListArtifactsRequest)(nil), // 13: flyteidl2.datacatalog.ListArtifactsRequest + (*ListArtifactsResponse)(nil), // 14: flyteidl2.datacatalog.ListArtifactsResponse + (*ListDatasetsRequest)(nil), // 15: flyteidl2.datacatalog.ListDatasetsRequest + (*ListDatasetsResponse)(nil), // 16: flyteidl2.datacatalog.ListDatasetsResponse + (*UpdateArtifactRequest)(nil), // 17: flyteidl2.datacatalog.UpdateArtifactRequest + (*UpdateArtifactResponse)(nil), // 18: flyteidl2.datacatalog.UpdateArtifactResponse + (*ReservationID)(nil), // 19: flyteidl2.datacatalog.ReservationID + (*GetOrExtendReservationRequest)(nil), // 20: flyteidl2.datacatalog.GetOrExtendReservationRequest + (*Reservation)(nil), // 21: flyteidl2.datacatalog.Reservation + (*GetOrExtendReservationResponse)(nil), // 22: flyteidl2.datacatalog.GetOrExtendReservationResponse + (*ReleaseReservationRequest)(nil), // 23: flyteidl2.datacatalog.ReleaseReservationRequest + (*ReleaseReservationResponse)(nil), // 24: flyteidl2.datacatalog.ReleaseReservationResponse + (*Dataset)(nil), // 25: flyteidl2.datacatalog.Dataset + 
(*Partition)(nil), // 26: flyteidl2.datacatalog.Partition + (*DatasetID)(nil), // 27: flyteidl2.datacatalog.DatasetID + (*Artifact)(nil), // 28: flyteidl2.datacatalog.Artifact + (*ArtifactData)(nil), // 29: flyteidl2.datacatalog.ArtifactData + (*Tag)(nil), // 30: flyteidl2.datacatalog.Tag + (*Metadata)(nil), // 31: flyteidl2.datacatalog.Metadata + (*FilterExpression)(nil), // 32: flyteidl2.datacatalog.FilterExpression + (*SinglePropertyFilter)(nil), // 33: flyteidl2.datacatalog.SinglePropertyFilter + (*ArtifactPropertyFilter)(nil), // 34: flyteidl2.datacatalog.ArtifactPropertyFilter + (*TagPropertyFilter)(nil), // 35: flyteidl2.datacatalog.TagPropertyFilter + (*PartitionPropertyFilter)(nil), // 36: flyteidl2.datacatalog.PartitionPropertyFilter + (*KeyValuePair)(nil), // 37: flyteidl2.datacatalog.KeyValuePair + (*DatasetPropertyFilter)(nil), // 38: flyteidl2.datacatalog.DatasetPropertyFilter + (*PaginationOptions)(nil), // 39: flyteidl2.datacatalog.PaginationOptions + nil, // 40: flyteidl2.datacatalog.Metadata.KeyMapEntry + (*durationpb.Duration)(nil), // 41: google.protobuf.Duration + (*timestamppb.Timestamp)(nil), // 42: google.protobuf.Timestamp + (*core.Literal)(nil), // 43: flyteidl2.core.Literal +} +var file_flyteidl2_datacatalog_datacatalog_proto_depIdxs = []int32{ + 25, // 0: flyteidl2.datacatalog.CreateDatasetRequest.dataset:type_name -> flyteidl2.datacatalog.Dataset + 27, // 1: flyteidl2.datacatalog.GetDatasetRequest.dataset:type_name -> flyteidl2.datacatalog.DatasetID + 25, // 2: flyteidl2.datacatalog.GetDatasetResponse.dataset:type_name -> flyteidl2.datacatalog.Dataset + 27, // 3: flyteidl2.datacatalog.GetArtifactRequest.dataset:type_name -> flyteidl2.datacatalog.DatasetID + 28, // 4: flyteidl2.datacatalog.GetArtifactResponse.artifact:type_name -> flyteidl2.datacatalog.Artifact + 28, // 5: flyteidl2.datacatalog.CreateArtifactRequest.artifact:type_name -> flyteidl2.datacatalog.Artifact + 30, // 6: flyteidl2.datacatalog.AddTagRequest.tag:type_name -> 
flyteidl2.datacatalog.Tag + 27, // 7: flyteidl2.datacatalog.ListArtifactsRequest.dataset:type_name -> flyteidl2.datacatalog.DatasetID + 32, // 8: flyteidl2.datacatalog.ListArtifactsRequest.filter:type_name -> flyteidl2.datacatalog.FilterExpression + 39, // 9: flyteidl2.datacatalog.ListArtifactsRequest.pagination:type_name -> flyteidl2.datacatalog.PaginationOptions + 28, // 10: flyteidl2.datacatalog.ListArtifactsResponse.artifacts:type_name -> flyteidl2.datacatalog.Artifact + 32, // 11: flyteidl2.datacatalog.ListDatasetsRequest.filter:type_name -> flyteidl2.datacatalog.FilterExpression + 39, // 12: flyteidl2.datacatalog.ListDatasetsRequest.pagination:type_name -> flyteidl2.datacatalog.PaginationOptions + 25, // 13: flyteidl2.datacatalog.ListDatasetsResponse.datasets:type_name -> flyteidl2.datacatalog.Dataset + 27, // 14: flyteidl2.datacatalog.UpdateArtifactRequest.dataset:type_name -> flyteidl2.datacatalog.DatasetID + 29, // 15: flyteidl2.datacatalog.UpdateArtifactRequest.data:type_name -> flyteidl2.datacatalog.ArtifactData + 31, // 16: flyteidl2.datacatalog.UpdateArtifactRequest.metadata:type_name -> flyteidl2.datacatalog.Metadata + 27, // 17: flyteidl2.datacatalog.ReservationID.dataset_id:type_name -> flyteidl2.datacatalog.DatasetID + 19, // 18: flyteidl2.datacatalog.GetOrExtendReservationRequest.reservation_id:type_name -> flyteidl2.datacatalog.ReservationID + 41, // 19: flyteidl2.datacatalog.GetOrExtendReservationRequest.heartbeat_interval:type_name -> google.protobuf.Duration + 19, // 20: flyteidl2.datacatalog.Reservation.reservation_id:type_name -> flyteidl2.datacatalog.ReservationID + 41, // 21: flyteidl2.datacatalog.Reservation.heartbeat_interval:type_name -> google.protobuf.Duration + 42, // 22: flyteidl2.datacatalog.Reservation.expires_at:type_name -> google.protobuf.Timestamp + 31, // 23: flyteidl2.datacatalog.Reservation.metadata:type_name -> flyteidl2.datacatalog.Metadata + 21, // 24: 
flyteidl2.datacatalog.GetOrExtendReservationResponse.reservation:type_name -> flyteidl2.datacatalog.Reservation + 19, // 25: flyteidl2.datacatalog.ReleaseReservationRequest.reservation_id:type_name -> flyteidl2.datacatalog.ReservationID + 27, // 26: flyteidl2.datacatalog.Dataset.id:type_name -> flyteidl2.datacatalog.DatasetID + 31, // 27: flyteidl2.datacatalog.Dataset.metadata:type_name -> flyteidl2.datacatalog.Metadata + 27, // 28: flyteidl2.datacatalog.Artifact.dataset:type_name -> flyteidl2.datacatalog.DatasetID + 29, // 29: flyteidl2.datacatalog.Artifact.data:type_name -> flyteidl2.datacatalog.ArtifactData + 31, // 30: flyteidl2.datacatalog.Artifact.metadata:type_name -> flyteidl2.datacatalog.Metadata + 26, // 31: flyteidl2.datacatalog.Artifact.partitions:type_name -> flyteidl2.datacatalog.Partition + 30, // 32: flyteidl2.datacatalog.Artifact.tags:type_name -> flyteidl2.datacatalog.Tag + 42, // 33: flyteidl2.datacatalog.Artifact.created_at:type_name -> google.protobuf.Timestamp + 43, // 34: flyteidl2.datacatalog.ArtifactData.value:type_name -> flyteidl2.core.Literal + 27, // 35: flyteidl2.datacatalog.Tag.dataset:type_name -> flyteidl2.datacatalog.DatasetID + 40, // 36: flyteidl2.datacatalog.Metadata.key_map:type_name -> flyteidl2.datacatalog.Metadata.KeyMapEntry + 33, // 37: flyteidl2.datacatalog.FilterExpression.filters:type_name -> flyteidl2.datacatalog.SinglePropertyFilter + 35, // 38: flyteidl2.datacatalog.SinglePropertyFilter.tag_filter:type_name -> flyteidl2.datacatalog.TagPropertyFilter + 36, // 39: flyteidl2.datacatalog.SinglePropertyFilter.partition_filter:type_name -> flyteidl2.datacatalog.PartitionPropertyFilter + 34, // 40: flyteidl2.datacatalog.SinglePropertyFilter.artifact_filter:type_name -> flyteidl2.datacatalog.ArtifactPropertyFilter + 38, // 41: flyteidl2.datacatalog.SinglePropertyFilter.dataset_filter:type_name -> flyteidl2.datacatalog.DatasetPropertyFilter + 0, // 42: flyteidl2.datacatalog.SinglePropertyFilter.operator:type_name -> 
flyteidl2.datacatalog.SinglePropertyFilter.ComparisonOperator + 37, // 43: flyteidl2.datacatalog.PartitionPropertyFilter.key_val:type_name -> flyteidl2.datacatalog.KeyValuePair + 2, // 44: flyteidl2.datacatalog.PaginationOptions.sortKey:type_name -> flyteidl2.datacatalog.PaginationOptions.SortKey + 1, // 45: flyteidl2.datacatalog.PaginationOptions.sortOrder:type_name -> flyteidl2.datacatalog.PaginationOptions.SortOrder + 3, // 46: flyteidl2.datacatalog.DataCatalog.CreateDataset:input_type -> flyteidl2.datacatalog.CreateDatasetRequest + 5, // 47: flyteidl2.datacatalog.DataCatalog.GetDataset:input_type -> flyteidl2.datacatalog.GetDatasetRequest + 9, // 48: flyteidl2.datacatalog.DataCatalog.CreateArtifact:input_type -> flyteidl2.datacatalog.CreateArtifactRequest + 7, // 49: flyteidl2.datacatalog.DataCatalog.GetArtifact:input_type -> flyteidl2.datacatalog.GetArtifactRequest + 11, // 50: flyteidl2.datacatalog.DataCatalog.AddTag:input_type -> flyteidl2.datacatalog.AddTagRequest + 13, // 51: flyteidl2.datacatalog.DataCatalog.ListArtifacts:input_type -> flyteidl2.datacatalog.ListArtifactsRequest + 15, // 52: flyteidl2.datacatalog.DataCatalog.ListDatasets:input_type -> flyteidl2.datacatalog.ListDatasetsRequest + 17, // 53: flyteidl2.datacatalog.DataCatalog.UpdateArtifact:input_type -> flyteidl2.datacatalog.UpdateArtifactRequest + 20, // 54: flyteidl2.datacatalog.DataCatalog.GetOrExtendReservation:input_type -> flyteidl2.datacatalog.GetOrExtendReservationRequest + 23, // 55: flyteidl2.datacatalog.DataCatalog.ReleaseReservation:input_type -> flyteidl2.datacatalog.ReleaseReservationRequest + 4, // 56: flyteidl2.datacatalog.DataCatalog.CreateDataset:output_type -> flyteidl2.datacatalog.CreateDatasetResponse + 6, // 57: flyteidl2.datacatalog.DataCatalog.GetDataset:output_type -> flyteidl2.datacatalog.GetDatasetResponse + 10, // 58: flyteidl2.datacatalog.DataCatalog.CreateArtifact:output_type -> flyteidl2.datacatalog.CreateArtifactResponse + 8, // 59: 
flyteidl2.datacatalog.DataCatalog.GetArtifact:output_type -> flyteidl2.datacatalog.GetArtifactResponse + 12, // 60: flyteidl2.datacatalog.DataCatalog.AddTag:output_type -> flyteidl2.datacatalog.AddTagResponse + 14, // 61: flyteidl2.datacatalog.DataCatalog.ListArtifacts:output_type -> flyteidl2.datacatalog.ListArtifactsResponse + 16, // 62: flyteidl2.datacatalog.DataCatalog.ListDatasets:output_type -> flyteidl2.datacatalog.ListDatasetsResponse + 18, // 63: flyteidl2.datacatalog.DataCatalog.UpdateArtifact:output_type -> flyteidl2.datacatalog.UpdateArtifactResponse + 22, // 64: flyteidl2.datacatalog.DataCatalog.GetOrExtendReservation:output_type -> flyteidl2.datacatalog.GetOrExtendReservationResponse + 24, // 65: flyteidl2.datacatalog.DataCatalog.ReleaseReservation:output_type -> flyteidl2.datacatalog.ReleaseReservationResponse + 56, // [56:66] is the sub-list for method output_type + 46, // [46:56] is the sub-list for method input_type + 46, // [46:46] is the sub-list for extension type_name + 46, // [46:46] is the sub-list for extension extendee + 0, // [0:46] is the sub-list for field type_name +} + +func init() { file_flyteidl2_datacatalog_datacatalog_proto_init() } +func file_flyteidl2_datacatalog_datacatalog_proto_init() { + if File_flyteidl2_datacatalog_datacatalog_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateDatasetRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateDatasetResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[2].Exporter = func(v 
interface{}, i int) interface{} { + switch v := v.(*GetDatasetRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetDatasetResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetArtifactRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetArtifactResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateArtifactRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateArtifactResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AddTagRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[9].Exporter = func(v interface{}, i int) 
interface{} { + switch v := v.(*AddTagResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListArtifactsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListArtifactsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListDatasetsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListDatasetsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateArtifactRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateArtifactResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[16].Exporter = func(v interface{}, i int) 
interface{} { + switch v := v.(*ReservationID); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetOrExtendReservationRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Reservation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetOrExtendReservationResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReleaseReservationRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReleaseReservationResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Dataset); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[23].Exporter = func(v interface{}, i int) 
interface{} { + switch v := v.(*Partition); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DatasetID); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Artifact); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ArtifactData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Tag); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Metadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FilterExpression); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SinglePropertyFilter); i { + case 0: + return 
&v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ArtifactPropertyFilter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TagPropertyFilter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PartitionPropertyFilter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeyValuePair); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DatasetPropertyFilter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PaginationOptions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[4].OneofWrappers = []interface{}{ + (*GetArtifactRequest_ArtifactId)(nil), + (*GetArtifactRequest_TagName)(nil), + } + 
file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[14].OneofWrappers = []interface{}{ + (*UpdateArtifactRequest_ArtifactId)(nil), + (*UpdateArtifactRequest_TagName)(nil), + } + file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[30].OneofWrappers = []interface{}{ + (*SinglePropertyFilter_TagFilter)(nil), + (*SinglePropertyFilter_PartitionFilter)(nil), + (*SinglePropertyFilter_ArtifactFilter)(nil), + (*SinglePropertyFilter_DatasetFilter)(nil), + } + file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[31].OneofWrappers = []interface{}{ + (*ArtifactPropertyFilter_ArtifactId)(nil), + } + file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[32].OneofWrappers = []interface{}{ + (*TagPropertyFilter_TagName)(nil), + } + file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[33].OneofWrappers = []interface{}{ + (*PartitionPropertyFilter_KeyVal)(nil), + } + file_flyteidl2_datacatalog_datacatalog_proto_msgTypes[35].OneofWrappers = []interface{}{ + (*DatasetPropertyFilter_Project)(nil), + (*DatasetPropertyFilter_Name)(nil), + (*DatasetPropertyFilter_Domain)(nil), + (*DatasetPropertyFilter_Version)(nil), + (*DatasetPropertyFilter_Org)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_flyteidl2_datacatalog_datacatalog_proto_rawDesc, + NumEnums: 3, + NumMessages: 38, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_flyteidl2_datacatalog_datacatalog_proto_goTypes, + DependencyIndexes: file_flyteidl2_datacatalog_datacatalog_proto_depIdxs, + EnumInfos: file_flyteidl2_datacatalog_datacatalog_proto_enumTypes, + MessageInfos: file_flyteidl2_datacatalog_datacatalog_proto_msgTypes, + }.Build() + File_flyteidl2_datacatalog_datacatalog_proto = out.File + file_flyteidl2_datacatalog_datacatalog_proto_rawDesc = nil + file_flyteidl2_datacatalog_datacatalog_proto_goTypes = nil + file_flyteidl2_datacatalog_datacatalog_proto_depIdxs = nil +} diff --git 
a/gen/go/flyteidl2/datacatalog/datacatalog.pb.validate.go b/gen/go/flyteidl2/datacatalog/datacatalog.pb.validate.go new file mode 100644 index 0000000000..e1c7c07abd --- /dev/null +++ b/gen/go/flyteidl2/datacatalog/datacatalog.pb.validate.go @@ -0,0 +1,5309 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: flyteidl2/datacatalog/datacatalog.proto + +package datacatalog + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on CreateDatasetRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *CreateDatasetRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CreateDatasetRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CreateDatasetRequestMultiError, or nil if none found. 
+func (m *CreateDatasetRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *CreateDatasetRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetDataset()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CreateDatasetRequestValidationError{ + field: "Dataset", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CreateDatasetRequestValidationError{ + field: "Dataset", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDataset()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CreateDatasetRequestValidationError{ + field: "Dataset", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return CreateDatasetRequestMultiError(errors) + } + + return nil +} + +// CreateDatasetRequestMultiError is an error wrapping multiple validation +// errors returned by CreateDatasetRequest.ValidateAll() if the designated +// constraints aren't met. +type CreateDatasetRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CreateDatasetRequestMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CreateDatasetRequestMultiError) AllErrors() []error { return m } + +// CreateDatasetRequestValidationError is the validation error returned by +// CreateDatasetRequest.Validate if the designated constraints aren't met. 
+type CreateDatasetRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CreateDatasetRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CreateDatasetRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CreateDatasetRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CreateDatasetRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CreateDatasetRequestValidationError) ErrorName() string { + return "CreateDatasetRequestValidationError" +} + +// Error satisfies the builtin error interface +func (e CreateDatasetRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCreateDatasetRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CreateDatasetRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CreateDatasetRequestValidationError{} + +// Validate checks the field values on CreateDatasetResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *CreateDatasetResponse) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CreateDatasetResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CreateDatasetResponseMultiError, or nil if none found. 
+func (m *CreateDatasetResponse) ValidateAll() error { + return m.validate(true) +} + +func (m *CreateDatasetResponse) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return CreateDatasetResponseMultiError(errors) + } + + return nil +} + +// CreateDatasetResponseMultiError is an error wrapping multiple validation +// errors returned by CreateDatasetResponse.ValidateAll() if the designated +// constraints aren't met. +type CreateDatasetResponseMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CreateDatasetResponseMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CreateDatasetResponseMultiError) AllErrors() []error { return m } + +// CreateDatasetResponseValidationError is the validation error returned by +// CreateDatasetResponse.Validate if the designated constraints aren't met. +type CreateDatasetResponseValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CreateDatasetResponseValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CreateDatasetResponseValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CreateDatasetResponseValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CreateDatasetResponseValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e CreateDatasetResponseValidationError) ErrorName() string { + return "CreateDatasetResponseValidationError" +} + +// Error satisfies the builtin error interface +func (e CreateDatasetResponseValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCreateDatasetResponse.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CreateDatasetResponseValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CreateDatasetResponseValidationError{} + +// Validate checks the field values on GetDatasetRequest with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *GetDatasetRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GetDatasetRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// GetDatasetRequestMultiError, or nil if none found. 
+func (m *GetDatasetRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *GetDatasetRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetDataset()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GetDatasetRequestValidationError{ + field: "Dataset", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GetDatasetRequestValidationError{ + field: "Dataset", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDataset()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GetDatasetRequestValidationError{ + field: "Dataset", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return GetDatasetRequestMultiError(errors) + } + + return nil +} + +// GetDatasetRequestMultiError is an error wrapping multiple validation errors +// returned by GetDatasetRequest.ValidateAll() if the designated constraints +// aren't met. +type GetDatasetRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GetDatasetRequestMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GetDatasetRequestMultiError) AllErrors() []error { return m } + +// GetDatasetRequestValidationError is the validation error returned by +// GetDatasetRequest.Validate if the designated constraints aren't met. +type GetDatasetRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e GetDatasetRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GetDatasetRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GetDatasetRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GetDatasetRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e GetDatasetRequestValidationError) ErrorName() string { + return "GetDatasetRequestValidationError" +} + +// Error satisfies the builtin error interface +func (e GetDatasetRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGetDatasetRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GetDatasetRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GetDatasetRequestValidationError{} + +// Validate checks the field values on GetDatasetResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *GetDatasetResponse) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GetDatasetResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// GetDatasetResponseMultiError, or nil if none found. 
+func (m *GetDatasetResponse) ValidateAll() error { + return m.validate(true) +} + +func (m *GetDatasetResponse) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetDataset()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GetDatasetResponseValidationError{ + field: "Dataset", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GetDatasetResponseValidationError{ + field: "Dataset", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDataset()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GetDatasetResponseValidationError{ + field: "Dataset", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return GetDatasetResponseMultiError(errors) + } + + return nil +} + +// GetDatasetResponseMultiError is an error wrapping multiple validation errors +// returned by GetDatasetResponse.ValidateAll() if the designated constraints +// aren't met. +type GetDatasetResponseMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GetDatasetResponseMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GetDatasetResponseMultiError) AllErrors() []error { return m } + +// GetDatasetResponseValidationError is the validation error returned by +// GetDatasetResponse.Validate if the designated constraints aren't met. 
+type GetDatasetResponseValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GetDatasetResponseValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GetDatasetResponseValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GetDatasetResponseValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GetDatasetResponseValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e GetDatasetResponseValidationError) ErrorName() string { + return "GetDatasetResponseValidationError" +} + +// Error satisfies the builtin error interface +func (e GetDatasetResponseValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGetDatasetResponse.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GetDatasetResponseValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GetDatasetResponseValidationError{} + +// Validate checks the field values on GetArtifactRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *GetArtifactRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GetArtifactRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// GetArtifactRequestMultiError, or nil if none found. 
+func (m *GetArtifactRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *GetArtifactRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetDataset()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GetArtifactRequestValidationError{ + field: "Dataset", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GetArtifactRequestValidationError{ + field: "Dataset", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDataset()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GetArtifactRequestValidationError{ + field: "Dataset", + reason: "embedded message failed validation", + cause: err, + } + } + } + + switch v := m.QueryHandle.(type) { + case *GetArtifactRequest_ArtifactId: + if v == nil { + err := GetArtifactRequestValidationError{ + field: "QueryHandle", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for ArtifactId + case *GetArtifactRequest_TagName: + if v == nil { + err := GetArtifactRequestValidationError{ + field: "QueryHandle", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for TagName + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return GetArtifactRequestMultiError(errors) + } + + return nil +} + +// GetArtifactRequestMultiError is an error wrapping multiple validation errors +// returned by GetArtifactRequest.ValidateAll() if the designated constraints +// aren't met. 
+type GetArtifactRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GetArtifactRequestMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GetArtifactRequestMultiError) AllErrors() []error { return m } + +// GetArtifactRequestValidationError is the validation error returned by +// GetArtifactRequest.Validate if the designated constraints aren't met. +type GetArtifactRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GetArtifactRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GetArtifactRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GetArtifactRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GetArtifactRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e GetArtifactRequestValidationError) ErrorName() string { + return "GetArtifactRequestValidationError" +} + +// Error satisfies the builtin error interface +func (e GetArtifactRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGetArtifactRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GetArtifactRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GetArtifactRequestValidationError{} + +// Validate checks the field values on GetArtifactResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *GetArtifactResponse) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GetArtifactResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// GetArtifactResponseMultiError, or nil if none found. 
+func (m *GetArtifactResponse) ValidateAll() error { + return m.validate(true) +} + +func (m *GetArtifactResponse) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetArtifact()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GetArtifactResponseValidationError{ + field: "Artifact", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GetArtifactResponseValidationError{ + field: "Artifact", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetArtifact()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GetArtifactResponseValidationError{ + field: "Artifact", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return GetArtifactResponseMultiError(errors) + } + + return nil +} + +// GetArtifactResponseMultiError is an error wrapping multiple validation +// errors returned by GetArtifactResponse.ValidateAll() if the designated +// constraints aren't met. +type GetArtifactResponseMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GetArtifactResponseMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GetArtifactResponseMultiError) AllErrors() []error { return m } + +// GetArtifactResponseValidationError is the validation error returned by +// GetArtifactResponse.Validate if the designated constraints aren't met. 
+type GetArtifactResponseValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GetArtifactResponseValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GetArtifactResponseValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GetArtifactResponseValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GetArtifactResponseValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e GetArtifactResponseValidationError) ErrorName() string { + return "GetArtifactResponseValidationError" +} + +// Error satisfies the builtin error interface +func (e GetArtifactResponseValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGetArtifactResponse.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GetArtifactResponseValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GetArtifactResponseValidationError{} + +// Validate checks the field values on CreateArtifactRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *CreateArtifactRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CreateArtifactRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CreateArtifactRequestMultiError, or nil if none found. 
+func (m *CreateArtifactRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *CreateArtifactRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetArtifact()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CreateArtifactRequestValidationError{ + field: "Artifact", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CreateArtifactRequestValidationError{ + field: "Artifact", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetArtifact()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CreateArtifactRequestValidationError{ + field: "Artifact", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return CreateArtifactRequestMultiError(errors) + } + + return nil +} + +// CreateArtifactRequestMultiError is an error wrapping multiple validation +// errors returned by CreateArtifactRequest.ValidateAll() if the designated +// constraints aren't met. +type CreateArtifactRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CreateArtifactRequestMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CreateArtifactRequestMultiError) AllErrors() []error { return m } + +// CreateArtifactRequestValidationError is the validation error returned by +// CreateArtifactRequest.Validate if the designated constraints aren't met. 
+type CreateArtifactRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CreateArtifactRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CreateArtifactRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CreateArtifactRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CreateArtifactRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CreateArtifactRequestValidationError) ErrorName() string { + return "CreateArtifactRequestValidationError" +} + +// Error satisfies the builtin error interface +func (e CreateArtifactRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCreateArtifactRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CreateArtifactRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CreateArtifactRequestValidationError{} + +// Validate checks the field values on CreateArtifactResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *CreateArtifactResponse) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CreateArtifactResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CreateArtifactResponseMultiError, or nil if none found. 
+func (m *CreateArtifactResponse) ValidateAll() error { + return m.validate(true) +} + +func (m *CreateArtifactResponse) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return CreateArtifactResponseMultiError(errors) + } + + return nil +} + +// CreateArtifactResponseMultiError is an error wrapping multiple validation +// errors returned by CreateArtifactResponse.ValidateAll() if the designated +// constraints aren't met. +type CreateArtifactResponseMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CreateArtifactResponseMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CreateArtifactResponseMultiError) AllErrors() []error { return m } + +// CreateArtifactResponseValidationError is the validation error returned by +// CreateArtifactResponse.Validate if the designated constraints aren't met. +type CreateArtifactResponseValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CreateArtifactResponseValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CreateArtifactResponseValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CreateArtifactResponseValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CreateArtifactResponseValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e CreateArtifactResponseValidationError) ErrorName() string { + return "CreateArtifactResponseValidationError" +} + +// Error satisfies the builtin error interface +func (e CreateArtifactResponseValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCreateArtifactResponse.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CreateArtifactResponseValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CreateArtifactResponseValidationError{} + +// Validate checks the field values on AddTagRequest with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *AddTagRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on AddTagRequest with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in AddTagRequestMultiError, or +// nil if none found. 
+func (m *AddTagRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *AddTagRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetTag()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, AddTagRequestValidationError{ + field: "Tag", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, AddTagRequestValidationError{ + field: "Tag", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTag()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return AddTagRequestValidationError{ + field: "Tag", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return AddTagRequestMultiError(errors) + } + + return nil +} + +// AddTagRequestMultiError is an error wrapping multiple validation errors +// returned by AddTagRequest.ValidateAll() if the designated constraints +// aren't met. +type AddTagRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m AddTagRequestMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m AddTagRequestMultiError) AllErrors() []error { return m } + +// AddTagRequestValidationError is the validation error returned by +// AddTagRequest.Validate if the designated constraints aren't met. +type AddTagRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e AddTagRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e AddTagRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e AddTagRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e AddTagRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e AddTagRequestValidationError) ErrorName() string { return "AddTagRequestValidationError" } + +// Error satisfies the builtin error interface +func (e AddTagRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sAddTagRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = AddTagRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = AddTagRequestValidationError{} + +// Validate checks the field values on AddTagResponse with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *AddTagResponse) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on AddTagResponse with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in AddTagResponseMultiError, +// or nil if none found. 
+func (m *AddTagResponse) ValidateAll() error { + return m.validate(true) +} + +func (m *AddTagResponse) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return AddTagResponseMultiError(errors) + } + + return nil +} + +// AddTagResponseMultiError is an error wrapping multiple validation errors +// returned by AddTagResponse.ValidateAll() if the designated constraints +// aren't met. +type AddTagResponseMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m AddTagResponseMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m AddTagResponseMultiError) AllErrors() []error { return m } + +// AddTagResponseValidationError is the validation error returned by +// AddTagResponse.Validate if the designated constraints aren't met. +type AddTagResponseValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e AddTagResponseValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e AddTagResponseValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e AddTagResponseValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e AddTagResponseValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e AddTagResponseValidationError) ErrorName() string { return "AddTagResponseValidationError" } + +// Error satisfies the builtin error interface +func (e AddTagResponseValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sAddTagResponse.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = AddTagResponseValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = AddTagResponseValidationError{} + +// Validate checks the field values on ListArtifactsRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ListArtifactsRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ListArtifactsRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ListArtifactsRequestMultiError, or nil if none found. 
+func (m *ListArtifactsRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *ListArtifactsRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetDataset()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListArtifactsRequestValidationError{ + field: "Dataset", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListArtifactsRequestValidationError{ + field: "Dataset", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDataset()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListArtifactsRequestValidationError{ + field: "Dataset", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetFilter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListArtifactsRequestValidationError{ + field: "Filter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListArtifactsRequestValidationError{ + field: "Filter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFilter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListArtifactsRequestValidationError{ + field: "Filter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetPagination()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, 
ListArtifactsRequestValidationError{ + field: "Pagination", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListArtifactsRequestValidationError{ + field: "Pagination", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPagination()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListArtifactsRequestValidationError{ + field: "Pagination", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ListArtifactsRequestMultiError(errors) + } + + return nil +} + +// ListArtifactsRequestMultiError is an error wrapping multiple validation +// errors returned by ListArtifactsRequest.ValidateAll() if the designated +// constraints aren't met. +type ListArtifactsRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ListArtifactsRequestMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ListArtifactsRequestMultiError) AllErrors() []error { return m } + +// ListArtifactsRequestValidationError is the validation error returned by +// ListArtifactsRequest.Validate if the designated constraints aren't met. +type ListArtifactsRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListArtifactsRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ListArtifactsRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e ListArtifactsRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ListArtifactsRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ListArtifactsRequestValidationError) ErrorName() string { + return "ListArtifactsRequestValidationError" +} + +// Error satisfies the builtin error interface +func (e ListArtifactsRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListArtifactsRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListArtifactsRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListArtifactsRequestValidationError{} + +// Validate checks the field values on ListArtifactsResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ListArtifactsResponse) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ListArtifactsResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ListArtifactsResponseMultiError, or nil if none found. 
func (m *ListArtifactsResponse) ValidateAll() error {
	return m.validate(true)
}

// validate applies the message's proto-defined rules. When all is true every
// violation is collected and returned as a MultiError; otherwise the first
// embedded-message failure is returned immediately.
func (m *ListArtifactsResponse) validate(all bool) error {
	if m == nil {
		return nil
	}

	var errors []error

	for idx, item := range m.GetArtifacts() {
		// Keep the loop variables "used" even when neither branch below fires.
		_, _ = idx, item

		if all {
			// Prefer the recursive ValidateAll on the element when available,
			// falling back to single-error Validate.
			switch v := interface{}(item).(type) {
			case interface{ ValidateAll() error }:
				if err := v.ValidateAll(); err != nil {
					errors = append(errors, ListArtifactsResponseValidationError{
						field:  fmt.Sprintf("Artifacts[%v]", idx),
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			case interface{ Validate() error }:
				if err := v.Validate(); err != nil {
					errors = append(errors, ListArtifactsResponseValidationError{
						field:  fmt.Sprintf("Artifacts[%v]", idx),
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			}
		} else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
			if err := v.Validate(); err != nil {
				return ListArtifactsResponseValidationError{
					field:  fmt.Sprintf("Artifacts[%v]", idx),
					reason: "embedded message failed validation",
					cause:  err,
				}
			}
		}

	}

	// no validation rules for NextToken

	if len(errors) > 0 {
		return ListArtifactsResponseMultiError(errors)
	}

	return nil
}

// ListArtifactsResponseMultiError is an error wrapping multiple validation
// errors returned by ListArtifactsResponse.ValidateAll() if the designated
// constraints aren't met.
type ListArtifactsResponseMultiError []error

// Error returns a concatenation of all the error messages it wraps.
func (m ListArtifactsResponseMultiError) Error() string {
	msgs := make([]string, 0, len(m))
	for _, err := range m {
		msgs = append(msgs, err.Error())
	}
	return strings.Join(msgs, "; ")
}

// AllErrors returns a list of validation violation errors.
+func (m ListArtifactsResponseMultiError) AllErrors() []error { return m } + +// ListArtifactsResponseValidationError is the validation error returned by +// ListArtifactsResponse.Validate if the designated constraints aren't met. +type ListArtifactsResponseValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListArtifactsResponseValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ListArtifactsResponseValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ListArtifactsResponseValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ListArtifactsResponseValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ListArtifactsResponseValidationError) ErrorName() string { + return "ListArtifactsResponseValidationError" +} + +// Error satisfies the builtin error interface +func (e ListArtifactsResponseValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListArtifactsResponse.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListArtifactsResponseValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListArtifactsResponseValidationError{} + +// Validate checks the field values on ListDatasetsRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. 
+func (m *ListDatasetsRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ListDatasetsRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ListDatasetsRequestMultiError, or nil if none found. +func (m *ListDatasetsRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *ListDatasetsRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetFilter()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListDatasetsRequestValidationError{ + field: "Filter", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListDatasetsRequestValidationError{ + field: "Filter", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetFilter()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ListDatasetsRequestValidationError{ + field: "Filter", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetPagination()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ListDatasetsRequestValidationError{ + field: "Pagination", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ListDatasetsRequestValidationError{ + field: "Pagination", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPagination()).(interface{ Validate() error }); ok { + if err := 
v.Validate(); err != nil { + return ListDatasetsRequestValidationError{ + field: "Pagination", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ListDatasetsRequestMultiError(errors) + } + + return nil +} + +// ListDatasetsRequestMultiError is an error wrapping multiple validation +// errors returned by ListDatasetsRequest.ValidateAll() if the designated +// constraints aren't met. +type ListDatasetsRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ListDatasetsRequestMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ListDatasetsRequestMultiError) AllErrors() []error { return m } + +// ListDatasetsRequestValidationError is the validation error returned by +// ListDatasetsRequest.Validate if the designated constraints aren't met. +type ListDatasetsRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListDatasetsRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ListDatasetsRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ListDatasetsRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ListDatasetsRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ListDatasetsRequestValidationError) ErrorName() string { + return "ListDatasetsRequestValidationError" +} + +// Error satisfies the builtin error interface +func (e ListDatasetsRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListDatasetsRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListDatasetsRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListDatasetsRequestValidationError{} + +// Validate checks the field values on ListDatasetsResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ListDatasetsResponse) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ListDatasetsResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ListDatasetsResponseMultiError, or nil if none found. 
func (m *ListDatasetsResponse) ValidateAll() error {
	return m.validate(true)
}

// validate applies the message's proto-defined rules. When all is true every
// violation is collected and returned as a MultiError; otherwise the first
// embedded-message failure is returned immediately.
func (m *ListDatasetsResponse) validate(all bool) error {
	if m == nil {
		return nil
	}

	var errors []error

	for idx, item := range m.GetDatasets() {
		// Keep the loop variables "used" even when neither branch below fires.
		_, _ = idx, item

		if all {
			// Prefer the recursive ValidateAll on the element when available,
			// falling back to single-error Validate.
			switch v := interface{}(item).(type) {
			case interface{ ValidateAll() error }:
				if err := v.ValidateAll(); err != nil {
					errors = append(errors, ListDatasetsResponseValidationError{
						field:  fmt.Sprintf("Datasets[%v]", idx),
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			case interface{ Validate() error }:
				if err := v.Validate(); err != nil {
					errors = append(errors, ListDatasetsResponseValidationError{
						field:  fmt.Sprintf("Datasets[%v]", idx),
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			}
		} else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
			if err := v.Validate(); err != nil {
				return ListDatasetsResponseValidationError{
					field:  fmt.Sprintf("Datasets[%v]", idx),
					reason: "embedded message failed validation",
					cause:  err,
				}
			}
		}

	}

	// no validation rules for NextToken

	if len(errors) > 0 {
		return ListDatasetsResponseMultiError(errors)
	}

	return nil
}

// ListDatasetsResponseMultiError is an error wrapping multiple validation
// errors returned by ListDatasetsResponse.ValidateAll() if the designated
// constraints aren't met.
type ListDatasetsResponseMultiError []error

// Error returns a concatenation of all the error messages it wraps.
func (m ListDatasetsResponseMultiError) Error() string {
	msgs := make([]string, 0, len(m))
	for _, err := range m {
		msgs = append(msgs, err.Error())
	}
	return strings.Join(msgs, "; ")
}

// AllErrors returns a list of validation violation errors.
+func (m ListDatasetsResponseMultiError) AllErrors() []error { return m } + +// ListDatasetsResponseValidationError is the validation error returned by +// ListDatasetsResponse.Validate if the designated constraints aren't met. +type ListDatasetsResponseValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ListDatasetsResponseValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ListDatasetsResponseValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ListDatasetsResponseValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ListDatasetsResponseValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ListDatasetsResponseValidationError) ErrorName() string { + return "ListDatasetsResponseValidationError" +} + +// Error satisfies the builtin error interface +func (e ListDatasetsResponseValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sListDatasetsResponse.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ListDatasetsResponseValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ListDatasetsResponseValidationError{} + +// Validate checks the field values on UpdateArtifactRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *UpdateArtifactRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on UpdateArtifactRequest with the rules +// defined in the proto definition for this message. 
If any rules are +// violated, the result is a list of violation errors wrapped in +// UpdateArtifactRequestMultiError, or nil if none found. +func (m *UpdateArtifactRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *UpdateArtifactRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetDataset()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UpdateArtifactRequestValidationError{ + field: "Dataset", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UpdateArtifactRequestValidationError{ + field: "Dataset", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDataset()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UpdateArtifactRequestValidationError{ + field: "Dataset", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetData() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UpdateArtifactRequestValidationError{ + field: fmt.Sprintf("Data[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UpdateArtifactRequestValidationError{ + field: fmt.Sprintf("Data[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UpdateArtifactRequestValidationError{ + field: fmt.Sprintf("Data[%v]", idx), + reason: "embedded 
message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, UpdateArtifactRequestValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, UpdateArtifactRequestValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return UpdateArtifactRequestValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + switch v := m.QueryHandle.(type) { + case *UpdateArtifactRequest_ArtifactId: + if v == nil { + err := UpdateArtifactRequestValidationError{ + field: "QueryHandle", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for ArtifactId + case *UpdateArtifactRequest_TagName: + if v == nil { + err := UpdateArtifactRequestValidationError{ + field: "QueryHandle", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for TagName + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return UpdateArtifactRequestMultiError(errors) + } + + return nil +} + +// UpdateArtifactRequestMultiError is an error wrapping multiple validation +// errors returned by UpdateArtifactRequest.ValidateAll() if the designated +// constraints aren't met. +type UpdateArtifactRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m UpdateArtifactRequestMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m UpdateArtifactRequestMultiError) AllErrors() []error { return m } + +// UpdateArtifactRequestValidationError is the validation error returned by +// UpdateArtifactRequest.Validate if the designated constraints aren't met. +type UpdateArtifactRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e UpdateArtifactRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e UpdateArtifactRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e UpdateArtifactRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e UpdateArtifactRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e UpdateArtifactRequestValidationError) ErrorName() string { + return "UpdateArtifactRequestValidationError" +} + +// Error satisfies the builtin error interface +func (e UpdateArtifactRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sUpdateArtifactRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = UpdateArtifactRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = UpdateArtifactRequestValidationError{} + +// Validate checks the field values on UpdateArtifactResponse with the rules +// defined in the proto definition for this message. 
If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *UpdateArtifactResponse) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on UpdateArtifactResponse with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// UpdateArtifactResponseMultiError, or nil if none found. +func (m *UpdateArtifactResponse) ValidateAll() error { + return m.validate(true) +} + +func (m *UpdateArtifactResponse) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for ArtifactId + + if len(errors) > 0 { + return UpdateArtifactResponseMultiError(errors) + } + + return nil +} + +// UpdateArtifactResponseMultiError is an error wrapping multiple validation +// errors returned by UpdateArtifactResponse.ValidateAll() if the designated +// constraints aren't met. +type UpdateArtifactResponseMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m UpdateArtifactResponseMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m UpdateArtifactResponseMultiError) AllErrors() []error { return m } + +// UpdateArtifactResponseValidationError is the validation error returned by +// UpdateArtifactResponse.Validate if the designated constraints aren't met. +type UpdateArtifactResponseValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e UpdateArtifactResponseValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e UpdateArtifactResponseValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e UpdateArtifactResponseValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e UpdateArtifactResponseValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e UpdateArtifactResponseValidationError) ErrorName() string { + return "UpdateArtifactResponseValidationError" +} + +// Error satisfies the builtin error interface +func (e UpdateArtifactResponseValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sUpdateArtifactResponse.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = UpdateArtifactResponseValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = UpdateArtifactResponseValidationError{} + +// Validate checks the field values on ReservationID with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ReservationID) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ReservationID with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ReservationIDMultiError, or +// nil if none found. 
func (m *ReservationID) ValidateAll() error {
	return m.validate(true)
}

// validate applies the message's proto-defined rules. When all is true every
// violation is collected and returned as a MultiError; otherwise the first
// embedded-message failure is returned immediately.
func (m *ReservationID) validate(all bool) error {
	if m == nil {
		return nil
	}

	var errors []error

	if all {
		// Prefer the recursive ValidateAll on the nested dataset id when
		// available, falling back to single-error Validate.
		switch v := interface{}(m.GetDatasetId()).(type) {
		case interface{ ValidateAll() error }:
			if err := v.ValidateAll(); err != nil {
				errors = append(errors, ReservationIDValidationError{
					field:  "DatasetId",
					reason: "embedded message failed validation",
					cause:  err,
				})
			}
		case interface{ Validate() error }:
			if err := v.Validate(); err != nil {
				errors = append(errors, ReservationIDValidationError{
					field:  "DatasetId",
					reason: "embedded message failed validation",
					cause:  err,
				})
			}
		}
	} else if v, ok := interface{}(m.GetDatasetId()).(interface{ Validate() error }); ok {
		if err := v.Validate(); err != nil {
			return ReservationIDValidationError{
				field:  "DatasetId",
				reason: "embedded message failed validation",
				cause:  err,
			}
		}
	}

	// no validation rules for TagName

	if len(errors) > 0 {
		return ReservationIDMultiError(errors)
	}

	return nil
}

// ReservationIDMultiError is an error wrapping multiple validation errors
// returned by ReservationID.ValidateAll() if the designated constraints
// aren't met.
type ReservationIDMultiError []error

// Error returns a concatenation of all the error messages it wraps.
func (m ReservationIDMultiError) Error() string {
	msgs := make([]string, 0, len(m))
	for _, err := range m {
		msgs = append(msgs, err.Error())
	}
	return strings.Join(msgs, "; ")
}

// AllErrors returns a list of validation violation errors.
func (m ReservationIDMultiError) AllErrors() []error { return m }

// ReservationIDValidationError is the validation error returned by
// ReservationID.Validate if the designated constraints aren't met.
type ReservationIDValidationError struct {
	field  string // name of the offending field
	reason string // human-readable description of the violation
	cause  error  // underlying error for embedded-message failures, if any
	key    bool   // set when the error applies to a map key (rendered as "key for" by Error())
}

// Field function returns field value.
+func (e ReservationIDValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ReservationIDValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ReservationIDValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ReservationIDValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ReservationIDValidationError) ErrorName() string { return "ReservationIDValidationError" } + +// Error satisfies the builtin error interface +func (e ReservationIDValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sReservationID.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ReservationIDValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ReservationIDValidationError{} + +// Validate checks the field values on GetOrExtendReservationRequest with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *GetOrExtendReservationRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GetOrExtendReservationRequest with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// GetOrExtendReservationRequestMultiError, or nil if none found. 
+func (m *GetOrExtendReservationRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *GetOrExtendReservationRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetReservationId()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GetOrExtendReservationRequestValidationError{ + field: "ReservationId", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GetOrExtendReservationRequestValidationError{ + field: "ReservationId", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetReservationId()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GetOrExtendReservationRequestValidationError{ + field: "ReservationId", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for OwnerId + + if all { + switch v := interface{}(m.GetHeartbeatInterval()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GetOrExtendReservationRequestValidationError{ + field: "HeartbeatInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GetOrExtendReservationRequestValidationError{ + field: "HeartbeatInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHeartbeatInterval()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GetOrExtendReservationRequestValidationError{ + field: "HeartbeatInterval", + reason: "embedded message failed validation", + cause: err, + } + } + } + + 
if len(errors) > 0 { + return GetOrExtendReservationRequestMultiError(errors) + } + + return nil +} + +// GetOrExtendReservationRequestMultiError is an error wrapping multiple +// validation errors returned by GetOrExtendReservationRequest.ValidateAll() +// if the designated constraints aren't met. +type GetOrExtendReservationRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m GetOrExtendReservationRequestMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GetOrExtendReservationRequestMultiError) AllErrors() []error { return m } + +// GetOrExtendReservationRequestValidationError is the validation error +// returned by GetOrExtendReservationRequest.Validate if the designated +// constraints aren't met. +type GetOrExtendReservationRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GetOrExtendReservationRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GetOrExtendReservationRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GetOrExtendReservationRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GetOrExtendReservationRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e GetOrExtendReservationRequestValidationError) ErrorName() string { + return "GetOrExtendReservationRequestValidationError" +} + +// Error satisfies the builtin error interface +func (e GetOrExtendReservationRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGetOrExtendReservationRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GetOrExtendReservationRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GetOrExtendReservationRequestValidationError{} + +// Validate checks the field values on Reservation with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Reservation) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Reservation with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ReservationMultiError, or +// nil if none found. 
+func (m *Reservation) ValidateAll() error { + return m.validate(true) +} + +func (m *Reservation) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetReservationId()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ReservationValidationError{ + field: "ReservationId", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ReservationValidationError{ + field: "ReservationId", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetReservationId()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ReservationValidationError{ + field: "ReservationId", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for OwnerId + + if all { + switch v := interface{}(m.GetHeartbeatInterval()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ReservationValidationError{ + field: "HeartbeatInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ReservationValidationError{ + field: "HeartbeatInterval", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetHeartbeatInterval()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ReservationValidationError{ + field: "HeartbeatInterval", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetExpiresAt()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { 
+ errors = append(errors, ReservationValidationError{ + field: "ExpiresAt", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ReservationValidationError{ + field: "ExpiresAt", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetExpiresAt()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ReservationValidationError{ + field: "ExpiresAt", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ReservationValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ReservationValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ReservationValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ReservationMultiError(errors) + } + + return nil +} + +// ReservationMultiError is an error wrapping multiple validation errors +// returned by Reservation.ValidateAll() if the designated constraints aren't met. +type ReservationMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m ReservationMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ReservationMultiError) AllErrors() []error { return m } + +// ReservationValidationError is the validation error returned by +// Reservation.Validate if the designated constraints aren't met. +type ReservationValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ReservationValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ReservationValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ReservationValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ReservationValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ReservationValidationError) ErrorName() string { return "ReservationValidationError" } + +// Error satisfies the builtin error interface +func (e ReservationValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sReservation.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ReservationValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ReservationValidationError{} + +// Validate checks the field values on GetOrExtendReservationResponse with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. 
+func (m *GetOrExtendReservationResponse) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on GetOrExtendReservationResponse with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// GetOrExtendReservationResponseMultiError, or nil if none found. +func (m *GetOrExtendReservationResponse) ValidateAll() error { + return m.validate(true) +} + +func (m *GetOrExtendReservationResponse) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetReservation()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, GetOrExtendReservationResponseValidationError{ + field: "Reservation", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, GetOrExtendReservationResponseValidationError{ + field: "Reservation", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetReservation()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return GetOrExtendReservationResponseValidationError{ + field: "Reservation", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return GetOrExtendReservationResponseMultiError(errors) + } + + return nil +} + +// GetOrExtendReservationResponseMultiError is an error wrapping multiple +// validation errors returned by GetOrExtendReservationResponse.ValidateAll() +// if the designated constraints aren't met. +type GetOrExtendReservationResponseMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m GetOrExtendReservationResponseMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m GetOrExtendReservationResponseMultiError) AllErrors() []error { return m } + +// GetOrExtendReservationResponseValidationError is the validation error +// returned by GetOrExtendReservationResponse.Validate if the designated +// constraints aren't met. +type GetOrExtendReservationResponseValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e GetOrExtendReservationResponseValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e GetOrExtendReservationResponseValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e GetOrExtendReservationResponseValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e GetOrExtendReservationResponseValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e GetOrExtendReservationResponseValidationError) ErrorName() string { + return "GetOrExtendReservationResponseValidationError" +} + +// Error satisfies the builtin error interface +func (e GetOrExtendReservationResponseValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sGetOrExtendReservationResponse.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = GetOrExtendReservationResponseValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = GetOrExtendReservationResponseValidationError{} + +// Validate checks the field values on ReleaseReservationRequest with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ReleaseReservationRequest) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ReleaseReservationRequest with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ReleaseReservationRequestMultiError, or nil if none found. 
+func (m *ReleaseReservationRequest) ValidateAll() error { + return m.validate(true) +} + +func (m *ReleaseReservationRequest) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetReservationId()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ReleaseReservationRequestValidationError{ + field: "ReservationId", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ReleaseReservationRequestValidationError{ + field: "ReservationId", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetReservationId()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ReleaseReservationRequestValidationError{ + field: "ReservationId", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for OwnerId + + if len(errors) > 0 { + return ReleaseReservationRequestMultiError(errors) + } + + return nil +} + +// ReleaseReservationRequestMultiError is an error wrapping multiple validation +// errors returned by ReleaseReservationRequest.ValidateAll() if the +// designated constraints aren't met. +type ReleaseReservationRequestMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ReleaseReservationRequestMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m ReleaseReservationRequestMultiError) AllErrors() []error { return m } + +// ReleaseReservationRequestValidationError is the validation error returned by +// ReleaseReservationRequest.Validate if the designated constraints aren't met. +type ReleaseReservationRequestValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ReleaseReservationRequestValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ReleaseReservationRequestValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ReleaseReservationRequestValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ReleaseReservationRequestValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ReleaseReservationRequestValidationError) ErrorName() string { + return "ReleaseReservationRequestValidationError" +} + +// Error satisfies the builtin error interface +func (e ReleaseReservationRequestValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sReleaseReservationRequest.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ReleaseReservationRequestValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ReleaseReservationRequestValidationError{} + +// Validate checks the field values on ReleaseReservationResponse with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. 
+func (m *ReleaseReservationResponse) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ReleaseReservationResponse with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ReleaseReservationResponseMultiError, or nil if none found. +func (m *ReleaseReservationResponse) ValidateAll() error { + return m.validate(true) +} + +func (m *ReleaseReservationResponse) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if len(errors) > 0 { + return ReleaseReservationResponseMultiError(errors) + } + + return nil +} + +// ReleaseReservationResponseMultiError is an error wrapping multiple +// validation errors returned by ReleaseReservationResponse.ValidateAll() if +// the designated constraints aren't met. +type ReleaseReservationResponseMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ReleaseReservationResponseMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ReleaseReservationResponseMultiError) AllErrors() []error { return m } + +// ReleaseReservationResponseValidationError is the validation error returned +// by ReleaseReservationResponse.Validate if the designated constraints aren't met. +type ReleaseReservationResponseValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ReleaseReservationResponseValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ReleaseReservationResponseValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e ReleaseReservationResponseValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ReleaseReservationResponseValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ReleaseReservationResponseValidationError) ErrorName() string { + return "ReleaseReservationResponseValidationError" +} + +// Error satisfies the builtin error interface +func (e ReleaseReservationResponseValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sReleaseReservationResponse.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ReleaseReservationResponseValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ReleaseReservationResponseValidationError{} + +// Validate checks the field values on Dataset with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Dataset) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Dataset with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in DatasetMultiError, or nil if none found. 
+func (m *Dataset) ValidateAll() error { + return m.validate(true) +} + +func (m *Dataset) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetId()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DatasetValidationError{ + field: "Id", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DatasetValidationError{ + field: "Id", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetId()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DatasetValidationError{ + field: "Id", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DatasetValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DatasetValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DatasetValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return DatasetMultiError(errors) + } + + return nil +} + +// DatasetMultiError is an error wrapping multiple validation errors returned +// by Dataset.ValidateAll() if the designated constraints aren't met. 
+type DatasetMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DatasetMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DatasetMultiError) AllErrors() []error { return m } + +// DatasetValidationError is the validation error returned by Dataset.Validate +// if the designated constraints aren't met. +type DatasetValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DatasetValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DatasetValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DatasetValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DatasetValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e DatasetValidationError) ErrorName() string { return "DatasetValidationError" } + +// Error satisfies the builtin error interface +func (e DatasetValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDataset.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DatasetValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DatasetValidationError{} + +// Validate checks the field values on Partition with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. 
+func (m *Partition) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Partition with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in PartitionMultiError, or nil +// if none found. +func (m *Partition) ValidateAll() error { + return m.validate(true) +} + +func (m *Partition) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Key + + // no validation rules for Value + + if len(errors) > 0 { + return PartitionMultiError(errors) + } + + return nil +} + +// PartitionMultiError is an error wrapping multiple validation errors returned +// by Partition.ValidateAll() if the designated constraints aren't met. +type PartitionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m PartitionMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m PartitionMultiError) AllErrors() []error { return m } + +// PartitionValidationError is the validation error returned by +// Partition.Validate if the designated constraints aren't met. +type PartitionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e PartitionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e PartitionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e PartitionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e PartitionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e PartitionValidationError) ErrorName() string { return "PartitionValidationError" } + +// Error satisfies the builtin error interface +func (e PartitionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sPartition.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = PartitionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = PartitionValidationError{} + +// Validate checks the field values on DatasetID with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *DatasetID) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DatasetID with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in DatasetIDMultiError, or nil +// if none found. +func (m *DatasetID) ValidateAll() error { + return m.validate(true) +} + +func (m *DatasetID) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Project + + // no validation rules for Name + + // no validation rules for Domain + + // no validation rules for Version + + // no validation rules for UUID + + // no validation rules for Org + + if len(errors) > 0 { + return DatasetIDMultiError(errors) + } + + return nil +} + +// DatasetIDMultiError is an error wrapping multiple validation errors returned +// by DatasetID.ValidateAll() if the designated constraints aren't met. +type DatasetIDMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m DatasetIDMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DatasetIDMultiError) AllErrors() []error { return m } + +// DatasetIDValidationError is the validation error returned by +// DatasetID.Validate if the designated constraints aren't met. +type DatasetIDValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DatasetIDValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DatasetIDValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DatasetIDValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DatasetIDValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e DatasetIDValidationError) ErrorName() string { return "DatasetIDValidationError" } + +// Error satisfies the builtin error interface +func (e DatasetIDValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDatasetID.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DatasetIDValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DatasetIDValidationError{} + +// Validate checks the field values on Artifact with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. 
func (m *Artifact) Validate() error {
	return m.validate(false)
}

// ValidateAll checks the field values on Artifact with the rules defined in
// the proto definition for this message. If any rules are violated, the
// result is a list of violation errors wrapped in ArtifactMultiError, or nil
// if none found.
func (m *Artifact) ValidateAll() error {
	return m.validate(true)
}

// validate is the shared implementation behind Validate and ValidateAll.
// When all is true every violation is collected and wrapped in an
// ArtifactMultiError; when all is false the first violation encountered is
// returned immediately.
func (m *Artifact) validate(all bool) error {
	if m == nil {
		return nil
	}

	var errors []error

	// no validation rules for Id

	// Recursively validate the embedded Dataset message, if its concrete
	// type exposes ValidateAll/Validate.
	if all {
		switch v := interface{}(m.GetDataset()).(type) {
		case interface{ ValidateAll() error }:
			if err := v.ValidateAll(); err != nil {
				errors = append(errors, ArtifactValidationError{
					field:  "Dataset",
					reason: "embedded message failed validation",
					cause:  err,
				})
			}
		case interface{ Validate() error }:
			if err := v.Validate(); err != nil {
				errors = append(errors, ArtifactValidationError{
					field:  "Dataset",
					reason: "embedded message failed validation",
					cause:  err,
				})
			}
		}
	} else if v, ok := interface{}(m.GetDataset()).(interface{ Validate() error }); ok {
		if err := v.Validate(); err != nil {
			return ArtifactValidationError{
				field:  "Dataset",
				reason: "embedded message failed validation",
				cause:  err,
			}
		}
	}

	// Recursively validate each element of the repeated Data field.
	for idx, item := range m.GetData() {
		_, _ = idx, item // keep idx/item referenced regardless of rules below

		if all {
			switch v := interface{}(item).(type) {
			case interface{ ValidateAll() error }:
				if err := v.ValidateAll(); err != nil {
					errors = append(errors, ArtifactValidationError{
						field:  fmt.Sprintf("Data[%v]", idx),
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			case interface{ Validate() error }:
				if err := v.Validate(); err != nil {
					errors = append(errors, ArtifactValidationError{
						field:  fmt.Sprintf("Data[%v]", idx),
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			}
		} else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
			if err := v.Validate(); err != nil {
				return ArtifactValidationError{
					field:  fmt.Sprintf("Data[%v]", idx),
					reason: "embedded message failed validation",
					cause:  err,
				}
			}
		}

	}

	// Recursively validate the embedded Metadata message.
	if all {
		switch v := interface{}(m.GetMetadata()).(type) {
		case interface{ ValidateAll() error }:
			if err := v.ValidateAll(); err != nil {
				errors = append(errors, ArtifactValidationError{
					field:  "Metadata",
					reason: "embedded message failed validation",
					cause:  err,
				})
			}
		case interface{ Validate() error }:
			if err := v.Validate(); err != nil {
				errors = append(errors, ArtifactValidationError{
					field:  "Metadata",
					reason: "embedded message failed validation",
					cause:  err,
				})
			}
		}
	} else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok {
		if err := v.Validate(); err != nil {
			return ArtifactValidationError{
				field:  "Metadata",
				reason: "embedded message failed validation",
				cause:  err,
			}
		}
	}

	// Recursively validate each element of the repeated Partitions field.
	for idx, item := range m.GetPartitions() {
		_, _ = idx, item

		if all {
			switch v := interface{}(item).(type) {
			case interface{ ValidateAll() error }:
				if err := v.ValidateAll(); err != nil {
					errors = append(errors, ArtifactValidationError{
						field:  fmt.Sprintf("Partitions[%v]", idx),
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			case interface{ Validate() error }:
				if err := v.Validate(); err != nil {
					errors = append(errors, ArtifactValidationError{
						field:  fmt.Sprintf("Partitions[%v]", idx),
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			}
		} else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
			if err := v.Validate(); err != nil {
				return ArtifactValidationError{
					field:  fmt.Sprintf("Partitions[%v]", idx),
					reason: "embedded message failed validation",
					cause:  err,
				}
			}
		}

	}

	// Recursively validate each element of the repeated Tags field.
	for idx, item := range m.GetTags() {
		_, _ = idx, item

		if all {
			switch v := interface{}(item).(type) {
			case interface{ ValidateAll() error }:
				if err := v.ValidateAll(); err != nil {
					errors = append(errors, ArtifactValidationError{
						field:  fmt.Sprintf("Tags[%v]", idx),
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			case interface{ Validate() error }:
				if err := v.Validate(); err != nil {
					errors = append(errors, ArtifactValidationError{
						field:  fmt.Sprintf("Tags[%v]", idx),
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			}
		} else if v, ok := interface{}(item).(interface{ Validate() error }); ok {
			if err := v.Validate(); err != nil {
				return ArtifactValidationError{
					field:  fmt.Sprintf("Tags[%v]", idx),
					reason: "embedded message failed validation",
					cause:  err,
				}
			}
		}

	}

	// Recursively validate the embedded CreatedAt message.
	if all {
		switch v := interface{}(m.GetCreatedAt()).(type) {
		case interface{ ValidateAll() error }:
			if err := v.ValidateAll(); err != nil {
				errors = append(errors, ArtifactValidationError{
					field:  "CreatedAt",
					reason: "embedded message failed validation",
					cause:  err,
				})
			}
		case interface{ Validate() error }:
			if err := v.Validate(); err != nil {
				errors = append(errors, ArtifactValidationError{
					field:  "CreatedAt",
					reason: "embedded message failed validation",
					cause:  err,
				})
			}
		}
	} else if v, ok := interface{}(m.GetCreatedAt()).(interface{ Validate() error }); ok {
		if err := v.Validate(); err != nil {
			return ArtifactValidationError{
				field:  "CreatedAt",
				reason: "embedded message failed validation",
				cause:  err,
			}
		}
	}

	if len(errors) > 0 {
		return ArtifactMultiError(errors)
	}

	return nil
}

// ArtifactMultiError is an error wrapping multiple validation errors returned
// by Artifact.ValidateAll() if the designated constraints aren't met.
type ArtifactMultiError []error

// Error returns a concatenation of all the error messages it wraps.
+func (m ArtifactMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ArtifactMultiError) AllErrors() []error { return m } + +// ArtifactValidationError is the validation error returned by +// Artifact.Validate if the designated constraints aren't met. +type ArtifactValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ArtifactValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ArtifactValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ArtifactValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ArtifactValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ArtifactValidationError) ErrorName() string { return "ArtifactValidationError" } + +// Error satisfies the builtin error interface +func (e ArtifactValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sArtifact.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ArtifactValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ArtifactValidationError{} + +// Validate checks the field values on ArtifactData with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. 
+func (m *ArtifactData) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ArtifactData with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ArtifactDataMultiError, or +// nil if none found. +func (m *ArtifactData) ValidateAll() error { + return m.validate(true) +} + +func (m *ArtifactData) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + if all { + switch v := interface{}(m.GetValue()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ArtifactDataValidationError{ + field: "Value", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ArtifactDataValidationError{ + field: "Value", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetValue()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ArtifactDataValidationError{ + field: "Value", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ArtifactDataMultiError(errors) + } + + return nil +} + +// ArtifactDataMultiError is an error wrapping multiple validation errors +// returned by ArtifactData.ValidateAll() if the designated constraints aren't met. +type ArtifactDataMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ArtifactDataMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. 
+func (m ArtifactDataMultiError) AllErrors() []error { return m } + +// ArtifactDataValidationError is the validation error returned by +// ArtifactData.Validate if the designated constraints aren't met. +type ArtifactDataValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ArtifactDataValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ArtifactDataValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ArtifactDataValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ArtifactDataValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ArtifactDataValidationError) ErrorName() string { return "ArtifactDataValidationError" } + +// Error satisfies the builtin error interface +func (e ArtifactDataValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sArtifactData.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ArtifactDataValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ArtifactDataValidationError{} + +// Validate checks the field values on Tag with the rules defined in the proto +// definition for this message. If any rules are violated, the first error +// encountered is returned, or nil if there are no violations. +func (m *Tag) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Tag with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in TagMultiError, or nil if none found. 
+func (m *Tag) ValidateAll() error { + return m.validate(true) +} + +func (m *Tag) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Name + + // no validation rules for ArtifactId + + if all { + switch v := interface{}(m.GetDataset()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TagValidationError{ + field: "Dataset", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TagValidationError{ + field: "Dataset", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetDataset()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TagValidationError{ + field: "Dataset", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return TagMultiError(errors) + } + + return nil +} + +// TagMultiError is an error wrapping multiple validation errors returned by +// Tag.ValidateAll() if the designated constraints aren't met. +type TagMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TagMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TagMultiError) AllErrors() []error { return m } + +// TagValidationError is the validation error returned by Tag.Validate if the +// designated constraints aren't met. +type TagValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TagValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e TagValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TagValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TagValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e TagValidationError) ErrorName() string { return "TagValidationError" } + +// Error satisfies the builtin error interface +func (e TagValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTag.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TagValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TagValidationError{} + +// Validate checks the field values on Metadata with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *Metadata) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on Metadata with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in MetadataMultiError, or nil +// if none found. +func (m *Metadata) ValidateAll() error { + return m.validate(true) +} + +func (m *Metadata) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for KeyMap + + if len(errors) > 0 { + return MetadataMultiError(errors) + } + + return nil +} + +// MetadataMultiError is an error wrapping multiple validation errors returned +// by Metadata.ValidateAll() if the designated constraints aren't met. +type MetadataMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m MetadataMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m MetadataMultiError) AllErrors() []error { return m } + +// MetadataValidationError is the validation error returned by +// Metadata.Validate if the designated constraints aren't met. +type MetadataValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e MetadataValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e MetadataValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e MetadataValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e MetadataValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e MetadataValidationError) ErrorName() string { return "MetadataValidationError" } + +// Error satisfies the builtin error interface +func (e MetadataValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sMetadata.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = MetadataValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = MetadataValidationError{} + +// Validate checks the field values on FilterExpression with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. 
+func (m *FilterExpression) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on FilterExpression with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// FilterExpressionMultiError, or nil if none found. +func (m *FilterExpression) ValidateAll() error { + return m.validate(true) +} + +func (m *FilterExpression) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetFilters() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, FilterExpressionValidationError{ + field: fmt.Sprintf("Filters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, FilterExpressionValidationError{ + field: fmt.Sprintf("Filters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return FilterExpressionValidationError{ + field: fmt.Sprintf("Filters[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return FilterExpressionMultiError(errors) + } + + return nil +} + +// FilterExpressionMultiError is an error wrapping multiple validation errors +// returned by FilterExpression.ValidateAll() if the designated constraints +// aren't met. +type FilterExpressionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m FilterExpressionMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m FilterExpressionMultiError) AllErrors() []error { return m } + +// FilterExpressionValidationError is the validation error returned by +// FilterExpression.Validate if the designated constraints aren't met. +type FilterExpressionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e FilterExpressionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e FilterExpressionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e FilterExpressionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e FilterExpressionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e FilterExpressionValidationError) ErrorName() string { return "FilterExpressionValidationError" } + +// Error satisfies the builtin error interface +func (e FilterExpressionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sFilterExpression.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = FilterExpressionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = FilterExpressionValidationError{} + +// Validate checks the field values on SinglePropertyFilter with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. 
func (m *SinglePropertyFilter) Validate() error {
	return m.validate(false)
}

// ValidateAll checks the field values on SinglePropertyFilter with the rules
// defined in the proto definition for this message. If any rules are
// violated, the result is a list of violation errors wrapped in
// SinglePropertyFilterMultiError, or nil if none found.
func (m *SinglePropertyFilter) ValidateAll() error {
	return m.validate(true)
}

// validate is the shared implementation behind Validate and ValidateAll.
// It dispatches on the PropertyFilter oneof: each populated variant is
// checked for a typed-nil wrapper and then recursively validated.
func (m *SinglePropertyFilter) validate(all bool) error {
	if m == nil {
		return nil
	}

	var errors []error

	// no validation rules for Operator

	switch v := m.PropertyFilter.(type) {
	case *SinglePropertyFilter_TagFilter:
		if v == nil {
			err := SinglePropertyFilterValidationError{
				field:  "PropertyFilter",
				reason: "oneof value cannot be a typed-nil",
			}
			if !all {
				return err
			}
			errors = append(errors, err)
		}

		if all {
			switch v := interface{}(m.GetTagFilter()).(type) {
			case interface{ ValidateAll() error }:
				if err := v.ValidateAll(); err != nil {
					errors = append(errors, SinglePropertyFilterValidationError{
						field:  "TagFilter",
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			case interface{ Validate() error }:
				if err := v.Validate(); err != nil {
					errors = append(errors, SinglePropertyFilterValidationError{
						field:  "TagFilter",
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			}
		} else if v, ok := interface{}(m.GetTagFilter()).(interface{ Validate() error }); ok {
			if err := v.Validate(); err != nil {
				return SinglePropertyFilterValidationError{
					field:  "TagFilter",
					reason: "embedded message failed validation",
					cause:  err,
				}
			}
		}

	case *SinglePropertyFilter_PartitionFilter:
		if v == nil {
			err := SinglePropertyFilterValidationError{
				field:  "PropertyFilter",
				reason: "oneof value cannot be a typed-nil",
			}
			if !all {
				return err
			}
			errors = append(errors, err)
		}

		if all {
			switch v := interface{}(m.GetPartitionFilter()).(type) {
			case interface{ ValidateAll() error }:
				if err := v.ValidateAll(); err != nil {
					errors = append(errors, SinglePropertyFilterValidationError{
						field:  "PartitionFilter",
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			case interface{ Validate() error }:
				if err := v.Validate(); err != nil {
					errors = append(errors, SinglePropertyFilterValidationError{
						field:  "PartitionFilter",
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			}
		} else if v, ok := interface{}(m.GetPartitionFilter()).(interface{ Validate() error }); ok {
			if err := v.Validate(); err != nil {
				return SinglePropertyFilterValidationError{
					field:  "PartitionFilter",
					reason: "embedded message failed validation",
					cause:  err,
				}
			}
		}

	case *SinglePropertyFilter_ArtifactFilter:
		if v == nil {
			err := SinglePropertyFilterValidationError{
				field:  "PropertyFilter",
				reason: "oneof value cannot be a typed-nil",
			}
			if !all {
				return err
			}
			errors = append(errors, err)
		}

		if all {
			switch v := interface{}(m.GetArtifactFilter()).(type) {
			case interface{ ValidateAll() error }:
				if err := v.ValidateAll(); err != nil {
					errors = append(errors, SinglePropertyFilterValidationError{
						field:  "ArtifactFilter",
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			case interface{ Validate() error }:
				if err := v.Validate(); err != nil {
					errors = append(errors, SinglePropertyFilterValidationError{
						field:  "ArtifactFilter",
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			}
		} else if v, ok := interface{}(m.GetArtifactFilter()).(interface{ Validate() error }); ok {
			if err := v.Validate(); err != nil {
				return SinglePropertyFilterValidationError{
					field:  "ArtifactFilter",
					reason: "embedded message failed validation",
					cause:  err,
				}
			}
		}

	case *SinglePropertyFilter_DatasetFilter:
		if v == nil {
			err := SinglePropertyFilterValidationError{
				field:  "PropertyFilter",
				reason: "oneof value cannot be a typed-nil",
			}
			if !all {
				return err
			}
			errors = append(errors, err)
		}

		if all {
			switch v := interface{}(m.GetDatasetFilter()).(type) {
			case interface{ ValidateAll() error }:
				if err := v.ValidateAll(); err != nil {
					errors = append(errors, SinglePropertyFilterValidationError{
						field:  "DatasetFilter",
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			case interface{ Validate() error }:
				if err := v.Validate(); err != nil {
					errors = append(errors, SinglePropertyFilterValidationError{
						field:  "DatasetFilter",
						reason: "embedded message failed validation",
						cause:  err,
					})
				}
			}
		} else if v, ok := interface{}(m.GetDatasetFilter()).(interface{ Validate() error }); ok {
			if err := v.Validate(); err != nil {
				return SinglePropertyFilterValidationError{
					field:  "DatasetFilter",
					reason: "embedded message failed validation",
					cause:  err,
				}
			}
		}

	default:
		_ = v // ensures v is used
	}

	if len(errors) > 0 {
		return SinglePropertyFilterMultiError(errors)
	}

	return nil
}

// SinglePropertyFilterMultiError is an error wrapping multiple validation
// errors returned by SinglePropertyFilter.ValidateAll() if the designated
// constraints aren't met.
type SinglePropertyFilterMultiError []error

// Error returns a concatenation of all the error messages it wraps.
func (m SinglePropertyFilterMultiError) Error() string {
	msgs := make([]string, 0, len(m))
	for _, err := range m {
		msgs = append(msgs, err.Error())
	}
	return strings.Join(msgs, "; ")
}

// AllErrors returns a list of validation violation errors.
func (m SinglePropertyFilterMultiError) AllErrors() []error { return m }

// SinglePropertyFilterValidationError is the validation error returned by
// SinglePropertyFilter.Validate if the designated constraints aren't met.
+type SinglePropertyFilterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e SinglePropertyFilterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e SinglePropertyFilterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e SinglePropertyFilterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e SinglePropertyFilterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e SinglePropertyFilterValidationError) ErrorName() string { + return "SinglePropertyFilterValidationError" +} + +// Error satisfies the builtin error interface +func (e SinglePropertyFilterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sSinglePropertyFilter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = SinglePropertyFilterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = SinglePropertyFilterValidationError{} + +// Validate checks the field values on ArtifactPropertyFilter with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ArtifactPropertyFilter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ArtifactPropertyFilter with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ArtifactPropertyFilterMultiError, or nil if none found. 
+func (m *ArtifactPropertyFilter) ValidateAll() error { + return m.validate(true) +} + +func (m *ArtifactPropertyFilter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + switch v := m.Property.(type) { + case *ArtifactPropertyFilter_ArtifactId: + if v == nil { + err := ArtifactPropertyFilterValidationError{ + field: "Property", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for ArtifactId + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return ArtifactPropertyFilterMultiError(errors) + } + + return nil +} + +// ArtifactPropertyFilterMultiError is an error wrapping multiple validation +// errors returned by ArtifactPropertyFilter.ValidateAll() if the designated +// constraints aren't met. +type ArtifactPropertyFilterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ArtifactPropertyFilterMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ArtifactPropertyFilterMultiError) AllErrors() []error { return m } + +// ArtifactPropertyFilterValidationError is the validation error returned by +// ArtifactPropertyFilter.Validate if the designated constraints aren't met. +type ArtifactPropertyFilterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ArtifactPropertyFilterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ArtifactPropertyFilterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ArtifactPropertyFilterValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e ArtifactPropertyFilterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ArtifactPropertyFilterValidationError) ErrorName() string { + return "ArtifactPropertyFilterValidationError" +} + +// Error satisfies the builtin error interface +func (e ArtifactPropertyFilterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sArtifactPropertyFilter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ArtifactPropertyFilterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ArtifactPropertyFilterValidationError{} + +// Validate checks the field values on TagPropertyFilter with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *TagPropertyFilter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TagPropertyFilter with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// TagPropertyFilterMultiError, or nil if none found. 
+func (m *TagPropertyFilter) ValidateAll() error { + return m.validate(true) +} + +func (m *TagPropertyFilter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + switch v := m.Property.(type) { + case *TagPropertyFilter_TagName: + if v == nil { + err := TagPropertyFilterValidationError{ + field: "Property", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for TagName + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return TagPropertyFilterMultiError(errors) + } + + return nil +} + +// TagPropertyFilterMultiError is an error wrapping multiple validation errors +// returned by TagPropertyFilter.ValidateAll() if the designated constraints +// aren't met. +type TagPropertyFilterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TagPropertyFilterMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TagPropertyFilterMultiError) AllErrors() []error { return m } + +// TagPropertyFilterValidationError is the validation error returned by +// TagPropertyFilter.Validate if the designated constraints aren't met. +type TagPropertyFilterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TagPropertyFilterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TagPropertyFilterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TagPropertyFilterValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e TagPropertyFilterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e TagPropertyFilterValidationError) ErrorName() string { + return "TagPropertyFilterValidationError" +} + +// Error satisfies the builtin error interface +func (e TagPropertyFilterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTagPropertyFilter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TagPropertyFilterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TagPropertyFilterValidationError{} + +// Validate checks the field values on PartitionPropertyFilter with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *PartitionPropertyFilter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on PartitionPropertyFilter with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// PartitionPropertyFilterMultiError, or nil if none found. 
+func (m *PartitionPropertyFilter) ValidateAll() error { + return m.validate(true) +} + +func (m *PartitionPropertyFilter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + switch v := m.Property.(type) { + case *PartitionPropertyFilter_KeyVal: + if v == nil { + err := PartitionPropertyFilterValidationError{ + field: "Property", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetKeyVal()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, PartitionPropertyFilterValidationError{ + field: "KeyVal", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, PartitionPropertyFilterValidationError{ + field: "KeyVal", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetKeyVal()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return PartitionPropertyFilterValidationError{ + field: "KeyVal", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return PartitionPropertyFilterMultiError(errors) + } + + return nil +} + +// PartitionPropertyFilterMultiError is an error wrapping multiple validation +// errors returned by PartitionPropertyFilter.ValidateAll() if the designated +// constraints aren't met. +type PartitionPropertyFilterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m PartitionPropertyFilterMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m PartitionPropertyFilterMultiError) AllErrors() []error { return m } + +// PartitionPropertyFilterValidationError is the validation error returned by +// PartitionPropertyFilter.Validate if the designated constraints aren't met. +type PartitionPropertyFilterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e PartitionPropertyFilterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e PartitionPropertyFilterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e PartitionPropertyFilterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e PartitionPropertyFilterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e PartitionPropertyFilterValidationError) ErrorName() string { + return "PartitionPropertyFilterValidationError" +} + +// Error satisfies the builtin error interface +func (e PartitionPropertyFilterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sPartitionPropertyFilter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = PartitionPropertyFilterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = PartitionPropertyFilterValidationError{} + +// Validate checks the field values on KeyValuePair with the rules defined in +// the proto definition for this message. 
If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *KeyValuePair) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on KeyValuePair with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in KeyValuePairMultiError, or +// nil if none found. +func (m *KeyValuePair) ValidateAll() error { + return m.validate(true) +} + +func (m *KeyValuePair) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Key + + // no validation rules for Value + + if len(errors) > 0 { + return KeyValuePairMultiError(errors) + } + + return nil +} + +// KeyValuePairMultiError is an error wrapping multiple validation errors +// returned by KeyValuePair.ValidateAll() if the designated constraints aren't met. +type KeyValuePairMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m KeyValuePairMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m KeyValuePairMultiError) AllErrors() []error { return m } + +// KeyValuePairValidationError is the validation error returned by +// KeyValuePair.Validate if the designated constraints aren't met. +type KeyValuePairValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e KeyValuePairValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e KeyValuePairValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e KeyValuePairValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e KeyValuePairValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e KeyValuePairValidationError) ErrorName() string { return "KeyValuePairValidationError" } + +// Error satisfies the builtin error interface +func (e KeyValuePairValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sKeyValuePair.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = KeyValuePairValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = KeyValuePairValidationError{} + +// Validate checks the field values on DatasetPropertyFilter with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *DatasetPropertyFilter) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DatasetPropertyFilter with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// DatasetPropertyFilterMultiError, or nil if none found. 
+func (m *DatasetPropertyFilter) ValidateAll() error { + return m.validate(true) +} + +func (m *DatasetPropertyFilter) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + switch v := m.Property.(type) { + case *DatasetPropertyFilter_Project: + if v == nil { + err := DatasetPropertyFilterValidationError{ + field: "Property", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for Project + case *DatasetPropertyFilter_Name: + if v == nil { + err := DatasetPropertyFilterValidationError{ + field: "Property", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for Name + case *DatasetPropertyFilter_Domain: + if v == nil { + err := DatasetPropertyFilterValidationError{ + field: "Property", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for Domain + case *DatasetPropertyFilter_Version: + if v == nil { + err := DatasetPropertyFilterValidationError{ + field: "Property", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for Version + case *DatasetPropertyFilter_Org: + if v == nil { + err := DatasetPropertyFilterValidationError{ + field: "Property", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for Org + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return DatasetPropertyFilterMultiError(errors) + } + + return nil +} + +// DatasetPropertyFilterMultiError is an error wrapping multiple validation +// errors returned by DatasetPropertyFilter.ValidateAll() if the designated +// constraints aren't met. 
+type DatasetPropertyFilterMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DatasetPropertyFilterMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DatasetPropertyFilterMultiError) AllErrors() []error { return m } + +// DatasetPropertyFilterValidationError is the validation error returned by +// DatasetPropertyFilter.Validate if the designated constraints aren't met. +type DatasetPropertyFilterValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DatasetPropertyFilterValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DatasetPropertyFilterValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DatasetPropertyFilterValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DatasetPropertyFilterValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e DatasetPropertyFilterValidationError) ErrorName() string { + return "DatasetPropertyFilterValidationError" +} + +// Error satisfies the builtin error interface +func (e DatasetPropertyFilterValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDatasetPropertyFilter.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DatasetPropertyFilterValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DatasetPropertyFilterValidationError{} + +// Validate checks the field values on PaginationOptions with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *PaginationOptions) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on PaginationOptions with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// PaginationOptionsMultiError, or nil if none found. +func (m *PaginationOptions) ValidateAll() error { + return m.validate(true) +} + +func (m *PaginationOptions) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Limit + + // no validation rules for Token + + // no validation rules for SortKey + + // no validation rules for SortOrder + + if len(errors) > 0 { + return PaginationOptionsMultiError(errors) + } + + return nil +} + +// PaginationOptionsMultiError is an error wrapping multiple validation errors +// returned by PaginationOptions.ValidateAll() if the designated constraints +// aren't met. 
+type PaginationOptionsMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m PaginationOptionsMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m PaginationOptionsMultiError) AllErrors() []error { return m } + +// PaginationOptionsValidationError is the validation error returned by +// PaginationOptions.Validate if the designated constraints aren't met. +type PaginationOptionsValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e PaginationOptionsValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e PaginationOptionsValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e PaginationOptionsValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e PaginationOptionsValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e PaginationOptionsValidationError) ErrorName() string { + return "PaginationOptionsValidationError" +} + +// Error satisfies the builtin error interface +func (e PaginationOptionsValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sPaginationOptions.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = PaginationOptionsValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = PaginationOptionsValidationError{} diff --git a/gen/go/flyteidl2/datacatalog/datacatalog_grpc.pb.go b/gen/go/flyteidl2/datacatalog/datacatalog_grpc.pb.go new file mode 100644 index 0000000000..61fbec81b0 --- /dev/null +++ b/gen/go/flyteidl2/datacatalog/datacatalog_grpc.pb.go @@ -0,0 +1,486 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.3.0 +// - protoc (unknown) +// source: flyteidl2/datacatalog/datacatalog.proto + +package datacatalog + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. 
+const _ = grpc.SupportPackageIsVersion7 + +const ( + DataCatalog_CreateDataset_FullMethodName = "/flyteidl2.datacatalog.DataCatalog/CreateDataset" + DataCatalog_GetDataset_FullMethodName = "/flyteidl2.datacatalog.DataCatalog/GetDataset" + DataCatalog_CreateArtifact_FullMethodName = "/flyteidl2.datacatalog.DataCatalog/CreateArtifact" + DataCatalog_GetArtifact_FullMethodName = "/flyteidl2.datacatalog.DataCatalog/GetArtifact" + DataCatalog_AddTag_FullMethodName = "/flyteidl2.datacatalog.DataCatalog/AddTag" + DataCatalog_ListArtifacts_FullMethodName = "/flyteidl2.datacatalog.DataCatalog/ListArtifacts" + DataCatalog_ListDatasets_FullMethodName = "/flyteidl2.datacatalog.DataCatalog/ListDatasets" + DataCatalog_UpdateArtifact_FullMethodName = "/flyteidl2.datacatalog.DataCatalog/UpdateArtifact" + DataCatalog_GetOrExtendReservation_FullMethodName = "/flyteidl2.datacatalog.DataCatalog/GetOrExtendReservation" + DataCatalog_ReleaseReservation_FullMethodName = "/flyteidl2.datacatalog.DataCatalog/ReleaseReservation" +) + +// DataCatalogClient is the client API for DataCatalog service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type DataCatalogClient interface { + // Create a new Dataset. Datasets are unique based on the DatasetID. Datasets are logical groupings of artifacts. + // Each dataset can have one or more artifacts + CreateDataset(ctx context.Context, in *CreateDatasetRequest, opts ...grpc.CallOption) (*CreateDatasetResponse, error) + // Get a Dataset by the DatasetID. This returns the Dataset with the associated metadata. + GetDataset(ctx context.Context, in *GetDatasetRequest, opts ...grpc.CallOption) (*GetDatasetResponse, error) + // Create an artifact and the artifact data associated with it. 
An artifact can be a hive partition or arbitrary + // files or data values + CreateArtifact(ctx context.Context, in *CreateArtifactRequest, opts ...grpc.CallOption) (*CreateArtifactResponse, error) + // Retrieve an artifact by an identifying handle. This returns an artifact along with the artifact data. + GetArtifact(ctx context.Context, in *GetArtifactRequest, opts ...grpc.CallOption) (*GetArtifactResponse, error) + // Associate a tag with an artifact. Tags are unique within a Dataset. + AddTag(ctx context.Context, in *AddTagRequest, opts ...grpc.CallOption) (*AddTagResponse, error) + // Return a paginated list of artifacts + ListArtifacts(ctx context.Context, in *ListArtifactsRequest, opts ...grpc.CallOption) (*ListArtifactsResponse, error) + // Return a paginated list of datasets + ListDatasets(ctx context.Context, in *ListDatasetsRequest, opts ...grpc.CallOption) (*ListDatasetsResponse, error) + // Updates an existing artifact, overwriting the stored artifact data in the underlying blob storage. + UpdateArtifact(ctx context.Context, in *UpdateArtifactRequest, opts ...grpc.CallOption) (*UpdateArtifactResponse, error) + // Attempts to get or extend a reservation for the corresponding artifact. If one already exists + // (ie. another entity owns the reservation) then that reservation is retrieved. + // Once you acquire a reservation, you need to periodically extend the reservation with an + // identical call. If the reservation is not extended before the defined expiration, it may be + // acquired by another task. + // Note: We may have multiple concurrent tasks with the same signature and the same input that + // try to populate the same artifact at the same time. Thus with reservation, only one task can + // run at a time, until the reservation expires. + // Note: If task A does not extend the reservation in time and the reservation expires, another + // task B may take over the reservation, resulting in two tasks A and B running in parallel. 
So + // a third task C may get the Artifact from A or B, whichever writes last. + GetOrExtendReservation(ctx context.Context, in *GetOrExtendReservationRequest, opts ...grpc.CallOption) (*GetOrExtendReservationResponse, error) + // Release the reservation when the task holding the spot fails so that the other tasks + // can grab the spot. + ReleaseReservation(ctx context.Context, in *ReleaseReservationRequest, opts ...grpc.CallOption) (*ReleaseReservationResponse, error) +} + +type dataCatalogClient struct { + cc grpc.ClientConnInterface +} + +func NewDataCatalogClient(cc grpc.ClientConnInterface) DataCatalogClient { + return &dataCatalogClient{cc} +} + +func (c *dataCatalogClient) CreateDataset(ctx context.Context, in *CreateDatasetRequest, opts ...grpc.CallOption) (*CreateDatasetResponse, error) { + out := new(CreateDatasetResponse) + err := c.cc.Invoke(ctx, DataCatalog_CreateDataset_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dataCatalogClient) GetDataset(ctx context.Context, in *GetDatasetRequest, opts ...grpc.CallOption) (*GetDatasetResponse, error) { + out := new(GetDatasetResponse) + err := c.cc.Invoke(ctx, DataCatalog_GetDataset_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dataCatalogClient) CreateArtifact(ctx context.Context, in *CreateArtifactRequest, opts ...grpc.CallOption) (*CreateArtifactResponse, error) { + out := new(CreateArtifactResponse) + err := c.cc.Invoke(ctx, DataCatalog_CreateArtifact_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dataCatalogClient) GetArtifact(ctx context.Context, in *GetArtifactRequest, opts ...grpc.CallOption) (*GetArtifactResponse, error) { + out := new(GetArtifactResponse) + err := c.cc.Invoke(ctx, DataCatalog_GetArtifact_FullMethodName, in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *dataCatalogClient) AddTag(ctx context.Context, in *AddTagRequest, opts ...grpc.CallOption) (*AddTagResponse, error) { + out := new(AddTagResponse) + err := c.cc.Invoke(ctx, DataCatalog_AddTag_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dataCatalogClient) ListArtifacts(ctx context.Context, in *ListArtifactsRequest, opts ...grpc.CallOption) (*ListArtifactsResponse, error) { + out := new(ListArtifactsResponse) + err := c.cc.Invoke(ctx, DataCatalog_ListArtifacts_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dataCatalogClient) ListDatasets(ctx context.Context, in *ListDatasetsRequest, opts ...grpc.CallOption) (*ListDatasetsResponse, error) { + out := new(ListDatasetsResponse) + err := c.cc.Invoke(ctx, DataCatalog_ListDatasets_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dataCatalogClient) UpdateArtifact(ctx context.Context, in *UpdateArtifactRequest, opts ...grpc.CallOption) (*UpdateArtifactResponse, error) { + out := new(UpdateArtifactResponse) + err := c.cc.Invoke(ctx, DataCatalog_UpdateArtifact_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *dataCatalogClient) GetOrExtendReservation(ctx context.Context, in *GetOrExtendReservationRequest, opts ...grpc.CallOption) (*GetOrExtendReservationResponse, error) { + out := new(GetOrExtendReservationResponse) + err := c.cc.Invoke(ctx, DataCatalog_GetOrExtendReservation_FullMethodName, in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *dataCatalogClient) ReleaseReservation(ctx context.Context, in *ReleaseReservationRequest, opts ...grpc.CallOption) (*ReleaseReservationResponse, error) { + out := new(ReleaseReservationResponse) + err := c.cc.Invoke(ctx, DataCatalog_ReleaseReservation_FullMethodName, in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// DataCatalogServer is the server API for DataCatalog service. +// All implementations should embed UnimplementedDataCatalogServer +// for forward compatibility +type DataCatalogServer interface { + // Create a new Dataset. Datasets are unique based on the DatasetID. Datasets are logical groupings of artifacts. + // Each dataset can have one or more artifacts + CreateDataset(context.Context, *CreateDatasetRequest) (*CreateDatasetResponse, error) + // Get a Dataset by the DatasetID. This returns the Dataset with the associated metadata. + GetDataset(context.Context, *GetDatasetRequest) (*GetDatasetResponse, error) + // Create an artifact and the artifact data associated with it. An artifact can be a hive partition or arbitrary + // files or data values + CreateArtifact(context.Context, *CreateArtifactRequest) (*CreateArtifactResponse, error) + // Retrieve an artifact by an identifying handle. This returns an artifact along with the artifact data. + GetArtifact(context.Context, *GetArtifactRequest) (*GetArtifactResponse, error) + // Associate a tag with an artifact. Tags are unique within a Dataset. + AddTag(context.Context, *AddTagRequest) (*AddTagResponse, error) + // Return a paginated list of artifacts + ListArtifacts(context.Context, *ListArtifactsRequest) (*ListArtifactsResponse, error) + // Return a paginated list of datasets + ListDatasets(context.Context, *ListDatasetsRequest) (*ListDatasetsResponse, error) + // Updates an existing artifact, overwriting the stored artifact data in the underlying blob storage. 
+ UpdateArtifact(context.Context, *UpdateArtifactRequest) (*UpdateArtifactResponse, error) + // Attempts to get or extend a reservation for the corresponding artifact. If one already exists + // (ie. another entity owns the reservation) then that reservation is retrieved. + // Once you acquire a reservation, you need to periodically extend the reservation with an + // identical call. If the reservation is not extended before the defined expiration, it may be + // acquired by another task. + // Note: We may have multiple concurrent tasks with the same signature and the same input that + // try to populate the same artifact at the same time. Thus with reservation, only one task can + // run at a time, until the reservation expires. + // Note: If task A does not extend the reservation in time and the reservation expires, another + // task B may take over the reservation, resulting in two tasks A and B running in parallel. So + // a third task C may get the Artifact from A or B, whichever writes last. + GetOrExtendReservation(context.Context, *GetOrExtendReservationRequest) (*GetOrExtendReservationResponse, error) + // Release the reservation when the task holding the spot fails so that the other tasks + // can grab the spot. + ReleaseReservation(context.Context, *ReleaseReservationRequest) (*ReleaseReservationResponse, error) +} + +// UnimplementedDataCatalogServer should be embedded to have forward compatible implementations. 
+type UnimplementedDataCatalogServer struct { +} + +func (UnimplementedDataCatalogServer) CreateDataset(context.Context, *CreateDatasetRequest) (*CreateDatasetResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateDataset not implemented") +} +func (UnimplementedDataCatalogServer) GetDataset(context.Context, *GetDatasetRequest) (*GetDatasetResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetDataset not implemented") +} +func (UnimplementedDataCatalogServer) CreateArtifact(context.Context, *CreateArtifactRequest) (*CreateArtifactResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateArtifact not implemented") +} +func (UnimplementedDataCatalogServer) GetArtifact(context.Context, *GetArtifactRequest) (*GetArtifactResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetArtifact not implemented") +} +func (UnimplementedDataCatalogServer) AddTag(context.Context, *AddTagRequest) (*AddTagResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AddTag not implemented") +} +func (UnimplementedDataCatalogServer) ListArtifacts(context.Context, *ListArtifactsRequest) (*ListArtifactsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListArtifacts not implemented") +} +func (UnimplementedDataCatalogServer) ListDatasets(context.Context, *ListDatasetsRequest) (*ListDatasetsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListDatasets not implemented") +} +func (UnimplementedDataCatalogServer) UpdateArtifact(context.Context, *UpdateArtifactRequest) (*UpdateArtifactResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateArtifact not implemented") +} +func (UnimplementedDataCatalogServer) GetOrExtendReservation(context.Context, *GetOrExtendReservationRequest) (*GetOrExtendReservationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetOrExtendReservation not 
implemented") +} +func (UnimplementedDataCatalogServer) ReleaseReservation(context.Context, *ReleaseReservationRequest) (*ReleaseReservationResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ReleaseReservation not implemented") +} + +// UnsafeDataCatalogServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to DataCatalogServer will +// result in compilation errors. +type UnsafeDataCatalogServer interface { + mustEmbedUnimplementedDataCatalogServer() +} + +func RegisterDataCatalogServer(s grpc.ServiceRegistrar, srv DataCatalogServer) { + s.RegisterService(&DataCatalog_ServiceDesc, srv) +} + +func _DataCatalog_CreateDataset_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateDatasetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DataCatalogServer).CreateDataset(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DataCatalog_CreateDataset_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DataCatalogServer).CreateDataset(ctx, req.(*CreateDatasetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DataCatalog_GetDataset_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetDatasetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DataCatalogServer).GetDataset(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DataCatalog_GetDataset_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DataCatalogServer).GetDataset(ctx, req.(*GetDatasetRequest)) + } + return 
interceptor(ctx, in, info, handler) +} + +func _DataCatalog_CreateArtifact_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateArtifactRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DataCatalogServer).CreateArtifact(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DataCatalog_CreateArtifact_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DataCatalogServer).CreateArtifact(ctx, req.(*CreateArtifactRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DataCatalog_GetArtifact_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetArtifactRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DataCatalogServer).GetArtifact(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DataCatalog_GetArtifact_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DataCatalogServer).GetArtifact(ctx, req.(*GetArtifactRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DataCatalog_AddTag_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(AddTagRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DataCatalogServer).AddTag(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DataCatalog_AddTag_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DataCatalogServer).AddTag(ctx, req.(*AddTagRequest)) + } + return interceptor(ctx, in, info, handler) 
+} + +func _DataCatalog_ListArtifacts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListArtifactsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DataCatalogServer).ListArtifacts(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DataCatalog_ListArtifacts_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DataCatalogServer).ListArtifacts(ctx, req.(*ListArtifactsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DataCatalog_ListDatasets_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListDatasetsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DataCatalogServer).ListDatasets(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DataCatalog_ListDatasets_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DataCatalogServer).ListDatasets(ctx, req.(*ListDatasetsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DataCatalog_UpdateArtifact_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateArtifactRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DataCatalogServer).UpdateArtifact(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DataCatalog_UpdateArtifact_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DataCatalogServer).UpdateArtifact(ctx, req.(*UpdateArtifactRequest)) + } + return interceptor(ctx, in, 
info, handler) +} + +func _DataCatalog_GetOrExtendReservation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetOrExtendReservationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DataCatalogServer).GetOrExtendReservation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DataCatalog_GetOrExtendReservation_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DataCatalogServer).GetOrExtendReservation(ctx, req.(*GetOrExtendReservationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DataCatalog_ReleaseReservation_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReleaseReservationRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DataCatalogServer).ReleaseReservation(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: DataCatalog_ReleaseReservation_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DataCatalogServer).ReleaseReservation(ctx, req.(*ReleaseReservationRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// DataCatalog_ServiceDesc is the grpc.ServiceDesc for DataCatalog service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var DataCatalog_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "flyteidl2.datacatalog.DataCatalog", + HandlerType: (*DataCatalogServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateDataset", + Handler: _DataCatalog_CreateDataset_Handler, + }, + { + MethodName: "GetDataset", + Handler: _DataCatalog_GetDataset_Handler, + }, + { + MethodName: "CreateArtifact", + Handler: _DataCatalog_CreateArtifact_Handler, + }, + { + MethodName: "GetArtifact", + Handler: _DataCatalog_GetArtifact_Handler, + }, + { + MethodName: "AddTag", + Handler: _DataCatalog_AddTag_Handler, + }, + { + MethodName: "ListArtifacts", + Handler: _DataCatalog_ListArtifacts_Handler, + }, + { + MethodName: "ListDatasets", + Handler: _DataCatalog_ListDatasets_Handler, + }, + { + MethodName: "UpdateArtifact", + Handler: _DataCatalog_UpdateArtifact_Handler, + }, + { + MethodName: "GetOrExtendReservation", + Handler: _DataCatalog_GetOrExtendReservation_Handler, + }, + { + MethodName: "ReleaseReservation", + Handler: _DataCatalog_ReleaseReservation_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "flyteidl2/datacatalog/datacatalog.proto", +} diff --git a/gen/go/flyteidl2/datacatalog/datacatalogconnect/datacatalog.connect.go b/gen/go/flyteidl2/datacatalog/datacatalogconnect/datacatalog.connect.go new file mode 100644 index 0000000000..611fea5ae4 --- /dev/null +++ b/gen/go/flyteidl2/datacatalog/datacatalogconnect/datacatalog.connect.go @@ -0,0 +1,426 @@ +// Code generated by protoc-gen-connect-go. DO NOT EDIT. 
+// +// Source: flyteidl2/datacatalog/datacatalog.proto + +package datacatalogconnect + +import ( + connect "connectrpc.com/connect" + context "context" + errors "errors" + datacatalog "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/datacatalog" + http "net/http" + strings "strings" +) + +// This is a compile-time assertion to ensure that this generated file and the connect package are +// compatible. If you get a compiler error that this constant is not defined, this code was +// generated with a version of connect newer than the one compiled into your binary. You can fix the +// problem by either regenerating this code with an older version of connect or updating the connect +// version compiled into your binary. +const _ = connect.IsAtLeastVersion1_13_0 + +const ( + // DataCatalogName is the fully-qualified name of the DataCatalog service. + DataCatalogName = "flyteidl2.datacatalog.DataCatalog" +) + +// These constants are the fully-qualified names of the RPCs defined in this package. They're +// exposed at runtime as Spec.Procedure and as the final two segments of the HTTP route. +// +// Note that these are different from the fully-qualified method names used by +// google.golang.org/protobuf/reflect/protoreflect. To convert from these constants to +// reflection-formatted method names, remove the leading slash and convert the remaining slash to a +// period. +const ( + // DataCatalogCreateDatasetProcedure is the fully-qualified name of the DataCatalog's CreateDataset + // RPC. + DataCatalogCreateDatasetProcedure = "/flyteidl2.datacatalog.DataCatalog/CreateDataset" + // DataCatalogGetDatasetProcedure is the fully-qualified name of the DataCatalog's GetDataset RPC. + DataCatalogGetDatasetProcedure = "/flyteidl2.datacatalog.DataCatalog/GetDataset" + // DataCatalogCreateArtifactProcedure is the fully-qualified name of the DataCatalog's + // CreateArtifact RPC. 
+ DataCatalogCreateArtifactProcedure = "/flyteidl2.datacatalog.DataCatalog/CreateArtifact" + // DataCatalogGetArtifactProcedure is the fully-qualified name of the DataCatalog's GetArtifact RPC. + DataCatalogGetArtifactProcedure = "/flyteidl2.datacatalog.DataCatalog/GetArtifact" + // DataCatalogAddTagProcedure is the fully-qualified name of the DataCatalog's AddTag RPC. + DataCatalogAddTagProcedure = "/flyteidl2.datacatalog.DataCatalog/AddTag" + // DataCatalogListArtifactsProcedure is the fully-qualified name of the DataCatalog's ListArtifacts + // RPC. + DataCatalogListArtifactsProcedure = "/flyteidl2.datacatalog.DataCatalog/ListArtifacts" + // DataCatalogListDatasetsProcedure is the fully-qualified name of the DataCatalog's ListDatasets + // RPC. + DataCatalogListDatasetsProcedure = "/flyteidl2.datacatalog.DataCatalog/ListDatasets" + // DataCatalogUpdateArtifactProcedure is the fully-qualified name of the DataCatalog's + // UpdateArtifact RPC. + DataCatalogUpdateArtifactProcedure = "/flyteidl2.datacatalog.DataCatalog/UpdateArtifact" + // DataCatalogGetOrExtendReservationProcedure is the fully-qualified name of the DataCatalog's + // GetOrExtendReservation RPC. + DataCatalogGetOrExtendReservationProcedure = "/flyteidl2.datacatalog.DataCatalog/GetOrExtendReservation" + // DataCatalogReleaseReservationProcedure is the fully-qualified name of the DataCatalog's + // ReleaseReservation RPC. + DataCatalogReleaseReservationProcedure = "/flyteidl2.datacatalog.DataCatalog/ReleaseReservation" +) + +// These variables are the protoreflect.Descriptor objects for the RPCs defined in this package. 
+var ( + dataCatalogServiceDescriptor = datacatalog.File_flyteidl2_datacatalog_datacatalog_proto.Services().ByName("DataCatalog") + dataCatalogCreateDatasetMethodDescriptor = dataCatalogServiceDescriptor.Methods().ByName("CreateDataset") + dataCatalogGetDatasetMethodDescriptor = dataCatalogServiceDescriptor.Methods().ByName("GetDataset") + dataCatalogCreateArtifactMethodDescriptor = dataCatalogServiceDescriptor.Methods().ByName("CreateArtifact") + dataCatalogGetArtifactMethodDescriptor = dataCatalogServiceDescriptor.Methods().ByName("GetArtifact") + dataCatalogAddTagMethodDescriptor = dataCatalogServiceDescriptor.Methods().ByName("AddTag") + dataCatalogListArtifactsMethodDescriptor = dataCatalogServiceDescriptor.Methods().ByName("ListArtifacts") + dataCatalogListDatasetsMethodDescriptor = dataCatalogServiceDescriptor.Methods().ByName("ListDatasets") + dataCatalogUpdateArtifactMethodDescriptor = dataCatalogServiceDescriptor.Methods().ByName("UpdateArtifact") + dataCatalogGetOrExtendReservationMethodDescriptor = dataCatalogServiceDescriptor.Methods().ByName("GetOrExtendReservation") + dataCatalogReleaseReservationMethodDescriptor = dataCatalogServiceDescriptor.Methods().ByName("ReleaseReservation") +) + +// DataCatalogClient is a client for the flyteidl2.datacatalog.DataCatalog service. +type DataCatalogClient interface { + // Create a new Dataset. Datasets are unique based on the DatasetID. Datasets are logical groupings of artifacts. + // Each dataset can have one or more artifacts + CreateDataset(context.Context, *connect.Request[datacatalog.CreateDatasetRequest]) (*connect.Response[datacatalog.CreateDatasetResponse], error) + // Get a Dataset by the DatasetID. This returns the Dataset with the associated metadata. + GetDataset(context.Context, *connect.Request[datacatalog.GetDatasetRequest]) (*connect.Response[datacatalog.GetDatasetResponse], error) + // Create an artifact and the artifact data associated with it. 
An artifact can be a hive partition or arbitrary + // files or data values + CreateArtifact(context.Context, *connect.Request[datacatalog.CreateArtifactRequest]) (*connect.Response[datacatalog.CreateArtifactResponse], error) + // Retrieve an artifact by an identifying handle. This returns an artifact along with the artifact data. + GetArtifact(context.Context, *connect.Request[datacatalog.GetArtifactRequest]) (*connect.Response[datacatalog.GetArtifactResponse], error) + // Associate a tag with an artifact. Tags are unique within a Dataset. + AddTag(context.Context, *connect.Request[datacatalog.AddTagRequest]) (*connect.Response[datacatalog.AddTagResponse], error) + // Return a paginated list of artifacts + ListArtifacts(context.Context, *connect.Request[datacatalog.ListArtifactsRequest]) (*connect.Response[datacatalog.ListArtifactsResponse], error) + // Return a paginated list of datasets + ListDatasets(context.Context, *connect.Request[datacatalog.ListDatasetsRequest]) (*connect.Response[datacatalog.ListDatasetsResponse], error) + // Updates an existing artifact, overwriting the stored artifact data in the underlying blob storage. + UpdateArtifact(context.Context, *connect.Request[datacatalog.UpdateArtifactRequest]) (*connect.Response[datacatalog.UpdateArtifactResponse], error) + // Attempts to get or extend a reservation for the corresponding artifact. If one already exists + // (ie. another entity owns the reservation) then that reservation is retrieved. + // Once you acquire a reservation, you need to periodically extend the reservation with an + // identical call. If the reservation is not extended before the defined expiration, it may be + // acquired by another task. + // Note: We may have multiple concurrent tasks with the same signature and the same input that + // try to populate the same artifact at the same time. Thus with reservation, only one task can + // run at a time, until the reservation expires. 
+ // Note: If task A does not extend the reservation in time and the reservation expires, another + // task B may take over the reservation, resulting in two tasks A and B running in parallel. So + // a third task C may get the Artifact from A or B, whichever writes last. + GetOrExtendReservation(context.Context, *connect.Request[datacatalog.GetOrExtendReservationRequest]) (*connect.Response[datacatalog.GetOrExtendReservationResponse], error) + // Release the reservation when the task holding the spot fails so that the other tasks + // can grab the spot. + ReleaseReservation(context.Context, *connect.Request[datacatalog.ReleaseReservationRequest]) (*connect.Response[datacatalog.ReleaseReservationResponse], error) +} + +// NewDataCatalogClient constructs a client for the flyteidl2.datacatalog.DataCatalog service. By +// default, it uses the Connect protocol with the binary Protobuf Codec, asks for gzipped responses, +// and sends uncompressed requests. To use the gRPC or gRPC-Web protocols, supply the +// connect.WithGRPC() or connect.WithGRPCWeb() options. +// +// The URL supplied here should be the base URL for the Connect or gRPC server (for example, +// http://api.acme.com or https://acme.com/grpc). 
+func NewDataCatalogClient(httpClient connect.HTTPClient, baseURL string, opts ...connect.ClientOption) DataCatalogClient { + baseURL = strings.TrimRight(baseURL, "/") + return &dataCatalogClient{ + createDataset: connect.NewClient[datacatalog.CreateDatasetRequest, datacatalog.CreateDatasetResponse]( + httpClient, + baseURL+DataCatalogCreateDatasetProcedure, + connect.WithSchema(dataCatalogCreateDatasetMethodDescriptor), + connect.WithClientOptions(opts...), + ), + getDataset: connect.NewClient[datacatalog.GetDatasetRequest, datacatalog.GetDatasetResponse]( + httpClient, + baseURL+DataCatalogGetDatasetProcedure, + connect.WithSchema(dataCatalogGetDatasetMethodDescriptor), + connect.WithClientOptions(opts...), + ), + createArtifact: connect.NewClient[datacatalog.CreateArtifactRequest, datacatalog.CreateArtifactResponse]( + httpClient, + baseURL+DataCatalogCreateArtifactProcedure, + connect.WithSchema(dataCatalogCreateArtifactMethodDescriptor), + connect.WithClientOptions(opts...), + ), + getArtifact: connect.NewClient[datacatalog.GetArtifactRequest, datacatalog.GetArtifactResponse]( + httpClient, + baseURL+DataCatalogGetArtifactProcedure, + connect.WithSchema(dataCatalogGetArtifactMethodDescriptor), + connect.WithClientOptions(opts...), + ), + addTag: connect.NewClient[datacatalog.AddTagRequest, datacatalog.AddTagResponse]( + httpClient, + baseURL+DataCatalogAddTagProcedure, + connect.WithSchema(dataCatalogAddTagMethodDescriptor), + connect.WithClientOptions(opts...), + ), + listArtifacts: connect.NewClient[datacatalog.ListArtifactsRequest, datacatalog.ListArtifactsResponse]( + httpClient, + baseURL+DataCatalogListArtifactsProcedure, + connect.WithSchema(dataCatalogListArtifactsMethodDescriptor), + connect.WithClientOptions(opts...), + ), + listDatasets: connect.NewClient[datacatalog.ListDatasetsRequest, datacatalog.ListDatasetsResponse]( + httpClient, + baseURL+DataCatalogListDatasetsProcedure, + connect.WithSchema(dataCatalogListDatasetsMethodDescriptor), + 
connect.WithClientOptions(opts...), + ), + updateArtifact: connect.NewClient[datacatalog.UpdateArtifactRequest, datacatalog.UpdateArtifactResponse]( + httpClient, + baseURL+DataCatalogUpdateArtifactProcedure, + connect.WithSchema(dataCatalogUpdateArtifactMethodDescriptor), + connect.WithClientOptions(opts...), + ), + getOrExtendReservation: connect.NewClient[datacatalog.GetOrExtendReservationRequest, datacatalog.GetOrExtendReservationResponse]( + httpClient, + baseURL+DataCatalogGetOrExtendReservationProcedure, + connect.WithSchema(dataCatalogGetOrExtendReservationMethodDescriptor), + connect.WithClientOptions(opts...), + ), + releaseReservation: connect.NewClient[datacatalog.ReleaseReservationRequest, datacatalog.ReleaseReservationResponse]( + httpClient, + baseURL+DataCatalogReleaseReservationProcedure, + connect.WithSchema(dataCatalogReleaseReservationMethodDescriptor), + connect.WithClientOptions(opts...), + ), + } +} + +// dataCatalogClient implements DataCatalogClient. +type dataCatalogClient struct { + createDataset *connect.Client[datacatalog.CreateDatasetRequest, datacatalog.CreateDatasetResponse] + getDataset *connect.Client[datacatalog.GetDatasetRequest, datacatalog.GetDatasetResponse] + createArtifact *connect.Client[datacatalog.CreateArtifactRequest, datacatalog.CreateArtifactResponse] + getArtifact *connect.Client[datacatalog.GetArtifactRequest, datacatalog.GetArtifactResponse] + addTag *connect.Client[datacatalog.AddTagRequest, datacatalog.AddTagResponse] + listArtifacts *connect.Client[datacatalog.ListArtifactsRequest, datacatalog.ListArtifactsResponse] + listDatasets *connect.Client[datacatalog.ListDatasetsRequest, datacatalog.ListDatasetsResponse] + updateArtifact *connect.Client[datacatalog.UpdateArtifactRequest, datacatalog.UpdateArtifactResponse] + getOrExtendReservation *connect.Client[datacatalog.GetOrExtendReservationRequest, datacatalog.GetOrExtendReservationResponse] + releaseReservation 
*connect.Client[datacatalog.ReleaseReservationRequest, datacatalog.ReleaseReservationResponse] +} + +// CreateDataset calls flyteidl2.datacatalog.DataCatalog.CreateDataset. +func (c *dataCatalogClient) CreateDataset(ctx context.Context, req *connect.Request[datacatalog.CreateDatasetRequest]) (*connect.Response[datacatalog.CreateDatasetResponse], error) { + return c.createDataset.CallUnary(ctx, req) +} + +// GetDataset calls flyteidl2.datacatalog.DataCatalog.GetDataset. +func (c *dataCatalogClient) GetDataset(ctx context.Context, req *connect.Request[datacatalog.GetDatasetRequest]) (*connect.Response[datacatalog.GetDatasetResponse], error) { + return c.getDataset.CallUnary(ctx, req) +} + +// CreateArtifact calls flyteidl2.datacatalog.DataCatalog.CreateArtifact. +func (c *dataCatalogClient) CreateArtifact(ctx context.Context, req *connect.Request[datacatalog.CreateArtifactRequest]) (*connect.Response[datacatalog.CreateArtifactResponse], error) { + return c.createArtifact.CallUnary(ctx, req) +} + +// GetArtifact calls flyteidl2.datacatalog.DataCatalog.GetArtifact. +func (c *dataCatalogClient) GetArtifact(ctx context.Context, req *connect.Request[datacatalog.GetArtifactRequest]) (*connect.Response[datacatalog.GetArtifactResponse], error) { + return c.getArtifact.CallUnary(ctx, req) +} + +// AddTag calls flyteidl2.datacatalog.DataCatalog.AddTag. +func (c *dataCatalogClient) AddTag(ctx context.Context, req *connect.Request[datacatalog.AddTagRequest]) (*connect.Response[datacatalog.AddTagResponse], error) { + return c.addTag.CallUnary(ctx, req) +} + +// ListArtifacts calls flyteidl2.datacatalog.DataCatalog.ListArtifacts. +func (c *dataCatalogClient) ListArtifacts(ctx context.Context, req *connect.Request[datacatalog.ListArtifactsRequest]) (*connect.Response[datacatalog.ListArtifactsResponse], error) { + return c.listArtifacts.CallUnary(ctx, req) +} + +// ListDatasets calls flyteidl2.datacatalog.DataCatalog.ListDatasets. 
+func (c *dataCatalogClient) ListDatasets(ctx context.Context, req *connect.Request[datacatalog.ListDatasetsRequest]) (*connect.Response[datacatalog.ListDatasetsResponse], error) { + return c.listDatasets.CallUnary(ctx, req) +} + +// UpdateArtifact calls flyteidl2.datacatalog.DataCatalog.UpdateArtifact. +func (c *dataCatalogClient) UpdateArtifact(ctx context.Context, req *connect.Request[datacatalog.UpdateArtifactRequest]) (*connect.Response[datacatalog.UpdateArtifactResponse], error) { + return c.updateArtifact.CallUnary(ctx, req) +} + +// GetOrExtendReservation calls flyteidl2.datacatalog.DataCatalog.GetOrExtendReservation. +func (c *dataCatalogClient) GetOrExtendReservation(ctx context.Context, req *connect.Request[datacatalog.GetOrExtendReservationRequest]) (*connect.Response[datacatalog.GetOrExtendReservationResponse], error) { + return c.getOrExtendReservation.CallUnary(ctx, req) +} + +// ReleaseReservation calls flyteidl2.datacatalog.DataCatalog.ReleaseReservation. +func (c *dataCatalogClient) ReleaseReservation(ctx context.Context, req *connect.Request[datacatalog.ReleaseReservationRequest]) (*connect.Response[datacatalog.ReleaseReservationResponse], error) { + return c.releaseReservation.CallUnary(ctx, req) +} + +// DataCatalogHandler is an implementation of the flyteidl2.datacatalog.DataCatalog service. +type DataCatalogHandler interface { + // Create a new Dataset. Datasets are unique based on the DatasetID. Datasets are logical groupings of artifacts. + // Each dataset can have one or more artifacts + CreateDataset(context.Context, *connect.Request[datacatalog.CreateDatasetRequest]) (*connect.Response[datacatalog.CreateDatasetResponse], error) + // Get a Dataset by the DatasetID. This returns the Dataset with the associated metadata. + GetDataset(context.Context, *connect.Request[datacatalog.GetDatasetRequest]) (*connect.Response[datacatalog.GetDatasetResponse], error) + // Create an artifact and the artifact data associated with it. 
An artifact can be a hive partition or arbitrary + // files or data values + CreateArtifact(context.Context, *connect.Request[datacatalog.CreateArtifactRequest]) (*connect.Response[datacatalog.CreateArtifactResponse], error) + // Retrieve an artifact by an identifying handle. This returns an artifact along with the artifact data. + GetArtifact(context.Context, *connect.Request[datacatalog.GetArtifactRequest]) (*connect.Response[datacatalog.GetArtifactResponse], error) + // Associate a tag with an artifact. Tags are unique within a Dataset. + AddTag(context.Context, *connect.Request[datacatalog.AddTagRequest]) (*connect.Response[datacatalog.AddTagResponse], error) + // Return a paginated list of artifacts + ListArtifacts(context.Context, *connect.Request[datacatalog.ListArtifactsRequest]) (*connect.Response[datacatalog.ListArtifactsResponse], error) + // Return a paginated list of datasets + ListDatasets(context.Context, *connect.Request[datacatalog.ListDatasetsRequest]) (*connect.Response[datacatalog.ListDatasetsResponse], error) + // Updates an existing artifact, overwriting the stored artifact data in the underlying blob storage. + UpdateArtifact(context.Context, *connect.Request[datacatalog.UpdateArtifactRequest]) (*connect.Response[datacatalog.UpdateArtifactResponse], error) + // Attempts to get or extend a reservation for the corresponding artifact. If one already exists + // (ie. another entity owns the reservation) then that reservation is retrieved. + // Once you acquire a reservation, you need to periodically extend the reservation with an + // identical call. If the reservation is not extended before the defined expiration, it may be + // acquired by another task. + // Note: We may have multiple concurrent tasks with the same signature and the same input that + // try to populate the same artifact at the same time. Thus with reservation, only one task can + // run at a time, until the reservation expires. 
+ // Note: If task A does not extend the reservation in time and the reservation expires, another + // task B may take over the reservation, resulting in two tasks A and B running in parallel. So + // a third task C may get the Artifact from A or B, whichever writes last. + GetOrExtendReservation(context.Context, *connect.Request[datacatalog.GetOrExtendReservationRequest]) (*connect.Response[datacatalog.GetOrExtendReservationResponse], error) + // Release the reservation when the task holding the spot fails so that the other tasks + // can grab the spot. + ReleaseReservation(context.Context, *connect.Request[datacatalog.ReleaseReservationRequest]) (*connect.Response[datacatalog.ReleaseReservationResponse], error) +} + +// NewDataCatalogHandler builds an HTTP handler from the service implementation. It returns the path +// on which to mount the handler and the handler itself. +// +// By default, handlers support the Connect, gRPC, and gRPC-Web protocols with the binary Protobuf +// and JSON codecs. They also support gzip compression. 
+func NewDataCatalogHandler(svc DataCatalogHandler, opts ...connect.HandlerOption) (string, http.Handler) { + dataCatalogCreateDatasetHandler := connect.NewUnaryHandler( + DataCatalogCreateDatasetProcedure, + svc.CreateDataset, + connect.WithSchema(dataCatalogCreateDatasetMethodDescriptor), + connect.WithHandlerOptions(opts...), + ) + dataCatalogGetDatasetHandler := connect.NewUnaryHandler( + DataCatalogGetDatasetProcedure, + svc.GetDataset, + connect.WithSchema(dataCatalogGetDatasetMethodDescriptor), + connect.WithHandlerOptions(opts...), + ) + dataCatalogCreateArtifactHandler := connect.NewUnaryHandler( + DataCatalogCreateArtifactProcedure, + svc.CreateArtifact, + connect.WithSchema(dataCatalogCreateArtifactMethodDescriptor), + connect.WithHandlerOptions(opts...), + ) + dataCatalogGetArtifactHandler := connect.NewUnaryHandler( + DataCatalogGetArtifactProcedure, + svc.GetArtifact, + connect.WithSchema(dataCatalogGetArtifactMethodDescriptor), + connect.WithHandlerOptions(opts...), + ) + dataCatalogAddTagHandler := connect.NewUnaryHandler( + DataCatalogAddTagProcedure, + svc.AddTag, + connect.WithSchema(dataCatalogAddTagMethodDescriptor), + connect.WithHandlerOptions(opts...), + ) + dataCatalogListArtifactsHandler := connect.NewUnaryHandler( + DataCatalogListArtifactsProcedure, + svc.ListArtifacts, + connect.WithSchema(dataCatalogListArtifactsMethodDescriptor), + connect.WithHandlerOptions(opts...), + ) + dataCatalogListDatasetsHandler := connect.NewUnaryHandler( + DataCatalogListDatasetsProcedure, + svc.ListDatasets, + connect.WithSchema(dataCatalogListDatasetsMethodDescriptor), + connect.WithHandlerOptions(opts...), + ) + dataCatalogUpdateArtifactHandler := connect.NewUnaryHandler( + DataCatalogUpdateArtifactProcedure, + svc.UpdateArtifact, + connect.WithSchema(dataCatalogUpdateArtifactMethodDescriptor), + connect.WithHandlerOptions(opts...), + ) + dataCatalogGetOrExtendReservationHandler := connect.NewUnaryHandler( + DataCatalogGetOrExtendReservationProcedure, + 
svc.GetOrExtendReservation, + connect.WithSchema(dataCatalogGetOrExtendReservationMethodDescriptor), + connect.WithHandlerOptions(opts...), + ) + dataCatalogReleaseReservationHandler := connect.NewUnaryHandler( + DataCatalogReleaseReservationProcedure, + svc.ReleaseReservation, + connect.WithSchema(dataCatalogReleaseReservationMethodDescriptor), + connect.WithHandlerOptions(opts...), + ) + return "/flyteidl2.datacatalog.DataCatalog/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + switch r.URL.Path { + case DataCatalogCreateDatasetProcedure: + dataCatalogCreateDatasetHandler.ServeHTTP(w, r) + case DataCatalogGetDatasetProcedure: + dataCatalogGetDatasetHandler.ServeHTTP(w, r) + case DataCatalogCreateArtifactProcedure: + dataCatalogCreateArtifactHandler.ServeHTTP(w, r) + case DataCatalogGetArtifactProcedure: + dataCatalogGetArtifactHandler.ServeHTTP(w, r) + case DataCatalogAddTagProcedure: + dataCatalogAddTagHandler.ServeHTTP(w, r) + case DataCatalogListArtifactsProcedure: + dataCatalogListArtifactsHandler.ServeHTTP(w, r) + case DataCatalogListDatasetsProcedure: + dataCatalogListDatasetsHandler.ServeHTTP(w, r) + case DataCatalogUpdateArtifactProcedure: + dataCatalogUpdateArtifactHandler.ServeHTTP(w, r) + case DataCatalogGetOrExtendReservationProcedure: + dataCatalogGetOrExtendReservationHandler.ServeHTTP(w, r) + case DataCatalogReleaseReservationProcedure: + dataCatalogReleaseReservationHandler.ServeHTTP(w, r) + default: + http.NotFound(w, r) + } + }) +} + +// UnimplementedDataCatalogHandler returns CodeUnimplemented from all methods. 
+type UnimplementedDataCatalogHandler struct{} + +func (UnimplementedDataCatalogHandler) CreateDataset(context.Context, *connect.Request[datacatalog.CreateDatasetRequest]) (*connect.Response[datacatalog.CreateDatasetResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("flyteidl2.datacatalog.DataCatalog.CreateDataset is not implemented")) +} + +func (UnimplementedDataCatalogHandler) GetDataset(context.Context, *connect.Request[datacatalog.GetDatasetRequest]) (*connect.Response[datacatalog.GetDatasetResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("flyteidl2.datacatalog.DataCatalog.GetDataset is not implemented")) +} + +func (UnimplementedDataCatalogHandler) CreateArtifact(context.Context, *connect.Request[datacatalog.CreateArtifactRequest]) (*connect.Response[datacatalog.CreateArtifactResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("flyteidl2.datacatalog.DataCatalog.CreateArtifact is not implemented")) +} + +func (UnimplementedDataCatalogHandler) GetArtifact(context.Context, *connect.Request[datacatalog.GetArtifactRequest]) (*connect.Response[datacatalog.GetArtifactResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("flyteidl2.datacatalog.DataCatalog.GetArtifact is not implemented")) +} + +func (UnimplementedDataCatalogHandler) AddTag(context.Context, *connect.Request[datacatalog.AddTagRequest]) (*connect.Response[datacatalog.AddTagResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("flyteidl2.datacatalog.DataCatalog.AddTag is not implemented")) +} + +func (UnimplementedDataCatalogHandler) ListArtifacts(context.Context, *connect.Request[datacatalog.ListArtifactsRequest]) (*connect.Response[datacatalog.ListArtifactsResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("flyteidl2.datacatalog.DataCatalog.ListArtifacts is not implemented")) +} + 
+func (UnimplementedDataCatalogHandler) ListDatasets(context.Context, *connect.Request[datacatalog.ListDatasetsRequest]) (*connect.Response[datacatalog.ListDatasetsResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("flyteidl2.datacatalog.DataCatalog.ListDatasets is not implemented")) +} + +func (UnimplementedDataCatalogHandler) UpdateArtifact(context.Context, *connect.Request[datacatalog.UpdateArtifactRequest]) (*connect.Response[datacatalog.UpdateArtifactResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("flyteidl2.datacatalog.DataCatalog.UpdateArtifact is not implemented")) +} + +func (UnimplementedDataCatalogHandler) GetOrExtendReservation(context.Context, *connect.Request[datacatalog.GetOrExtendReservationRequest]) (*connect.Response[datacatalog.GetOrExtendReservationResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("flyteidl2.datacatalog.DataCatalog.GetOrExtendReservation is not implemented")) +} + +func (UnimplementedDataCatalogHandler) ReleaseReservation(context.Context, *connect.Request[datacatalog.ReleaseReservationRequest]) (*connect.Response[datacatalog.ReleaseReservationResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("flyteidl2.datacatalog.DataCatalog.ReleaseReservation is not implemented")) +} diff --git a/gen/go/flyteidl2/event/cloudevents.pb.go b/gen/go/flyteidl2/event/cloudevents.pb.go new file mode 100644 index 0000000000..0266bb6336 --- /dev/null +++ b/gen/go/flyteidl2/event/cloudevents.pb.go @@ -0,0 +1,644 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc (unknown) +// source: flyteidl2/event/cloudevents.proto + +package event + +import ( + core "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is the cloud event parallel to the raw WorkflowExecutionEvent message. It's filled in with additional +// information that downstream consumers may find useful. +type CloudEventWorkflowExecution struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RawEvent *WorkflowExecutionEvent `protobuf:"bytes,1,opt,name=raw_event,json=rawEvent,proto3" json:"raw_event,omitempty"` + OutputInterface *core.TypedInterface `protobuf:"bytes,2,opt,name=output_interface,json=outputInterface,proto3" json:"output_interface,omitempty"` + // The following are ExecutionMetadata fields + // We can't have the ExecutionMetadata object directly because of import cycle + ArtifactIds []*core.ArtifactID `protobuf:"bytes,3,rep,name=artifact_ids,json=artifactIds,proto3" json:"artifact_ids,omitempty"` + ReferenceExecution *core.WorkflowExecutionIdentifier `protobuf:"bytes,4,opt,name=reference_execution,json=referenceExecution,proto3" json:"reference_execution,omitempty"` + Principal string `protobuf:"bytes,5,opt,name=principal,proto3" json:"principal,omitempty"` + // The ID of the LP that generated the execution that generated the Artifact. + // Here for provenance information. + // Launch plan IDs are easier to get than workflow IDs so we'll use these for now. 
+ LaunchPlanId *core.Identifier `protobuf:"bytes,6,opt,name=launch_plan_id,json=launchPlanId,proto3" json:"launch_plan_id,omitempty"` + // We can't have the ExecutionMetadata object directly because of import cycle + Labels map[string]string `protobuf:"bytes,7,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *CloudEventWorkflowExecution) Reset() { + *x = CloudEventWorkflowExecution{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_event_cloudevents_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CloudEventWorkflowExecution) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CloudEventWorkflowExecution) ProtoMessage() {} + +func (x *CloudEventWorkflowExecution) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_event_cloudevents_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CloudEventWorkflowExecution.ProtoReflect.Descriptor instead. 
+func (*CloudEventWorkflowExecution) Descriptor() ([]byte, []int) { + return file_flyteidl2_event_cloudevents_proto_rawDescGZIP(), []int{0} +} + +func (x *CloudEventWorkflowExecution) GetRawEvent() *WorkflowExecutionEvent { + if x != nil { + return x.RawEvent + } + return nil +} + +func (x *CloudEventWorkflowExecution) GetOutputInterface() *core.TypedInterface { + if x != nil { + return x.OutputInterface + } + return nil +} + +func (x *CloudEventWorkflowExecution) GetArtifactIds() []*core.ArtifactID { + if x != nil { + return x.ArtifactIds + } + return nil +} + +func (x *CloudEventWorkflowExecution) GetReferenceExecution() *core.WorkflowExecutionIdentifier { + if x != nil { + return x.ReferenceExecution + } + return nil +} + +func (x *CloudEventWorkflowExecution) GetPrincipal() string { + if x != nil { + return x.Principal + } + return "" +} + +func (x *CloudEventWorkflowExecution) GetLaunchPlanId() *core.Identifier { + if x != nil { + return x.LaunchPlanId + } + return nil +} + +func (x *CloudEventWorkflowExecution) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +type CloudEventNodeExecution struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RawEvent *NodeExecutionEvent `protobuf:"bytes,1,opt,name=raw_event,json=rawEvent,proto3" json:"raw_event,omitempty"` + // The relevant task execution if applicable + TaskExecId *core.TaskExecutionIdentifier `protobuf:"bytes,2,opt,name=task_exec_id,json=taskExecId,proto3" json:"task_exec_id,omitempty"` + // The typed interface for the task that produced the event. 
+ OutputInterface *core.TypedInterface `protobuf:"bytes,3,opt,name=output_interface,json=outputInterface,proto3" json:"output_interface,omitempty"` + // The following are ExecutionMetadata fields + // We can't have the ExecutionMetadata object directly because of import cycle + ArtifactIds []*core.ArtifactID `protobuf:"bytes,4,rep,name=artifact_ids,json=artifactIds,proto3" json:"artifact_ids,omitempty"` + Principal string `protobuf:"bytes,5,opt,name=principal,proto3" json:"principal,omitempty"` + // The ID of the LP that generated the execution that generated the Artifact. + // Here for provenance information. + // Launch plan IDs are easier to get than workflow IDs so we'll use these for now. + LaunchPlanId *core.Identifier `protobuf:"bytes,6,opt,name=launch_plan_id,json=launchPlanId,proto3" json:"launch_plan_id,omitempty"` + // We can't have the ExecutionMetadata object directly because of import cycle + Labels map[string]string `protobuf:"bytes,7,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *CloudEventNodeExecution) Reset() { + *x = CloudEventNodeExecution{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_event_cloudevents_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CloudEventNodeExecution) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CloudEventNodeExecution) ProtoMessage() {} + +func (x *CloudEventNodeExecution) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_event_cloudevents_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CloudEventNodeExecution.ProtoReflect.Descriptor instead. 
+func (*CloudEventNodeExecution) Descriptor() ([]byte, []int) { + return file_flyteidl2_event_cloudevents_proto_rawDescGZIP(), []int{1} +} + +func (x *CloudEventNodeExecution) GetRawEvent() *NodeExecutionEvent { + if x != nil { + return x.RawEvent + } + return nil +} + +func (x *CloudEventNodeExecution) GetTaskExecId() *core.TaskExecutionIdentifier { + if x != nil { + return x.TaskExecId + } + return nil +} + +func (x *CloudEventNodeExecution) GetOutputInterface() *core.TypedInterface { + if x != nil { + return x.OutputInterface + } + return nil +} + +func (x *CloudEventNodeExecution) GetArtifactIds() []*core.ArtifactID { + if x != nil { + return x.ArtifactIds + } + return nil +} + +func (x *CloudEventNodeExecution) GetPrincipal() string { + if x != nil { + return x.Principal + } + return "" +} + +func (x *CloudEventNodeExecution) GetLaunchPlanId() *core.Identifier { + if x != nil { + return x.LaunchPlanId + } + return nil +} + +func (x *CloudEventNodeExecution) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +type CloudEventTaskExecution struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RawEvent *TaskExecutionEvent `protobuf:"bytes,1,opt,name=raw_event,json=rawEvent,proto3" json:"raw_event,omitempty"` + // We can't have the ExecutionMetadata object directly because of import cycle + Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *CloudEventTaskExecution) Reset() { + *x = CloudEventTaskExecution{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_event_cloudevents_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CloudEventTaskExecution) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CloudEventTaskExecution) 
ProtoMessage() {} + +func (x *CloudEventTaskExecution) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_event_cloudevents_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CloudEventTaskExecution.ProtoReflect.Descriptor instead. +func (*CloudEventTaskExecution) Descriptor() ([]byte, []int) { + return file_flyteidl2_event_cloudevents_proto_rawDescGZIP(), []int{2} +} + +func (x *CloudEventTaskExecution) GetRawEvent() *TaskExecutionEvent { + if x != nil { + return x.RawEvent + } + return nil +} + +func (x *CloudEventTaskExecution) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +// This event is to be sent by Admin after it creates an execution. +type CloudEventExecutionStart struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The execution created. + ExecutionId *core.WorkflowExecutionIdentifier `protobuf:"bytes,1,opt,name=execution_id,json=executionId,proto3" json:"execution_id,omitempty"` + // The launch plan used. + LaunchPlanId *core.Identifier `protobuf:"bytes,2,opt,name=launch_plan_id,json=launchPlanId,proto3" json:"launch_plan_id,omitempty"` + WorkflowId *core.Identifier `protobuf:"bytes,3,opt,name=workflow_id,json=workflowId,proto3" json:"workflow_id,omitempty"` + // Artifact inputs to the workflow execution for which we have the full Artifact ID. These are likely the result of artifact queries that are run. + ArtifactIds []*core.ArtifactID `protobuf:"bytes,4,rep,name=artifact_ids,json=artifactIds,proto3" json:"artifact_ids,omitempty"` + // Artifact inputs to the workflow execution for which we only have the tracking bit that's installed into the Literal's metadata by the Artifact service. 
+ ArtifactTrackers []string `protobuf:"bytes,5,rep,name=artifact_trackers,json=artifactTrackers,proto3" json:"artifact_trackers,omitempty"` + Principal string `protobuf:"bytes,6,opt,name=principal,proto3" json:"principal,omitempty"` +} + +func (x *CloudEventExecutionStart) Reset() { + *x = CloudEventExecutionStart{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_event_cloudevents_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CloudEventExecutionStart) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CloudEventExecutionStart) ProtoMessage() {} + +func (x *CloudEventExecutionStart) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_event_cloudevents_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CloudEventExecutionStart.ProtoReflect.Descriptor instead. 
+func (*CloudEventExecutionStart) Descriptor() ([]byte, []int) { + return file_flyteidl2_event_cloudevents_proto_rawDescGZIP(), []int{3} +} + +func (x *CloudEventExecutionStart) GetExecutionId() *core.WorkflowExecutionIdentifier { + if x != nil { + return x.ExecutionId + } + return nil +} + +func (x *CloudEventExecutionStart) GetLaunchPlanId() *core.Identifier { + if x != nil { + return x.LaunchPlanId + } + return nil +} + +func (x *CloudEventExecutionStart) GetWorkflowId() *core.Identifier { + if x != nil { + return x.WorkflowId + } + return nil +} + +func (x *CloudEventExecutionStart) GetArtifactIds() []*core.ArtifactID { + if x != nil { + return x.ArtifactIds + } + return nil +} + +func (x *CloudEventExecutionStart) GetArtifactTrackers() []string { + if x != nil { + return x.ArtifactTrackers + } + return nil +} + +func (x *CloudEventExecutionStart) GetPrincipal() string { + if x != nil { + return x.Principal + } + return "" +} + +var File_flyteidl2_event_cloudevents_proto protoreflect.FileDescriptor + +var file_flyteidl2_event_cloudevents_proto_rawDesc = []byte{ + 0x0a, 0x21, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x65, + 0x76, 0x65, 0x6e, 0x74, 0x1a, 0x20, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, + 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x5f, 0x69, 0x64, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, + 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 
0x74, 0x6f, 0x1a, 0x1b, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x32, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb8, 0x04, 0x0a, 0x1b, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x76, + 0x65, 0x6e, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x44, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, + 0x52, 0x08, 0x72, 0x61, 0x77, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x49, 0x0a, 0x10, 0x6f, 0x75, + 0x74, 0x70, 0x75, 0x74, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x49, 0x6e, 0x74, 0x65, 0x72, + 0x66, 0x61, 0x63, 0x65, 0x52, 0x0f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x49, 0x6e, 0x74, 0x65, + 0x72, 0x66, 0x61, 0x63, 0x65, 0x12, 0x3d, 0x0a, 0x0c, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, + 0x74, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x72, 0x74, + 0x69, 0x66, 0x61, 0x63, 0x74, 0x49, 0x44, 0x52, 0x0b, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, + 0x74, 0x49, 0x64, 0x73, 0x12, 0x5c, 0x0a, 0x13, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, + 0x65, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x2b, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 
0x45, 0x78, 0x65, 0x63, 0x75, + 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x12, + 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, + 0x12, 0x40, 0x0a, 0x0e, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x70, 0x6c, 0x61, 0x6e, 0x5f, + 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x66, 0x69, 0x65, 0x72, 0x52, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x50, 0x6c, 0x61, 0x6e, + 0x49, 0x64, 0x12, 0x50, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x07, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x38, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x65, + 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x57, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, + 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, + 0x99, 0x04, 0x0a, 0x17, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4e, 0x6f, + 0x64, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x09, 0x72, + 0x61, 0x77, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, + 0x2e, 
0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, + 0x65, 0x6e, 0x74, 0x52, 0x08, 0x72, 0x61, 0x77, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x49, 0x0a, + 0x0c, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, + 0x6f, 0x6e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x0a, 0x74, 0x61, + 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x49, 0x64, 0x12, 0x49, 0x0a, 0x10, 0x6f, 0x75, 0x74, 0x70, + 0x75, 0x74, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, + 0x63, 0x65, 0x52, 0x0f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x66, + 0x61, 0x63, 0x65, 0x12, 0x3d, 0x0a, 0x0c, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x5f, + 0x69, 0x64, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x72, 0x74, 0x69, 0x66, + 0x61, 0x63, 0x74, 0x49, 0x44, 0x52, 0x0b, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x49, + 0x64, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, + 0x12, 0x40, 0x0a, 0x0e, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x70, 0x6c, 0x61, 0x6e, 0x5f, + 0x69, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 
0x72, 0x65, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x66, 0x69, 0x65, 0x72, 0x52, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x50, 0x6c, 0x61, 0x6e, + 0x49, 0x64, 0x12, 0x4c, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x07, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x65, + 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4e, + 0x6f, 0x64, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x61, 0x62, + 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, + 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, + 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xe4, 0x01, 0x0a, 0x17, + 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, + 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x65, + 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x54, 0x61, 0x73, + 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, + 0x08, 0x72, 0x61, 0x77, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x4c, 0x0a, 0x06, 0x6c, 0x61, 0x62, + 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x6c, 0x6f, 0x75, + 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 
0x72, 0x79, 0x52, + 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x22, 0xf3, 0x02, 0x0a, 0x18, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, + 0x4e, 0x0a, 0x0c, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x52, 0x0b, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, + 0x40, 0x0a, 0x0e, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x70, 0x6c, 0x61, 0x6e, 0x5f, 0x69, + 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x52, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x50, 0x6c, 0x61, 0x6e, 0x49, + 0x64, 0x12, 0x3b, 0x0a, 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, 0x64, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x52, 0x0a, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x64, 0x12, 0x3d, + 0x0a, 0x0c, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x04, + 0x20, 0x03, 0x28, 
0x0b, 0x32, 0x1a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x49, 0x44, + 0x52, 0x0b, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x49, 0x64, 0x73, 0x12, 0x2b, 0x0a, + 0x11, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x65, + 0x72, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, + 0x63, 0x74, 0x54, 0x72, 0x61, 0x63, 0x6b, 0x65, 0x72, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x72, + 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, + 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x42, 0xbb, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, + 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, + 0x42, 0x10, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x48, 0x02, 0x50, 0x01, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0xa2, 0x02, 0x03, 0x46, 0x45, + 0x58, 0xaa, 0x02, 0x0f, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x45, 0x76, + 0x65, 0x6e, 0x74, 0xca, 0x02, 0x0f, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0xe2, 0x02, 0x1b, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x32, 0x5c, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0xea, 0x02, 0x10, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x3a, + 0x3a, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_flyteidl2_event_cloudevents_proto_rawDescOnce 
sync.Once + file_flyteidl2_event_cloudevents_proto_rawDescData = file_flyteidl2_event_cloudevents_proto_rawDesc +) + +func file_flyteidl2_event_cloudevents_proto_rawDescGZIP() []byte { + file_flyteidl2_event_cloudevents_proto_rawDescOnce.Do(func() { + file_flyteidl2_event_cloudevents_proto_rawDescData = protoimpl.X.CompressGZIP(file_flyteidl2_event_cloudevents_proto_rawDescData) + }) + return file_flyteidl2_event_cloudevents_proto_rawDescData +} + +var file_flyteidl2_event_cloudevents_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_flyteidl2_event_cloudevents_proto_goTypes = []interface{}{ + (*CloudEventWorkflowExecution)(nil), // 0: flyteidl2.event.CloudEventWorkflowExecution + (*CloudEventNodeExecution)(nil), // 1: flyteidl2.event.CloudEventNodeExecution + (*CloudEventTaskExecution)(nil), // 2: flyteidl2.event.CloudEventTaskExecution + (*CloudEventExecutionStart)(nil), // 3: flyteidl2.event.CloudEventExecutionStart + nil, // 4: flyteidl2.event.CloudEventWorkflowExecution.LabelsEntry + nil, // 5: flyteidl2.event.CloudEventNodeExecution.LabelsEntry + nil, // 6: flyteidl2.event.CloudEventTaskExecution.LabelsEntry + (*WorkflowExecutionEvent)(nil), // 7: flyteidl2.event.WorkflowExecutionEvent + (*core.TypedInterface)(nil), // 8: flyteidl2.core.TypedInterface + (*core.ArtifactID)(nil), // 9: flyteidl2.core.ArtifactID + (*core.WorkflowExecutionIdentifier)(nil), // 10: flyteidl2.core.WorkflowExecutionIdentifier + (*core.Identifier)(nil), // 11: flyteidl2.core.Identifier + (*NodeExecutionEvent)(nil), // 12: flyteidl2.event.NodeExecutionEvent + (*core.TaskExecutionIdentifier)(nil), // 13: flyteidl2.core.TaskExecutionIdentifier + (*TaskExecutionEvent)(nil), // 14: flyteidl2.event.TaskExecutionEvent +} +var file_flyteidl2_event_cloudevents_proto_depIdxs = []int32{ + 7, // 0: flyteidl2.event.CloudEventWorkflowExecution.raw_event:type_name -> flyteidl2.event.WorkflowExecutionEvent + 8, // 1: flyteidl2.event.CloudEventWorkflowExecution.output_interface:type_name -> 
flyteidl2.core.TypedInterface + 9, // 2: flyteidl2.event.CloudEventWorkflowExecution.artifact_ids:type_name -> flyteidl2.core.ArtifactID + 10, // 3: flyteidl2.event.CloudEventWorkflowExecution.reference_execution:type_name -> flyteidl2.core.WorkflowExecutionIdentifier + 11, // 4: flyteidl2.event.CloudEventWorkflowExecution.launch_plan_id:type_name -> flyteidl2.core.Identifier + 4, // 5: flyteidl2.event.CloudEventWorkflowExecution.labels:type_name -> flyteidl2.event.CloudEventWorkflowExecution.LabelsEntry + 12, // 6: flyteidl2.event.CloudEventNodeExecution.raw_event:type_name -> flyteidl2.event.NodeExecutionEvent + 13, // 7: flyteidl2.event.CloudEventNodeExecution.task_exec_id:type_name -> flyteidl2.core.TaskExecutionIdentifier + 8, // 8: flyteidl2.event.CloudEventNodeExecution.output_interface:type_name -> flyteidl2.core.TypedInterface + 9, // 9: flyteidl2.event.CloudEventNodeExecution.artifact_ids:type_name -> flyteidl2.core.ArtifactID + 11, // 10: flyteidl2.event.CloudEventNodeExecution.launch_plan_id:type_name -> flyteidl2.core.Identifier + 5, // 11: flyteidl2.event.CloudEventNodeExecution.labels:type_name -> flyteidl2.event.CloudEventNodeExecution.LabelsEntry + 14, // 12: flyteidl2.event.CloudEventTaskExecution.raw_event:type_name -> flyteidl2.event.TaskExecutionEvent + 6, // 13: flyteidl2.event.CloudEventTaskExecution.labels:type_name -> flyteidl2.event.CloudEventTaskExecution.LabelsEntry + 10, // 14: flyteidl2.event.CloudEventExecutionStart.execution_id:type_name -> flyteidl2.core.WorkflowExecutionIdentifier + 11, // 15: flyteidl2.event.CloudEventExecutionStart.launch_plan_id:type_name -> flyteidl2.core.Identifier + 11, // 16: flyteidl2.event.CloudEventExecutionStart.workflow_id:type_name -> flyteidl2.core.Identifier + 9, // 17: flyteidl2.event.CloudEventExecutionStart.artifact_ids:type_name -> flyteidl2.core.ArtifactID + 18, // [18:18] is the sub-list for method output_type + 18, // [18:18] is the sub-list for method input_type + 18, // [18:18] is the 
sub-list for extension type_name + 18, // [18:18] is the sub-list for extension extendee + 0, // [0:18] is the sub-list for field type_name +} + +func init() { file_flyteidl2_event_cloudevents_proto_init() } +func file_flyteidl2_event_cloudevents_proto_init() { + if File_flyteidl2_event_cloudevents_proto != nil { + return + } + file_flyteidl2_event_event_proto_init() + if !protoimpl.UnsafeEnabled { + file_flyteidl2_event_cloudevents_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CloudEventWorkflowExecution); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_event_cloudevents_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CloudEventNodeExecution); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_event_cloudevents_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CloudEventTaskExecution); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_event_cloudevents_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CloudEventExecutionStart); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_flyteidl2_event_cloudevents_proto_rawDesc, + NumEnums: 0, + NumMessages: 7, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_flyteidl2_event_cloudevents_proto_goTypes, + DependencyIndexes: file_flyteidl2_event_cloudevents_proto_depIdxs, + MessageInfos: file_flyteidl2_event_cloudevents_proto_msgTypes, + 
}.Build() + File_flyteidl2_event_cloudevents_proto = out.File + file_flyteidl2_event_cloudevents_proto_rawDesc = nil + file_flyteidl2_event_cloudevents_proto_goTypes = nil + file_flyteidl2_event_cloudevents_proto_depIdxs = nil +} diff --git a/gen/go/flyteidl2/event/cloudevents.pb.validate.go b/gen/go/flyteidl2/event/cloudevents.pb.validate.go new file mode 100644 index 0000000000..f00519e83f --- /dev/null +++ b/gen/go/flyteidl2/event/cloudevents.pb.validate.go @@ -0,0 +1,907 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: flyteidl2/event/cloudevents.proto + +package event + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on CloudEventWorkflowExecution with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *CloudEventWorkflowExecution) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CloudEventWorkflowExecution with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CloudEventWorkflowExecutionMultiError, or nil if none found. 
+func (m *CloudEventWorkflowExecution) ValidateAll() error { + return m.validate(true) +} + +func (m *CloudEventWorkflowExecution) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetRawEvent()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CloudEventWorkflowExecutionValidationError{ + field: "RawEvent", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CloudEventWorkflowExecutionValidationError{ + field: "RawEvent", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRawEvent()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CloudEventWorkflowExecutionValidationError{ + field: "RawEvent", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetOutputInterface()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CloudEventWorkflowExecutionValidationError{ + field: "OutputInterface", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CloudEventWorkflowExecutionValidationError{ + field: "OutputInterface", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOutputInterface()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CloudEventWorkflowExecutionValidationError{ + field: "OutputInterface", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetArtifactIds() { + _, _ = idx, item + + if all { + switch v := 
interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CloudEventWorkflowExecutionValidationError{ + field: fmt.Sprintf("ArtifactIds[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CloudEventWorkflowExecutionValidationError{ + field: fmt.Sprintf("ArtifactIds[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CloudEventWorkflowExecutionValidationError{ + field: fmt.Sprintf("ArtifactIds[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetReferenceExecution()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CloudEventWorkflowExecutionValidationError{ + field: "ReferenceExecution", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CloudEventWorkflowExecutionValidationError{ + field: "ReferenceExecution", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetReferenceExecution()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CloudEventWorkflowExecutionValidationError{ + field: "ReferenceExecution", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Principal + + if all { + switch v := interface{}(m.GetLaunchPlanId()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, 
CloudEventWorkflowExecutionValidationError{ + field: "LaunchPlanId", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CloudEventWorkflowExecutionValidationError{ + field: "LaunchPlanId", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLaunchPlanId()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CloudEventWorkflowExecutionValidationError{ + field: "LaunchPlanId", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Labels + + if len(errors) > 0 { + return CloudEventWorkflowExecutionMultiError(errors) + } + + return nil +} + +// CloudEventWorkflowExecutionMultiError is an error wrapping multiple +// validation errors returned by CloudEventWorkflowExecution.ValidateAll() if +// the designated constraints aren't met. +type CloudEventWorkflowExecutionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CloudEventWorkflowExecutionMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CloudEventWorkflowExecutionMultiError) AllErrors() []error { return m } + +// CloudEventWorkflowExecutionValidationError is the validation error returned +// by CloudEventWorkflowExecution.Validate if the designated constraints +// aren't met. +type CloudEventWorkflowExecutionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CloudEventWorkflowExecutionValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e CloudEventWorkflowExecutionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CloudEventWorkflowExecutionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CloudEventWorkflowExecutionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CloudEventWorkflowExecutionValidationError) ErrorName() string { + return "CloudEventWorkflowExecutionValidationError" +} + +// Error satisfies the builtin error interface +func (e CloudEventWorkflowExecutionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCloudEventWorkflowExecution.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CloudEventWorkflowExecutionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CloudEventWorkflowExecutionValidationError{} + +// Validate checks the field values on CloudEventNodeExecution with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *CloudEventNodeExecution) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CloudEventNodeExecution with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CloudEventNodeExecutionMultiError, or nil if none found. 
+func (m *CloudEventNodeExecution) ValidateAll() error { + return m.validate(true) +} + +func (m *CloudEventNodeExecution) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetRawEvent()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CloudEventNodeExecutionValidationError{ + field: "RawEvent", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CloudEventNodeExecutionValidationError{ + field: "RawEvent", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRawEvent()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CloudEventNodeExecutionValidationError{ + field: "RawEvent", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetTaskExecId()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CloudEventNodeExecutionValidationError{ + field: "TaskExecId", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CloudEventNodeExecutionValidationError{ + field: "TaskExecId", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTaskExecId()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CloudEventNodeExecutionValidationError{ + field: "TaskExecId", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetOutputInterface()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != 
nil { + errors = append(errors, CloudEventNodeExecutionValidationError{ + field: "OutputInterface", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CloudEventNodeExecutionValidationError{ + field: "OutputInterface", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOutputInterface()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CloudEventNodeExecutionValidationError{ + field: "OutputInterface", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetArtifactIds() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CloudEventNodeExecutionValidationError{ + field: fmt.Sprintf("ArtifactIds[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CloudEventNodeExecutionValidationError{ + field: fmt.Sprintf("ArtifactIds[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CloudEventNodeExecutionValidationError{ + field: fmt.Sprintf("ArtifactIds[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for Principal + + if all { + switch v := interface{}(m.GetLaunchPlanId()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CloudEventNodeExecutionValidationError{ + field: "LaunchPlanId", + reason: "embedded message failed validation", + cause: err, + }) + } 
+ case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CloudEventNodeExecutionValidationError{ + field: "LaunchPlanId", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLaunchPlanId()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CloudEventNodeExecutionValidationError{ + field: "LaunchPlanId", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Labels + + if len(errors) > 0 { + return CloudEventNodeExecutionMultiError(errors) + } + + return nil +} + +// CloudEventNodeExecutionMultiError is an error wrapping multiple validation +// errors returned by CloudEventNodeExecution.ValidateAll() if the designated +// constraints aren't met. +type CloudEventNodeExecutionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CloudEventNodeExecutionMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CloudEventNodeExecutionMultiError) AllErrors() []error { return m } + +// CloudEventNodeExecutionValidationError is the validation error returned by +// CloudEventNodeExecution.Validate if the designated constraints aren't met. +type CloudEventNodeExecutionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CloudEventNodeExecutionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CloudEventNodeExecutionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CloudEventNodeExecutionValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e CloudEventNodeExecutionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CloudEventNodeExecutionValidationError) ErrorName() string { + return "CloudEventNodeExecutionValidationError" +} + +// Error satisfies the builtin error interface +func (e CloudEventNodeExecutionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCloudEventNodeExecution.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CloudEventNodeExecutionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CloudEventNodeExecutionValidationError{} + +// Validate checks the field values on CloudEventTaskExecution with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *CloudEventTaskExecution) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CloudEventTaskExecution with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CloudEventTaskExecutionMultiError, or nil if none found. 
+func (m *CloudEventTaskExecution) ValidateAll() error { + return m.validate(true) +} + +func (m *CloudEventTaskExecution) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetRawEvent()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CloudEventTaskExecutionValidationError{ + field: "RawEvent", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CloudEventTaskExecutionValidationError{ + field: "RawEvent", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRawEvent()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CloudEventTaskExecutionValidationError{ + field: "RawEvent", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Labels + + if len(errors) > 0 { + return CloudEventTaskExecutionMultiError(errors) + } + + return nil +} + +// CloudEventTaskExecutionMultiError is an error wrapping multiple validation +// errors returned by CloudEventTaskExecution.ValidateAll() if the designated +// constraints aren't met. +type CloudEventTaskExecutionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CloudEventTaskExecutionMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CloudEventTaskExecutionMultiError) AllErrors() []error { return m } + +// CloudEventTaskExecutionValidationError is the validation error returned by +// CloudEventTaskExecution.Validate if the designated constraints aren't met. 
+type CloudEventTaskExecutionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CloudEventTaskExecutionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CloudEventTaskExecutionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CloudEventTaskExecutionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CloudEventTaskExecutionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CloudEventTaskExecutionValidationError) ErrorName() string { + return "CloudEventTaskExecutionValidationError" +} + +// Error satisfies the builtin error interface +func (e CloudEventTaskExecutionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCloudEventTaskExecution.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CloudEventTaskExecutionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CloudEventTaskExecutionValidationError{} + +// Validate checks the field values on CloudEventExecutionStart with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *CloudEventExecutionStart) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CloudEventExecutionStart with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CloudEventExecutionStartMultiError, or nil if none found. 
+func (m *CloudEventExecutionStart) ValidateAll() error { + return m.validate(true) +} + +func (m *CloudEventExecutionStart) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetExecutionId()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CloudEventExecutionStartValidationError{ + field: "ExecutionId", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CloudEventExecutionStartValidationError{ + field: "ExecutionId", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetExecutionId()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CloudEventExecutionStartValidationError{ + field: "ExecutionId", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLaunchPlanId()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CloudEventExecutionStartValidationError{ + field: "LaunchPlanId", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CloudEventExecutionStartValidationError{ + field: "LaunchPlanId", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLaunchPlanId()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CloudEventExecutionStartValidationError{ + field: "LaunchPlanId", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetWorkflowId()).(type) { + case interface{ ValidateAll() error }: + if err 
:= v.ValidateAll(); err != nil { + errors = append(errors, CloudEventExecutionStartValidationError{ + field: "WorkflowId", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CloudEventExecutionStartValidationError{ + field: "WorkflowId", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetWorkflowId()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CloudEventExecutionStartValidationError{ + field: "WorkflowId", + reason: "embedded message failed validation", + cause: err, + } + } + } + + for idx, item := range m.GetArtifactIds() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CloudEventExecutionStartValidationError{ + field: fmt.Sprintf("ArtifactIds[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CloudEventExecutionStartValidationError{ + field: fmt.Sprintf("ArtifactIds[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CloudEventExecutionStartValidationError{ + field: fmt.Sprintf("ArtifactIds[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for Principal + + if len(errors) > 0 { + return CloudEventExecutionStartMultiError(errors) + } + + return nil +} + +// CloudEventExecutionStartMultiError is an error wrapping multiple validation +// errors returned by CloudEventExecutionStart.ValidateAll() if the designated +// constraints aren't met. 
+type CloudEventExecutionStartMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CloudEventExecutionStartMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CloudEventExecutionStartMultiError) AllErrors() []error { return m } + +// CloudEventExecutionStartValidationError is the validation error returned by +// CloudEventExecutionStart.Validate if the designated constraints aren't met. +type CloudEventExecutionStartValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CloudEventExecutionStartValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CloudEventExecutionStartValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CloudEventExecutionStartValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CloudEventExecutionStartValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e CloudEventExecutionStartValidationError) ErrorName() string { + return "CloudEventExecutionStartValidationError" +} + +// Error satisfies the builtin error interface +func (e CloudEventExecutionStartValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCloudEventExecutionStart.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CloudEventExecutionStartValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CloudEventExecutionStartValidationError{} diff --git a/gen/go/flyteidl2/event/event.pb.go b/gen/go/flyteidl2/event/event.pb.go new file mode 100644 index 0000000000..2edec2a5bf --- /dev/null +++ b/gen/go/flyteidl2/event/event.pb.go @@ -0,0 +1,2042 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc (unknown) +// source: flyteidl2/event/event.proto + +package event + +import ( + core "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Includes the broad category of machine used for this specific task execution. +type TaskExecutionMetadata_InstanceClass int32 + +const ( + // The default instance class configured for the flyte application platform. 
+ TaskExecutionMetadata_DEFAULT TaskExecutionMetadata_InstanceClass = 0 + // The instance class configured for interruptible tasks. + TaskExecutionMetadata_INTERRUPTIBLE TaskExecutionMetadata_InstanceClass = 1 +) + +// Enum value maps for TaskExecutionMetadata_InstanceClass. +var ( + TaskExecutionMetadata_InstanceClass_name = map[int32]string{ + 0: "DEFAULT", + 1: "INTERRUPTIBLE", + } + TaskExecutionMetadata_InstanceClass_value = map[string]int32{ + "DEFAULT": 0, + "INTERRUPTIBLE": 1, + } +) + +func (x TaskExecutionMetadata_InstanceClass) Enum() *TaskExecutionMetadata_InstanceClass { + p := new(TaskExecutionMetadata_InstanceClass) + *p = x + return p +} + +func (x TaskExecutionMetadata_InstanceClass) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (TaskExecutionMetadata_InstanceClass) Descriptor() protoreflect.EnumDescriptor { + return file_flyteidl2_event_event_proto_enumTypes[0].Descriptor() +} + +func (TaskExecutionMetadata_InstanceClass) Type() protoreflect.EnumType { + return &file_flyteidl2_event_event_proto_enumTypes[0] +} + +func (x TaskExecutionMetadata_InstanceClass) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use TaskExecutionMetadata_InstanceClass.Descriptor instead. 
+func (TaskExecutionMetadata_InstanceClass) EnumDescriptor() ([]byte, []int) { + return file_flyteidl2_event_event_proto_rawDescGZIP(), []int{10, 0} +} + +type WorkflowExecutionEvent struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Workflow execution id + ExecutionId *core.WorkflowExecutionIdentifier `protobuf:"bytes,1,opt,name=execution_id,json=executionId,proto3" json:"execution_id,omitempty"` + // the id of the originator (Propeller) of the event + ProducerId string `protobuf:"bytes,2,opt,name=producer_id,json=producerId,proto3" json:"producer_id,omitempty"` + Phase core.WorkflowExecution_Phase `protobuf:"varint,3,opt,name=phase,proto3,enum=flyteidl2.core.WorkflowExecution_Phase" json:"phase,omitempty"` + // This timestamp represents when the original event occurred, it is generated + // by the executor of the workflow. + OccurredAt *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=occurred_at,json=occurredAt,proto3" json:"occurred_at,omitempty"` + // Types that are assignable to OutputResult: + // + // *WorkflowExecutionEvent_OutputUri + // *WorkflowExecutionEvent_Error + // *WorkflowExecutionEvent_OutputData + OutputResult isWorkflowExecutionEvent_OutputResult `protobuf_oneof:"output_result"` +} + +func (x *WorkflowExecutionEvent) Reset() { + *x = WorkflowExecutionEvent{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_event_event_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *WorkflowExecutionEvent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkflowExecutionEvent) ProtoMessage() {} + +func (x *WorkflowExecutionEvent) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_event_event_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + 
} + return mi.MessageOf(x) +} + +// Deprecated: Use WorkflowExecutionEvent.ProtoReflect.Descriptor instead. +func (*WorkflowExecutionEvent) Descriptor() ([]byte, []int) { + return file_flyteidl2_event_event_proto_rawDescGZIP(), []int{0} +} + +func (x *WorkflowExecutionEvent) GetExecutionId() *core.WorkflowExecutionIdentifier { + if x != nil { + return x.ExecutionId + } + return nil +} + +func (x *WorkflowExecutionEvent) GetProducerId() string { + if x != nil { + return x.ProducerId + } + return "" +} + +func (x *WorkflowExecutionEvent) GetPhase() core.WorkflowExecution_Phase { + if x != nil { + return x.Phase + } + return core.WorkflowExecution_Phase(0) +} + +func (x *WorkflowExecutionEvent) GetOccurredAt() *timestamppb.Timestamp { + if x != nil { + return x.OccurredAt + } + return nil +} + +func (m *WorkflowExecutionEvent) GetOutputResult() isWorkflowExecutionEvent_OutputResult { + if m != nil { + return m.OutputResult + } + return nil +} + +func (x *WorkflowExecutionEvent) GetOutputUri() string { + if x, ok := x.GetOutputResult().(*WorkflowExecutionEvent_OutputUri); ok { + return x.OutputUri + } + return "" +} + +func (x *WorkflowExecutionEvent) GetError() *core.ExecutionError { + if x, ok := x.GetOutputResult().(*WorkflowExecutionEvent_Error); ok { + return x.Error + } + return nil +} + +func (x *WorkflowExecutionEvent) GetOutputData() *core.LiteralMap { + if x, ok := x.GetOutputResult().(*WorkflowExecutionEvent_OutputData); ok { + return x.OutputData + } + return nil +} + +type isWorkflowExecutionEvent_OutputResult interface { + isWorkflowExecutionEvent_OutputResult() +} + +type WorkflowExecutionEvent_OutputUri struct { + // URL to the output of the execution, it encodes all the information + // including Cloud source provider. ie., s3://... 
+ OutputUri string `protobuf:"bytes,5,opt,name=output_uri,json=outputUri,proto3,oneof"` +} + +type WorkflowExecutionEvent_Error struct { + // Error information for the execution + Error *core.ExecutionError `protobuf:"bytes,6,opt,name=error,proto3,oneof"` +} + +type WorkflowExecutionEvent_OutputData struct { + // Raw output data produced by this workflow execution. + OutputData *core.LiteralMap `protobuf:"bytes,7,opt,name=output_data,json=outputData,proto3,oneof"` +} + +func (*WorkflowExecutionEvent_OutputUri) isWorkflowExecutionEvent_OutputResult() {} + +func (*WorkflowExecutionEvent_Error) isWorkflowExecutionEvent_OutputResult() {} + +func (*WorkflowExecutionEvent_OutputData) isWorkflowExecutionEvent_OutputResult() {} + +type NodeExecutionEvent struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Unique identifier for this node execution + Id *core.NodeExecutionIdentifier `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // the id of the originator (Propeller) of the event + ProducerId string `protobuf:"bytes,2,opt,name=producer_id,json=producerId,proto3" json:"producer_id,omitempty"` + Phase core.NodeExecution_Phase `protobuf:"varint,3,opt,name=phase,proto3,enum=flyteidl2.core.NodeExecution_Phase" json:"phase,omitempty"` + // This timestamp represents when the original event occurred, it is generated + // by the executor of the node. 
+ OccurredAt *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=occurred_at,json=occurredAt,proto3" json:"occurred_at,omitempty"` + // Types that are assignable to InputValue: + // + // *NodeExecutionEvent_InputUri + // *NodeExecutionEvent_InputData + InputValue isNodeExecutionEvent_InputValue `protobuf_oneof:"input_value"` + // Types that are assignable to OutputResult: + // + // *NodeExecutionEvent_OutputUri + // *NodeExecutionEvent_Error + // *NodeExecutionEvent_OutputData + OutputResult isNodeExecutionEvent_OutputResult `protobuf_oneof:"output_result"` + // Additional metadata to do with this event's node target based + // on the node type + // + // Types that are assignable to TargetMetadata: + // + // *NodeExecutionEvent_WorkflowNodeMetadata + // *NodeExecutionEvent_TaskNodeMetadata + TargetMetadata isNodeExecutionEvent_TargetMetadata `protobuf_oneof:"target_metadata"` + // [To be deprecated] Specifies which task (if any) launched this node. + ParentTaskMetadata *ParentTaskExecutionMetadata `protobuf:"bytes,9,opt,name=parent_task_metadata,json=parentTaskMetadata,proto3" json:"parent_task_metadata,omitempty"` + // Specifies the parent node of the current node execution. Node executions at level zero will not have a parent node. 
+ ParentNodeMetadata *ParentNodeExecutionMetadata `protobuf:"bytes,10,opt,name=parent_node_metadata,json=parentNodeMetadata,proto3" json:"parent_node_metadata,omitempty"` + // Retry group to indicate grouping of nodes by retries + RetryGroup string `protobuf:"bytes,11,opt,name=retry_group,json=retryGroup,proto3" json:"retry_group,omitempty"` + // Identifier of the node in the original workflow/graph + // This maps to value of WorkflowTemplate.nodes[X].id + SpecNodeId string `protobuf:"bytes,12,opt,name=spec_node_id,json=specNodeId,proto3" json:"spec_node_id,omitempty"` + // Friendly readable name for the node + NodeName string `protobuf:"bytes,13,opt,name=node_name,json=nodeName,proto3" json:"node_name,omitempty"` + EventVersion int32 `protobuf:"varint,16,opt,name=event_version,json=eventVersion,proto3" json:"event_version,omitempty"` + // Whether this node launched a subworkflow. + IsParent bool `protobuf:"varint,17,opt,name=is_parent,json=isParent,proto3" json:"is_parent,omitempty"` + // Whether this node yielded a dynamic workflow. + IsDynamic bool `protobuf:"varint,18,opt,name=is_dynamic,json=isDynamic,proto3" json:"is_dynamic,omitempty"` + // String location uniquely identifying where the deck HTML file is + // NativeUrl specifies the url in the format of the configured storage provider (e.g. s3://my-bucket/randomstring/suffix.tar) + DeckUri string `protobuf:"bytes,19,opt,name=deck_uri,json=deckUri,proto3" json:"deck_uri,omitempty"` + // This timestamp represents the instant when the event was reported by the executing framework. For example, + // when first processing a node the `occurred_at` timestamp should be the instant propeller makes progress, so when + // literal inputs are initially copied. The event however will not be sent until after the copy completes. + // Extracting both of these timestamps facilitates a more accurate portrayal of the evaluation time-series. 
+ ReportedAt *timestamppb.Timestamp `protobuf:"bytes,21,opt,name=reported_at,json=reportedAt,proto3" json:"reported_at,omitempty"` + // Indicates if this node is an ArrayNode. + IsArray bool `protobuf:"varint,22,opt,name=is_array,json=isArray,proto3" json:"is_array,omitempty"` + // So that Admin doesn't have to rebuild the node execution graph to find the target entity, propeller will fill this + // in optionally - currently this is only filled in for subworkflows. This is the ID of the subworkflow corresponding + // to this node execution. It is difficult to find because Admin only sees one node at a time. A subworkflow could be + // nested multiple layers deep, and you'd need to access the correct workflow template to know the target subworkflow. + TargetEntity *core.Identifier `protobuf:"bytes,23,opt,name=target_entity,json=targetEntity,proto3" json:"target_entity,omitempty"` + // Tasks and subworkflows (but not launch plans) that are run within a dynamic task are effectively independent of + // the tasks that are registered in Admin's db. Confusingly, they are often identical, but sometimes they are not + // even registered at all. Similar to the target_entity field, at the time Admin receives this event, it has no idea + // if the relevant execution entity is was registered, or dynamic. This field indicates that the target_entity ID, + // as well as task IDs in any corresponding Task Executions, should not be used to looked up the task in Admin's db. + IsInDynamicChain bool `protobuf:"varint,24,opt,name=is_in_dynamic_chain,json=isInDynamicChain,proto3" json:"is_in_dynamic_chain,omitempty"` + // Whether this node launched an eager task. 
+ IsEager bool `protobuf:"varint,25,opt,name=is_eager,json=isEager,proto3" json:"is_eager,omitempty"` +} + +func (x *NodeExecutionEvent) Reset() { + *x = NodeExecutionEvent{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_event_event_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NodeExecutionEvent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NodeExecutionEvent) ProtoMessage() {} + +func (x *NodeExecutionEvent) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_event_event_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NodeExecutionEvent.ProtoReflect.Descriptor instead. +func (*NodeExecutionEvent) Descriptor() ([]byte, []int) { + return file_flyteidl2_event_event_proto_rawDescGZIP(), []int{1} +} + +func (x *NodeExecutionEvent) GetId() *core.NodeExecutionIdentifier { + if x != nil { + return x.Id + } + return nil +} + +func (x *NodeExecutionEvent) GetProducerId() string { + if x != nil { + return x.ProducerId + } + return "" +} + +func (x *NodeExecutionEvent) GetPhase() core.NodeExecution_Phase { + if x != nil { + return x.Phase + } + return core.NodeExecution_Phase(0) +} + +func (x *NodeExecutionEvent) GetOccurredAt() *timestamppb.Timestamp { + if x != nil { + return x.OccurredAt + } + return nil +} + +func (m *NodeExecutionEvent) GetInputValue() isNodeExecutionEvent_InputValue { + if m != nil { + return m.InputValue + } + return nil +} + +func (x *NodeExecutionEvent) GetInputUri() string { + if x, ok := x.GetInputValue().(*NodeExecutionEvent_InputUri); ok { + return x.InputUri + } + return "" +} + +func (x *NodeExecutionEvent) GetInputData() *core.LiteralMap { + if x, ok := x.GetInputValue().(*NodeExecutionEvent_InputData); ok { + return x.InputData 
+ } + return nil +} + +func (m *NodeExecutionEvent) GetOutputResult() isNodeExecutionEvent_OutputResult { + if m != nil { + return m.OutputResult + } + return nil +} + +func (x *NodeExecutionEvent) GetOutputUri() string { + if x, ok := x.GetOutputResult().(*NodeExecutionEvent_OutputUri); ok { + return x.OutputUri + } + return "" +} + +func (x *NodeExecutionEvent) GetError() *core.ExecutionError { + if x, ok := x.GetOutputResult().(*NodeExecutionEvent_Error); ok { + return x.Error + } + return nil +} + +func (x *NodeExecutionEvent) GetOutputData() *core.LiteralMap { + if x, ok := x.GetOutputResult().(*NodeExecutionEvent_OutputData); ok { + return x.OutputData + } + return nil +} + +func (m *NodeExecutionEvent) GetTargetMetadata() isNodeExecutionEvent_TargetMetadata { + if m != nil { + return m.TargetMetadata + } + return nil +} + +func (x *NodeExecutionEvent) GetWorkflowNodeMetadata() *WorkflowNodeMetadata { + if x, ok := x.GetTargetMetadata().(*NodeExecutionEvent_WorkflowNodeMetadata); ok { + return x.WorkflowNodeMetadata + } + return nil +} + +func (x *NodeExecutionEvent) GetTaskNodeMetadata() *TaskNodeMetadata { + if x, ok := x.GetTargetMetadata().(*NodeExecutionEvent_TaskNodeMetadata); ok { + return x.TaskNodeMetadata + } + return nil +} + +func (x *NodeExecutionEvent) GetParentTaskMetadata() *ParentTaskExecutionMetadata { + if x != nil { + return x.ParentTaskMetadata + } + return nil +} + +func (x *NodeExecutionEvent) GetParentNodeMetadata() *ParentNodeExecutionMetadata { + if x != nil { + return x.ParentNodeMetadata + } + return nil +} + +func (x *NodeExecutionEvent) GetRetryGroup() string { + if x != nil { + return x.RetryGroup + } + return "" +} + +func (x *NodeExecutionEvent) GetSpecNodeId() string { + if x != nil { + return x.SpecNodeId + } + return "" +} + +func (x *NodeExecutionEvent) GetNodeName() string { + if x != nil { + return x.NodeName + } + return "" +} + +func (x *NodeExecutionEvent) GetEventVersion() int32 { + if x != nil { + return 
x.EventVersion + } + return 0 +} + +func (x *NodeExecutionEvent) GetIsParent() bool { + if x != nil { + return x.IsParent + } + return false +} + +func (x *NodeExecutionEvent) GetIsDynamic() bool { + if x != nil { + return x.IsDynamic + } + return false +} + +func (x *NodeExecutionEvent) GetDeckUri() string { + if x != nil { + return x.DeckUri + } + return "" +} + +func (x *NodeExecutionEvent) GetReportedAt() *timestamppb.Timestamp { + if x != nil { + return x.ReportedAt + } + return nil +} + +func (x *NodeExecutionEvent) GetIsArray() bool { + if x != nil { + return x.IsArray + } + return false +} + +func (x *NodeExecutionEvent) GetTargetEntity() *core.Identifier { + if x != nil { + return x.TargetEntity + } + return nil +} + +func (x *NodeExecutionEvent) GetIsInDynamicChain() bool { + if x != nil { + return x.IsInDynamicChain + } + return false +} + +func (x *NodeExecutionEvent) GetIsEager() bool { + if x != nil { + return x.IsEager + } + return false +} + +type isNodeExecutionEvent_InputValue interface { + isNodeExecutionEvent_InputValue() +} + +type NodeExecutionEvent_InputUri struct { + InputUri string `protobuf:"bytes,5,opt,name=input_uri,json=inputUri,proto3,oneof"` +} + +type NodeExecutionEvent_InputData struct { + // Raw input data consumed by this node execution. + InputData *core.LiteralMap `protobuf:"bytes,20,opt,name=input_data,json=inputData,proto3,oneof"` +} + +func (*NodeExecutionEvent_InputUri) isNodeExecutionEvent_InputValue() {} + +func (*NodeExecutionEvent_InputData) isNodeExecutionEvent_InputValue() {} + +type isNodeExecutionEvent_OutputResult interface { + isNodeExecutionEvent_OutputResult() +} + +type NodeExecutionEvent_OutputUri struct { + // URL to the output of the execution, it encodes all the information + // including Cloud source provider. ie., s3://... 
+ OutputUri string `protobuf:"bytes,6,opt,name=output_uri,json=outputUri,proto3,oneof"` +} + +type NodeExecutionEvent_Error struct { + // Error information for the execution + Error *core.ExecutionError `protobuf:"bytes,7,opt,name=error,proto3,oneof"` +} + +type NodeExecutionEvent_OutputData struct { + // Raw output data produced by this node execution. + OutputData *core.LiteralMap `protobuf:"bytes,15,opt,name=output_data,json=outputData,proto3,oneof"` +} + +func (*NodeExecutionEvent_OutputUri) isNodeExecutionEvent_OutputResult() {} + +func (*NodeExecutionEvent_Error) isNodeExecutionEvent_OutputResult() {} + +func (*NodeExecutionEvent_OutputData) isNodeExecutionEvent_OutputResult() {} + +type isNodeExecutionEvent_TargetMetadata interface { + isNodeExecutionEvent_TargetMetadata() +} + +type NodeExecutionEvent_WorkflowNodeMetadata struct { + WorkflowNodeMetadata *WorkflowNodeMetadata `protobuf:"bytes,8,opt,name=workflow_node_metadata,json=workflowNodeMetadata,proto3,oneof"` +} + +type NodeExecutionEvent_TaskNodeMetadata struct { + TaskNodeMetadata *TaskNodeMetadata `protobuf:"bytes,14,opt,name=task_node_metadata,json=taskNodeMetadata,proto3,oneof"` +} + +func (*NodeExecutionEvent_WorkflowNodeMetadata) isNodeExecutionEvent_TargetMetadata() {} + +func (*NodeExecutionEvent_TaskNodeMetadata) isNodeExecutionEvent_TargetMetadata() {} + +// For Workflow Nodes we need to send information about the workflow that's launched +type WorkflowNodeMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ExecutionId *core.WorkflowExecutionIdentifier `protobuf:"bytes,1,opt,name=execution_id,json=executionId,proto3" json:"execution_id,omitempty"` +} + +func (x *WorkflowNodeMetadata) Reset() { + *x = WorkflowNodeMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_event_event_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x 
*WorkflowNodeMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*WorkflowNodeMetadata) ProtoMessage() {} + +func (x *WorkflowNodeMetadata) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_event_event_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use WorkflowNodeMetadata.ProtoReflect.Descriptor instead. +func (*WorkflowNodeMetadata) Descriptor() ([]byte, []int) { + return file_flyteidl2_event_event_proto_rawDescGZIP(), []int{2} +} + +func (x *WorkflowNodeMetadata) GetExecutionId() *core.WorkflowExecutionIdentifier { + if x != nil { + return x.ExecutionId + } + return nil +} + +type TaskNodeMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Captures the status of caching for this execution. + CacheStatus core.CatalogCacheStatus `protobuf:"varint,1,opt,name=cache_status,json=cacheStatus,proto3,enum=flyteidl2.core.CatalogCacheStatus" json:"cache_status,omitempty"` + // This structure carries the catalog artifact information + CatalogKey *core.CatalogMetadata `protobuf:"bytes,2,opt,name=catalog_key,json=catalogKey,proto3" json:"catalog_key,omitempty"` + // Captures the status of cache reservations for this execution. 
+ ReservationStatus core.CatalogReservation_Status `protobuf:"varint,3,opt,name=reservation_status,json=reservationStatus,proto3,enum=flyteidl2.core.CatalogReservation_Status" json:"reservation_status,omitempty"` + // The latest checkpoint location + CheckpointUri string `protobuf:"bytes,4,opt,name=checkpoint_uri,json=checkpointUri,proto3" json:"checkpoint_uri,omitempty"` +} + +func (x *TaskNodeMetadata) Reset() { + *x = TaskNodeMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_event_event_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TaskNodeMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TaskNodeMetadata) ProtoMessage() {} + +func (x *TaskNodeMetadata) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_event_event_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TaskNodeMetadata.ProtoReflect.Descriptor instead. 
+func (*TaskNodeMetadata) Descriptor() ([]byte, []int) { + return file_flyteidl2_event_event_proto_rawDescGZIP(), []int{3} +} + +func (x *TaskNodeMetadata) GetCacheStatus() core.CatalogCacheStatus { + if x != nil { + return x.CacheStatus + } + return core.CatalogCacheStatus(0) +} + +func (x *TaskNodeMetadata) GetCatalogKey() *core.CatalogMetadata { + if x != nil { + return x.CatalogKey + } + return nil +} + +func (x *TaskNodeMetadata) GetReservationStatus() core.CatalogReservation_Status { + if x != nil { + return x.ReservationStatus + } + return core.CatalogReservation_Status(0) +} + +func (x *TaskNodeMetadata) GetCheckpointUri() string { + if x != nil { + return x.CheckpointUri + } + return "" +} + +type ParentTaskExecutionMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id *core.TaskExecutionIdentifier `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` +} + +func (x *ParentTaskExecutionMetadata) Reset() { + *x = ParentTaskExecutionMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_event_event_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ParentTaskExecutionMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ParentTaskExecutionMetadata) ProtoMessage() {} + +func (x *ParentTaskExecutionMetadata) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_event_event_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ParentTaskExecutionMetadata.ProtoReflect.Descriptor instead. 
+func (*ParentTaskExecutionMetadata) Descriptor() ([]byte, []int) { + return file_flyteidl2_event_event_proto_rawDescGZIP(), []int{4} +} + +func (x *ParentTaskExecutionMetadata) GetId() *core.TaskExecutionIdentifier { + if x != nil { + return x.Id + } + return nil +} + +type ParentNodeExecutionMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Unique identifier of the parent node id within the execution + // This is value of core.NodeExecutionIdentifier.node_id of the parent node + NodeId string `protobuf:"bytes,1,opt,name=node_id,json=nodeId,proto3" json:"node_id,omitempty"` +} + +func (x *ParentNodeExecutionMetadata) Reset() { + *x = ParentNodeExecutionMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_event_event_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ParentNodeExecutionMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ParentNodeExecutionMetadata) ProtoMessage() {} + +func (x *ParentNodeExecutionMetadata) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_event_event_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ParentNodeExecutionMetadata.ProtoReflect.Descriptor instead. 
+func (*ParentNodeExecutionMetadata) Descriptor() ([]byte, []int) { + return file_flyteidl2_event_event_proto_rawDescGZIP(), []int{5} +} + +func (x *ParentNodeExecutionMetadata) GetNodeId() string { + if x != nil { + return x.NodeId + } + return "" +} + +type EventReason struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An explanation for this event + Reason string `protobuf:"bytes,1,opt,name=reason,proto3" json:"reason,omitempty"` + // The time this reason occurred + OccurredAt *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=occurred_at,json=occurredAt,proto3" json:"occurred_at,omitempty"` +} + +func (x *EventReason) Reset() { + *x = EventReason{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_event_event_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EventReason) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EventReason) ProtoMessage() {} + +func (x *EventReason) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_event_event_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EventReason.ProtoReflect.Descriptor instead. +func (*EventReason) Descriptor() ([]byte, []int) { + return file_flyteidl2_event_event_proto_rawDescGZIP(), []int{6} +} + +func (x *EventReason) GetReason() string { + if x != nil { + return x.Reason + } + return "" +} + +func (x *EventReason) GetOccurredAt() *timestamppb.Timestamp { + if x != nil { + return x.OccurredAt + } + return nil +} + +// Plugin specific execution event information. For tasks like Python, Hive, Spark, DynamicJob. 
+type TaskExecutionEvent struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // ID of the task. In combination with the retryAttempt this will indicate + // the task execution uniquely for a given parent node execution. + TaskId *core.Identifier `protobuf:"bytes,1,opt,name=task_id,json=taskId,proto3" json:"task_id,omitempty"` + // A task execution is always kicked off by a node execution, the event consumer + // will use the parent_id to relate the task to it's parent node execution + ParentNodeExecutionId *core.NodeExecutionIdentifier `protobuf:"bytes,2,opt,name=parent_node_execution_id,json=parentNodeExecutionId,proto3" json:"parent_node_execution_id,omitempty"` + // retry attempt number for this task, ie., 2 for the second attempt + RetryAttempt uint32 `protobuf:"varint,3,opt,name=retry_attempt,json=retryAttempt,proto3" json:"retry_attempt,omitempty"` + // Phase associated with the event + Phase core.TaskExecution_Phase `protobuf:"varint,4,opt,name=phase,proto3,enum=flyteidl2.core.TaskExecution_Phase" json:"phase,omitempty"` + // id of the process that sent this event, mainly for trace debugging + ProducerId string `protobuf:"bytes,5,opt,name=producer_id,json=producerId,proto3" json:"producer_id,omitempty"` + // log information for the task execution + Logs []*core.TaskLog `protobuf:"bytes,6,rep,name=logs,proto3" json:"logs,omitempty"` + // This timestamp represents when the original event occurred, it is generated + // by the executor of the task. 
+ OccurredAt *timestamppb.Timestamp `protobuf:"bytes,7,opt,name=occurred_at,json=occurredAt,proto3" json:"occurred_at,omitempty"` + // Types that are assignable to InputValue: + // + // *TaskExecutionEvent_InputUri + // *TaskExecutionEvent_InputData + InputValue isTaskExecutionEvent_InputValue `protobuf_oneof:"input_value"` + // Types that are assignable to OutputResult: + // + // *TaskExecutionEvent_OutputUri + // *TaskExecutionEvent_Error + // *TaskExecutionEvent_OutputData + OutputResult isTaskExecutionEvent_OutputResult `protobuf_oneof:"output_result"` + // Custom data that the task plugin sends back. This is extensible to allow various plugins in the system. + CustomInfo *structpb.Struct `protobuf:"bytes,11,opt,name=custom_info,json=customInfo,proto3" json:"custom_info,omitempty"` + // Some phases, like RUNNING, can send multiple events with changed metadata (new logs, additional custom_info, etc) + // that should be recorded regardless of the lack of phase change. + // The version field should be incremented when metadata changes across the duration of an individual phase. + PhaseVersion uint32 `protobuf:"varint,12,opt,name=phase_version,json=phaseVersion,proto3" json:"phase_version,omitempty"` + // An optional explanation for the phase transition. + // Deprecated: Use reasons instead. + // + // Deprecated: Marked as deprecated in flyteidl2/event/event.proto. + Reason string `protobuf:"bytes,13,opt,name=reason,proto3" json:"reason,omitempty"` + // An optional list of explanations for the phase transition. + Reasons []*EventReason `protobuf:"bytes,21,rep,name=reasons,proto3" json:"reasons,omitempty"` + // A predefined yet extensible Task type identifier. If the task definition is already registered in flyte admin + // this type will be identical, but not all task executions necessarily use pre-registered definitions and this + // type is useful to render the task in the UI, filter task executions, etc. 
+ TaskType string `protobuf:"bytes,14,opt,name=task_type,json=taskType,proto3" json:"task_type,omitempty"` + // Metadata around how a task was executed. + Metadata *TaskExecutionMetadata `protobuf:"bytes,16,opt,name=metadata,proto3" json:"metadata,omitempty"` + // The event version is used to indicate versioned changes in how data is reported using this + // proto message. For example, event_verison > 0 means that maps tasks report logs using the + // TaskExecutionMetadata ExternalResourceInfo fields for each subtask rather than the TaskLog + // in this message. + EventVersion int32 `protobuf:"varint,18,opt,name=event_version,json=eventVersion,proto3" json:"event_version,omitempty"` + // This timestamp represents the instant when the event was reported by the executing framework. For example, a k8s + // pod task may be marked completed at (ie. `occurred_at`) the instant the container running user code completes, + // but this event will not be reported until the pod is marked as completed. Extracting both of these timestamps + // facilitates a more accurate portrayal of the evaluation time-series. 
+ ReportedAt *timestamppb.Timestamp `protobuf:"bytes,20,opt,name=reported_at,json=reportedAt,proto3" json:"reported_at,omitempty"` + // Contains metadata required to identify logs related to this task execution + LogContext *core.LogContext `protobuf:"bytes,22,opt,name=log_context,json=logContext,proto3" json:"log_context,omitempty"` +} + +func (x *TaskExecutionEvent) Reset() { + *x = TaskExecutionEvent{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_event_event_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TaskExecutionEvent) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TaskExecutionEvent) ProtoMessage() {} + +func (x *TaskExecutionEvent) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_event_event_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TaskExecutionEvent.ProtoReflect.Descriptor instead. 
+func (*TaskExecutionEvent) Descriptor() ([]byte, []int) { + return file_flyteidl2_event_event_proto_rawDescGZIP(), []int{7} +} + +func (x *TaskExecutionEvent) GetTaskId() *core.Identifier { + if x != nil { + return x.TaskId + } + return nil +} + +func (x *TaskExecutionEvent) GetParentNodeExecutionId() *core.NodeExecutionIdentifier { + if x != nil { + return x.ParentNodeExecutionId + } + return nil +} + +func (x *TaskExecutionEvent) GetRetryAttempt() uint32 { + if x != nil { + return x.RetryAttempt + } + return 0 +} + +func (x *TaskExecutionEvent) GetPhase() core.TaskExecution_Phase { + if x != nil { + return x.Phase + } + return core.TaskExecution_Phase(0) +} + +func (x *TaskExecutionEvent) GetProducerId() string { + if x != nil { + return x.ProducerId + } + return "" +} + +func (x *TaskExecutionEvent) GetLogs() []*core.TaskLog { + if x != nil { + return x.Logs + } + return nil +} + +func (x *TaskExecutionEvent) GetOccurredAt() *timestamppb.Timestamp { + if x != nil { + return x.OccurredAt + } + return nil +} + +func (m *TaskExecutionEvent) GetInputValue() isTaskExecutionEvent_InputValue { + if m != nil { + return m.InputValue + } + return nil +} + +func (x *TaskExecutionEvent) GetInputUri() string { + if x, ok := x.GetInputValue().(*TaskExecutionEvent_InputUri); ok { + return x.InputUri + } + return "" +} + +func (x *TaskExecutionEvent) GetInputData() *core.LiteralMap { + if x, ok := x.GetInputValue().(*TaskExecutionEvent_InputData); ok { + return x.InputData + } + return nil +} + +func (m *TaskExecutionEvent) GetOutputResult() isTaskExecutionEvent_OutputResult { + if m != nil { + return m.OutputResult + } + return nil +} + +func (x *TaskExecutionEvent) GetOutputUri() string { + if x, ok := x.GetOutputResult().(*TaskExecutionEvent_OutputUri); ok { + return x.OutputUri + } + return "" +} + +func (x *TaskExecutionEvent) GetError() *core.ExecutionError { + if x, ok := x.GetOutputResult().(*TaskExecutionEvent_Error); ok { + return x.Error + } + return nil +} + +func 
(x *TaskExecutionEvent) GetOutputData() *core.LiteralMap { + if x, ok := x.GetOutputResult().(*TaskExecutionEvent_OutputData); ok { + return x.OutputData + } + return nil +} + +func (x *TaskExecutionEvent) GetCustomInfo() *structpb.Struct { + if x != nil { + return x.CustomInfo + } + return nil +} + +func (x *TaskExecutionEvent) GetPhaseVersion() uint32 { + if x != nil { + return x.PhaseVersion + } + return 0 +} + +// Deprecated: Marked as deprecated in flyteidl2/event/event.proto. +func (x *TaskExecutionEvent) GetReason() string { + if x != nil { + return x.Reason + } + return "" +} + +func (x *TaskExecutionEvent) GetReasons() []*EventReason { + if x != nil { + return x.Reasons + } + return nil +} + +func (x *TaskExecutionEvent) GetTaskType() string { + if x != nil { + return x.TaskType + } + return "" +} + +func (x *TaskExecutionEvent) GetMetadata() *TaskExecutionMetadata { + if x != nil { + return x.Metadata + } + return nil +} + +func (x *TaskExecutionEvent) GetEventVersion() int32 { + if x != nil { + return x.EventVersion + } + return 0 +} + +func (x *TaskExecutionEvent) GetReportedAt() *timestamppb.Timestamp { + if x != nil { + return x.ReportedAt + } + return nil +} + +func (x *TaskExecutionEvent) GetLogContext() *core.LogContext { + if x != nil { + return x.LogContext + } + return nil +} + +type isTaskExecutionEvent_InputValue interface { + isTaskExecutionEvent_InputValue() +} + +type TaskExecutionEvent_InputUri struct { + // URI of the input file, it encodes all the information + // including Cloud source provider. ie., s3://... + InputUri string `protobuf:"bytes,8,opt,name=input_uri,json=inputUri,proto3,oneof"` +} + +type TaskExecutionEvent_InputData struct { + // Raw input data consumed by this task execution. 
+ InputData *core.LiteralMap `protobuf:"bytes,19,opt,name=input_data,json=inputData,proto3,oneof"` +} + +func (*TaskExecutionEvent_InputUri) isTaskExecutionEvent_InputValue() {} + +func (*TaskExecutionEvent_InputData) isTaskExecutionEvent_InputValue() {} + +type isTaskExecutionEvent_OutputResult interface { + isTaskExecutionEvent_OutputResult() +} + +type TaskExecutionEvent_OutputUri struct { + // URI to the output of the execution, it will be in a format that encodes all the information + // including Cloud source provider. ie., s3://... + OutputUri string `protobuf:"bytes,9,opt,name=output_uri,json=outputUri,proto3,oneof"` +} + +type TaskExecutionEvent_Error struct { + // Error information for the execution + Error *core.ExecutionError `protobuf:"bytes,10,opt,name=error,proto3,oneof"` +} + +type TaskExecutionEvent_OutputData struct { + // Raw output data produced by this task execution. + OutputData *core.LiteralMap `protobuf:"bytes,17,opt,name=output_data,json=outputData,proto3,oneof"` +} + +func (*TaskExecutionEvent_OutputUri) isTaskExecutionEvent_OutputResult() {} + +func (*TaskExecutionEvent_Error) isTaskExecutionEvent_OutputResult() {} + +func (*TaskExecutionEvent_OutputData) isTaskExecutionEvent_OutputResult() {} + +// This message contains metadata about external resources produced or used by a specific task execution. +type ExternalResourceInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Identifier for an external resource created by this task execution, for example Qubole query ID or presto query ids. + ExternalId string `protobuf:"bytes,1,opt,name=external_id,json=externalId,proto3" json:"external_id,omitempty"` + // A unique index for the external resource with respect to all external resources for this task. Although the + // identifier may change between task reporting events or retries, this will remain the same to enable aggregating + // information from multiple reports. 
+ Index uint32 `protobuf:"varint,2,opt,name=index,proto3" json:"index,omitempty"` + // Retry attempt number for this external resource, ie., 2 for the second attempt + RetryAttempt uint32 `protobuf:"varint,3,opt,name=retry_attempt,json=retryAttempt,proto3" json:"retry_attempt,omitempty"` + // Phase associated with the external resource + Phase core.TaskExecution_Phase `protobuf:"varint,4,opt,name=phase,proto3,enum=flyteidl2.core.TaskExecution_Phase" json:"phase,omitempty"` + // Captures the status of caching for this external resource execution. + CacheStatus core.CatalogCacheStatus `protobuf:"varint,5,opt,name=cache_status,json=cacheStatus,proto3,enum=flyteidl2.core.CatalogCacheStatus" json:"cache_status,omitempty"` + // log information for the external resource execution + Logs []*core.TaskLog `protobuf:"bytes,6,rep,name=logs,proto3" json:"logs,omitempty"` + // Additional metadata to do with this event's node target based on the node type. We are + // explicitly not including the task_node_metadata here because it is not clear if it is needed. + // If we decide to include in the future, we should deprecate the cache_status field. 
+ // + // Types that are assignable to TargetMetadata: + // + // *ExternalResourceInfo_WorkflowNodeMetadata + TargetMetadata isExternalResourceInfo_TargetMetadata `protobuf_oneof:"target_metadata"` + // Extensible field for custom, plugin-specific info + CustomInfo *structpb.Struct `protobuf:"bytes,8,opt,name=custom_info,json=customInfo,proto3" json:"custom_info,omitempty"` + // Contains metadata required to identify logs related to this task execution + LogContext *core.LogContext `protobuf:"bytes,9,opt,name=log_context,json=logContext,proto3" json:"log_context,omitempty"` +} + +func (x *ExternalResourceInfo) Reset() { + *x = ExternalResourceInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_event_event_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExternalResourceInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExternalResourceInfo) ProtoMessage() {} + +func (x *ExternalResourceInfo) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_event_event_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExternalResourceInfo.ProtoReflect.Descriptor instead. 
+func (*ExternalResourceInfo) Descriptor() ([]byte, []int) { + return file_flyteidl2_event_event_proto_rawDescGZIP(), []int{8} +} + +func (x *ExternalResourceInfo) GetExternalId() string { + if x != nil { + return x.ExternalId + } + return "" +} + +func (x *ExternalResourceInfo) GetIndex() uint32 { + if x != nil { + return x.Index + } + return 0 +} + +func (x *ExternalResourceInfo) GetRetryAttempt() uint32 { + if x != nil { + return x.RetryAttempt + } + return 0 +} + +func (x *ExternalResourceInfo) GetPhase() core.TaskExecution_Phase { + if x != nil { + return x.Phase + } + return core.TaskExecution_Phase(0) +} + +func (x *ExternalResourceInfo) GetCacheStatus() core.CatalogCacheStatus { + if x != nil { + return x.CacheStatus + } + return core.CatalogCacheStatus(0) +} + +func (x *ExternalResourceInfo) GetLogs() []*core.TaskLog { + if x != nil { + return x.Logs + } + return nil +} + +func (m *ExternalResourceInfo) GetTargetMetadata() isExternalResourceInfo_TargetMetadata { + if m != nil { + return m.TargetMetadata + } + return nil +} + +func (x *ExternalResourceInfo) GetWorkflowNodeMetadata() *WorkflowNodeMetadata { + if x, ok := x.GetTargetMetadata().(*ExternalResourceInfo_WorkflowNodeMetadata); ok { + return x.WorkflowNodeMetadata + } + return nil +} + +func (x *ExternalResourceInfo) GetCustomInfo() *structpb.Struct { + if x != nil { + return x.CustomInfo + } + return nil +} + +func (x *ExternalResourceInfo) GetLogContext() *core.LogContext { + if x != nil { + return x.LogContext + } + return nil +} + +type isExternalResourceInfo_TargetMetadata interface { + isExternalResourceInfo_TargetMetadata() +} + +type ExternalResourceInfo_WorkflowNodeMetadata struct { + WorkflowNodeMetadata *WorkflowNodeMetadata `protobuf:"bytes,7,opt,name=workflow_node_metadata,json=workflowNodeMetadata,proto3,oneof"` +} + +func (*ExternalResourceInfo_WorkflowNodeMetadata) isExternalResourceInfo_TargetMetadata() {} + +// This message holds task execution metadata specific to resource 
allocation used to manage concurrent +// executions for a project namespace. +type ResourcePoolInfo struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Unique resource ID used to identify this execution when allocating a token. + AllocationToken string `protobuf:"bytes,1,opt,name=allocation_token,json=allocationToken,proto3" json:"allocation_token,omitempty"` + // Namespace under which this task execution requested an allocation token. + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` +} + +func (x *ResourcePoolInfo) Reset() { + *x = ResourcePoolInfo{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_event_event_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourcePoolInfo) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourcePoolInfo) ProtoMessage() {} + +func (x *ResourcePoolInfo) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_event_event_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourcePoolInfo.ProtoReflect.Descriptor instead. +func (*ResourcePoolInfo) Descriptor() ([]byte, []int) { + return file_flyteidl2_event_event_proto_rawDescGZIP(), []int{9} +} + +func (x *ResourcePoolInfo) GetAllocationToken() string { + if x != nil { + return x.AllocationToken + } + return "" +} + +func (x *ResourcePoolInfo) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +// Holds metadata around how a task was executed. 
+// As a task transitions across event phases during execution some attributes, such its generated name, generated external resources, +// and more may grow in size but not change necessarily based on the phase transition that sparked the event update. +// Metadata is a container for these attributes across the task execution lifecycle. +type TaskExecutionMetadata struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Unique, generated name for this task execution used by the backend. + GeneratedName string `protobuf:"bytes,1,opt,name=generated_name,json=generatedName,proto3" json:"generated_name,omitempty"` + // Additional data on external resources on other back-ends or platforms (e.g. Hive, Qubole, etc) launched by this task execution. + ExternalResources []*ExternalResourceInfo `protobuf:"bytes,2,rep,name=external_resources,json=externalResources,proto3" json:"external_resources,omitempty"` + // Includes additional data on concurrent resource management used during execution.. + // This is a repeated field because a plugin can request multiple resource allocations during execution. + ResourcePoolInfo []*ResourcePoolInfo `protobuf:"bytes,3,rep,name=resource_pool_info,json=resourcePoolInfo,proto3" json:"resource_pool_info,omitempty"` + // The identifier of the plugin used to execute this task. 
+ PluginIdentifier string `protobuf:"bytes,4,opt,name=plugin_identifier,json=pluginIdentifier,proto3" json:"plugin_identifier,omitempty"` + InstanceClass TaskExecutionMetadata_InstanceClass `protobuf:"varint,16,opt,name=instance_class,json=instanceClass,proto3,enum=flyteidl2.event.TaskExecutionMetadata_InstanceClass" json:"instance_class,omitempty"` +} + +func (x *TaskExecutionMetadata) Reset() { + *x = TaskExecutionMetadata{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_event_event_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TaskExecutionMetadata) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TaskExecutionMetadata) ProtoMessage() {} + +func (x *TaskExecutionMetadata) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_event_event_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TaskExecutionMetadata.ProtoReflect.Descriptor instead. 
+func (*TaskExecutionMetadata) Descriptor() ([]byte, []int) { + return file_flyteidl2_event_event_proto_rawDescGZIP(), []int{10} +} + +func (x *TaskExecutionMetadata) GetGeneratedName() string { + if x != nil { + return x.GeneratedName + } + return "" +} + +func (x *TaskExecutionMetadata) GetExternalResources() []*ExternalResourceInfo { + if x != nil { + return x.ExternalResources + } + return nil +} + +func (x *TaskExecutionMetadata) GetResourcePoolInfo() []*ResourcePoolInfo { + if x != nil { + return x.ResourcePoolInfo + } + return nil +} + +func (x *TaskExecutionMetadata) GetPluginIdentifier() string { + if x != nil { + return x.PluginIdentifier + } + return "" +} + +func (x *TaskExecutionMetadata) GetInstanceClass() TaskExecutionMetadata_InstanceClass { + if x != nil { + return x.InstanceClass + } + return TaskExecutionMetadata_DEFAULT +} + +var File_flyteidl2_event_event_proto protoreflect.FileDescriptor + +var file_flyteidl2_event_event_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x1a, 0x1c, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x63, + 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x65, 0x78, 0x65, + 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x69, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x6c, 0x69, + 0x74, 0x65, 0x72, 0x61, 0x6c, 0x73, 
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, + 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xae, 0x03, 0x0a, 0x16, + 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, + 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x4e, 0x0a, 0x0c, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x57, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x0b, 0x65, 0x78, 0x65, 0x63, 0x75, + 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, + 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x72, 0x6f, + 0x64, 0x75, 0x63, 0x65, 0x72, 0x49, 0x64, 0x12, 0x3d, 0x0a, 0x05, 0x70, 0x68, 0x61, 0x73, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x68, 0x61, 0x73, 0x65, 0x52, + 0x05, 0x70, 0x68, 0x61, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x6f, 0x63, 0x63, 0x75, 0x72, 0x72, + 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x6f, 0x63, 0x63, 
0x75, 0x72, 0x72, 0x65, + 0x64, 0x41, 0x74, 0x12, 0x1f, 0x0a, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x75, 0x72, + 0x69, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x6f, 0x75, 0x74, 0x70, 0x75, + 0x74, 0x55, 0x72, 0x69, 0x12, 0x36, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, + 0x72, 0x6f, 0x72, 0x48, 0x00, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x3d, 0x0a, 0x0b, + 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x4d, 0x61, 0x70, 0x48, 0x00, 0x52, + 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x44, 0x61, 0x74, 0x61, 0x42, 0x0f, 0x0a, 0x0d, 0x6f, + 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0xbe, 0x0a, 0x0a, + 0x12, 0x4e, 0x6f, 0x64, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, + 0x65, 0x6e, 0x74, 0x12, 0x37, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x27, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1f, 0x0a, 0x0b, + 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x72, 0x49, 0x64, 0x12, 0x39, 0x0a, + 0x05, 0x70, 0x68, 0x61, 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4e, 0x6f, + 0x64, 0x65, 
0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x68, 0x61, 0x73, + 0x65, 0x52, 0x05, 0x70, 0x68, 0x61, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x6f, 0x63, 0x63, 0x75, + 0x72, 0x72, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x6f, 0x63, 0x63, 0x75, 0x72, + 0x72, 0x65, 0x64, 0x41, 0x74, 0x12, 0x1d, 0x0a, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x75, + 0x72, 0x69, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x69, 0x6e, 0x70, 0x75, + 0x74, 0x55, 0x72, 0x69, 0x12, 0x3b, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x64, 0x61, + 0x74, 0x61, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, + 0x6c, 0x4d, 0x61, 0x70, 0x48, 0x00, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x44, 0x61, 0x74, + 0x61, 0x12, 0x1f, 0x0a, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x75, 0x72, 0x69, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x09, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x55, + 0x72, 0x69, 0x12, 0x36, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1e, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, + 0x72, 0x48, 0x01, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x3d, 0x0a, 0x0b, 0x6f, 0x75, + 0x74, 0x70, 0x75, 0x74, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x4d, 0x61, 0x70, 0x48, 0x01, 0x52, 0x0a, 0x6f, + 0x75, 0x74, 0x70, 0x75, 0x74, 0x44, 0x61, 0x74, 
0x61, 0x12, 0x5d, 0x0a, 0x16, 0x77, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x57, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x48, 0x02, 0x52, 0x14, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4e, 0x6f, 0x64, 0x65, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x51, 0x0a, 0x12, 0x74, 0x61, 0x73, 0x6b, + 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x0e, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, + 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x4e, 0x6f, 0x64, 0x65, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x48, 0x02, 0x52, 0x10, 0x74, 0x61, 0x73, 0x6b, 0x4e, + 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x5e, 0x0a, 0x14, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x12, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, + 0x61, 0x73, 0x6b, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x5e, 0x0a, 0x14, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x61, 
0x72, 0x65, + 0x6e, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x12, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x4e, + 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x0b, 0x72, + 0x65, 0x74, 0x72, 0x79, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x72, 0x65, 0x74, 0x72, 0x79, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x20, 0x0a, 0x0c, + 0x73, 0x70, 0x65, 0x63, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0a, 0x73, 0x70, 0x65, 0x63, 0x4e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x12, 0x1b, + 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x65, + 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x10, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x1b, 0x0a, 0x09, 0x69, 0x73, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x11, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x08, 0x69, 0x73, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1d, 0x0a, + 0x0a, 0x69, 0x73, 0x5f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x18, 0x12, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x09, 0x69, 0x73, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x12, 0x19, 0x0a, 0x08, + 0x64, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x13, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x64, 0x65, 0x63, 0x6b, 0x55, 0x72, 0x69, 0x12, 0x3b, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6f, 0x72, + 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, + 0x65, 0x64, 0x41, 0x74, 
0x12, 0x19, 0x0a, 0x08, 0x69, 0x73, 0x5f, 0x61, 0x72, 0x72, 0x61, 0x79, + 0x18, 0x16, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x69, 0x73, 0x41, 0x72, 0x72, 0x61, 0x79, 0x12, + 0x3f, 0x0a, 0x0d, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x52, 0x0c, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x45, 0x6e, 0x74, 0x69, 0x74, 0x79, + 0x12, 0x2d, 0x0a, 0x13, 0x69, 0x73, 0x5f, 0x69, 0x6e, 0x5f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x18, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x69, + 0x73, 0x49, 0x6e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x43, 0x68, 0x61, 0x69, 0x6e, 0x12, + 0x19, 0x0a, 0x08, 0x69, 0x73, 0x5f, 0x65, 0x61, 0x67, 0x65, 0x72, 0x18, 0x19, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x07, 0x69, 0x73, 0x45, 0x61, 0x67, 0x65, 0x72, 0x42, 0x0d, 0x0a, 0x0b, 0x69, 0x6e, + 0x70, 0x75, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0f, 0x0a, 0x0d, 0x6f, 0x75, 0x74, + 0x70, 0x75, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x42, 0x11, 0x0a, 0x0f, 0x74, 0x61, + 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x66, 0x0a, + 0x14, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x4e, 0x0a, 0x0c, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x57, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x0b, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22, 0x9c, 0x02, 0x0a, 0x10, 
0x54, 0x61, 0x73, 0x6b, 0x4e, 0x6f, + 0x64, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x45, 0x0a, 0x0c, 0x63, 0x61, + 0x63, 0x68, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x22, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x43, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x43, 0x61, 0x63, 0x68, 0x65, 0x53, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x52, 0x0b, 0x63, 0x61, 0x63, 0x68, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x5f, 0x6b, 0x65, 0x79, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x0a, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, + 0x4b, 0x65, 0x79, 0x12, 0x58, 0x0a, 0x12, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x29, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x43, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x11, 0x72, 0x65, 0x73, 0x65, + 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x25, 0x0a, + 0x0e, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, 0x75, 0x72, 0x69, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, + 0x74, 0x55, 0x72, 0x69, 0x22, 0x56, 0x0a, 0x1b, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, + 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x12, 0x37, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 
0x27, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x02, 0x69, 0x64, 0x22, 0x36, 0x0a, 0x1b, + 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x17, 0x0a, 0x07, 0x6e, + 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6e, 0x6f, + 0x64, 0x65, 0x49, 0x64, 0x22, 0x62, 0x0a, 0x0b, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x61, + 0x73, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x0b, 0x6f, + 0x63, 0x63, 0x75, 0x72, 0x72, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x6f, 0x63, + 0x63, 0x75, 0x72, 0x72, 0x65, 0x64, 0x41, 0x74, 0x22, 0xdd, 0x08, 0x0a, 0x12, 0x54, 0x61, 0x73, + 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, + 0x33, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x06, 0x74, 0x61, + 0x73, 0x6b, 0x49, 0x64, 0x12, 0x60, 0x0a, 0x18, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x6e, + 0x6f, 0x64, 0x65, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 
0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x45, 0x78, 0x65, 0x63, + 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, + 0x15, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, + 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, + 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x72, + 0x65, 0x74, 0x72, 0x79, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x12, 0x39, 0x0a, 0x05, 0x70, + 0x68, 0x61, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x54, 0x61, 0x73, 0x6b, + 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x68, 0x61, 0x73, 0x65, 0x52, + 0x05, 0x70, 0x68, 0x61, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, + 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x72, 0x6f, + 0x64, 0x75, 0x63, 0x65, 0x72, 0x49, 0x64, 0x12, 0x2b, 0x0a, 0x04, 0x6c, 0x6f, 0x67, 0x73, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x4c, 0x6f, 0x67, 0x52, 0x04, + 0x6c, 0x6f, 0x67, 0x73, 0x12, 0x3b, 0x0a, 0x0b, 0x6f, 0x63, 0x63, 0x75, 0x72, 0x72, 0x65, 0x64, + 0x5f, 0x61, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x6f, 0x63, 0x63, 0x75, 0x72, 0x72, 0x65, 0x64, 0x41, + 0x74, 0x12, 0x1d, 0x0a, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x55, 0x72, 0x69, + 0x12, 0x3b, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x64, 0x61, 
0x74, 0x61, 0x18, 0x13, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x4d, 0x61, 0x70, + 0x48, 0x00, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, + 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x09, 0x48, 0x01, 0x52, 0x09, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x55, 0x72, 0x69, 0x12, 0x36, + 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x45, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x48, 0x01, 0x52, + 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x3d, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, + 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4c, 0x69, 0x74, + 0x65, 0x72, 0x61, 0x6c, 0x4d, 0x61, 0x70, 0x48, 0x01, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, + 0x74, 0x44, 0x61, 0x74, 0x61, 0x12, 0x38, 0x0a, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, + 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, + 0x75, 0x63, 0x74, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x49, 0x6e, 0x66, 0x6f, 0x12, + 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x61, 0x73, 0x65, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x70, 0x68, 0x61, 0x73, 0x65, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x0d, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, + 0x12, 0x36, 
0x0a, 0x07, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x73, 0x18, 0x15, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x1c, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x65, 0x76, + 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x52, + 0x07, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x61, 0x73, 0x6b, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x61, 0x73, + 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x42, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, + 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, + 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x12, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x3b, + 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x14, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, + 0x0a, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x3b, 0x0a, 0x0b, 0x6c, + 0x6f, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x52, 0x0a, 0x6c, 0x6f, + 0x67, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x42, 0x0d, 0x0a, 0x0b, 0x69, 0x6e, 0x70, 0x75, + 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 
0x0f, 0x0a, 0x0d, 0x6f, 0x75, 0x74, 0x70, 0x75, + 0x74, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x8a, 0x04, 0x0a, 0x14, 0x45, 0x78, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, + 0x6f, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x74, 0x72, + 0x79, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, + 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x12, 0x39, 0x0a, + 0x05, 0x70, 0x68, 0x61, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x54, 0x61, + 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x68, 0x61, 0x73, + 0x65, 0x52, 0x05, 0x70, 0x68, 0x61, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0c, 0x63, 0x61, 0x63, 0x68, + 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, + 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x43, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x43, 0x61, 0x63, 0x68, 0x65, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x0b, 0x63, 0x61, 0x63, 0x68, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, + 0x2b, 0x0a, 0x04, 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x54, + 0x61, 0x73, 0x6b, 0x4c, 0x6f, 0x67, 0x52, 0x04, 0x6c, 0x6f, 0x67, 0x73, 0x12, 0x5d, 0x0a, 0x16, + 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 
0x6d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x57, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x48, 0x00, 0x52, 0x14, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4e, + 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x38, 0x0a, 0x0b, 0x63, + 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x3b, 0x0a, 0x0b, 0x6c, 0x6f, 0x67, 0x5f, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x78, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4c, 0x6f, 0x67, 0x43, + 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x52, 0x0a, 0x6c, 0x6f, 0x67, 0x43, 0x6f, 0x6e, 0x74, 0x65, + 0x78, 0x74, 0x42, 0x11, 0x0a, 0x0f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x5b, 0x0a, 0x10, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x29, 0x0a, 0x10, 0x61, 0x6c, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0f, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, + 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x22, 0xa0, 0x03, 0x0a, 0x15, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, + 0x74, 0x69, 0x6f, 0x6e, 
0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, + 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x54, 0x0a, 0x12, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x25, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x11, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x4f, 0x0a, 0x12, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x32, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x50, 0x6f, 0x6f, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x10, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2b, 0x0a, 0x11, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x5b, 0x0a, 0x0e, 0x69, 0x6e, 0x73, 0x74, 0x61, + 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x34, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x49, 0x6e, 
0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, + 0x43, 0x6c, 0x61, 0x73, 0x73, 0x52, 0x0d, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x43, + 0x6c, 0x61, 0x73, 0x73, 0x22, 0x2f, 0x0a, 0x0d, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, + 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, + 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x52, 0x55, 0x50, 0x54, 0x49, + 0x42, 0x4c, 0x45, 0x10, 0x01, 0x42, 0xb5, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x0a, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x02, 0x50, 0x01, 0x5a, 0x33, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, + 0x72, 0x67, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, + 0x67, 0x6f, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0xa2, 0x02, 0x03, 0x46, 0x45, 0x58, 0xaa, 0x02, 0x0f, 0x46, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0xca, 0x02, 0x0f, 0x46, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x45, 0x76, 0x65, 0x6e, 0x74, 0xe2, 0x02, 0x1b, 0x46, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x5c, 0x47, + 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x10, 0x46, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x3a, 0x3a, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_flyteidl2_event_event_proto_rawDescOnce sync.Once + file_flyteidl2_event_event_proto_rawDescData = file_flyteidl2_event_event_proto_rawDesc +) + +func file_flyteidl2_event_event_proto_rawDescGZIP() []byte { + file_flyteidl2_event_event_proto_rawDescOnce.Do(func() { + file_flyteidl2_event_event_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_flyteidl2_event_event_proto_rawDescData) + }) + return file_flyteidl2_event_event_proto_rawDescData +} + +var file_flyteidl2_event_event_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_flyteidl2_event_event_proto_msgTypes = make([]protoimpl.MessageInfo, 11) +var file_flyteidl2_event_event_proto_goTypes = []interface{}{ + (TaskExecutionMetadata_InstanceClass)(0), // 0: flyteidl2.event.TaskExecutionMetadata.InstanceClass + (*WorkflowExecutionEvent)(nil), // 1: flyteidl2.event.WorkflowExecutionEvent + (*NodeExecutionEvent)(nil), // 2: flyteidl2.event.NodeExecutionEvent + (*WorkflowNodeMetadata)(nil), // 3: flyteidl2.event.WorkflowNodeMetadata + (*TaskNodeMetadata)(nil), // 4: flyteidl2.event.TaskNodeMetadata + (*ParentTaskExecutionMetadata)(nil), // 5: flyteidl2.event.ParentTaskExecutionMetadata + (*ParentNodeExecutionMetadata)(nil), // 6: flyteidl2.event.ParentNodeExecutionMetadata + (*EventReason)(nil), // 7: flyteidl2.event.EventReason + (*TaskExecutionEvent)(nil), // 8: flyteidl2.event.TaskExecutionEvent + (*ExternalResourceInfo)(nil), // 9: flyteidl2.event.ExternalResourceInfo + (*ResourcePoolInfo)(nil), // 10: flyteidl2.event.ResourcePoolInfo + (*TaskExecutionMetadata)(nil), // 11: flyteidl2.event.TaskExecutionMetadata + (*core.WorkflowExecutionIdentifier)(nil), // 12: flyteidl2.core.WorkflowExecutionIdentifier + (core.WorkflowExecution_Phase)(0), // 13: flyteidl2.core.WorkflowExecution.Phase + (*timestamppb.Timestamp)(nil), // 14: google.protobuf.Timestamp + (*core.ExecutionError)(nil), // 15: flyteidl2.core.ExecutionError + (*core.LiteralMap)(nil), // 16: flyteidl2.core.LiteralMap + (*core.NodeExecutionIdentifier)(nil), // 17: flyteidl2.core.NodeExecutionIdentifier + (core.NodeExecution_Phase)(0), // 18: flyteidl2.core.NodeExecution.Phase + (*core.Identifier)(nil), // 19: flyteidl2.core.Identifier + (core.CatalogCacheStatus)(0), // 20: flyteidl2.core.CatalogCacheStatus + (*core.CatalogMetadata)(nil), // 21: 
flyteidl2.core.CatalogMetadata + (core.CatalogReservation_Status)(0), // 22: flyteidl2.core.CatalogReservation.Status + (*core.TaskExecutionIdentifier)(nil), // 23: flyteidl2.core.TaskExecutionIdentifier + (core.TaskExecution_Phase)(0), // 24: flyteidl2.core.TaskExecution.Phase + (*core.TaskLog)(nil), // 25: flyteidl2.core.TaskLog + (*structpb.Struct)(nil), // 26: google.protobuf.Struct + (*core.LogContext)(nil), // 27: flyteidl2.core.LogContext +} +var file_flyteidl2_event_event_proto_depIdxs = []int32{ + 12, // 0: flyteidl2.event.WorkflowExecutionEvent.execution_id:type_name -> flyteidl2.core.WorkflowExecutionIdentifier + 13, // 1: flyteidl2.event.WorkflowExecutionEvent.phase:type_name -> flyteidl2.core.WorkflowExecution.Phase + 14, // 2: flyteidl2.event.WorkflowExecutionEvent.occurred_at:type_name -> google.protobuf.Timestamp + 15, // 3: flyteidl2.event.WorkflowExecutionEvent.error:type_name -> flyteidl2.core.ExecutionError + 16, // 4: flyteidl2.event.WorkflowExecutionEvent.output_data:type_name -> flyteidl2.core.LiteralMap + 17, // 5: flyteidl2.event.NodeExecutionEvent.id:type_name -> flyteidl2.core.NodeExecutionIdentifier + 18, // 6: flyteidl2.event.NodeExecutionEvent.phase:type_name -> flyteidl2.core.NodeExecution.Phase + 14, // 7: flyteidl2.event.NodeExecutionEvent.occurred_at:type_name -> google.protobuf.Timestamp + 16, // 8: flyteidl2.event.NodeExecutionEvent.input_data:type_name -> flyteidl2.core.LiteralMap + 15, // 9: flyteidl2.event.NodeExecutionEvent.error:type_name -> flyteidl2.core.ExecutionError + 16, // 10: flyteidl2.event.NodeExecutionEvent.output_data:type_name -> flyteidl2.core.LiteralMap + 3, // 11: flyteidl2.event.NodeExecutionEvent.workflow_node_metadata:type_name -> flyteidl2.event.WorkflowNodeMetadata + 4, // 12: flyteidl2.event.NodeExecutionEvent.task_node_metadata:type_name -> flyteidl2.event.TaskNodeMetadata + 5, // 13: flyteidl2.event.NodeExecutionEvent.parent_task_metadata:type_name -> flyteidl2.event.ParentTaskExecutionMetadata + 6, 
// 14: flyteidl2.event.NodeExecutionEvent.parent_node_metadata:type_name -> flyteidl2.event.ParentNodeExecutionMetadata + 14, // 15: flyteidl2.event.NodeExecutionEvent.reported_at:type_name -> google.protobuf.Timestamp + 19, // 16: flyteidl2.event.NodeExecutionEvent.target_entity:type_name -> flyteidl2.core.Identifier + 12, // 17: flyteidl2.event.WorkflowNodeMetadata.execution_id:type_name -> flyteidl2.core.WorkflowExecutionIdentifier + 20, // 18: flyteidl2.event.TaskNodeMetadata.cache_status:type_name -> flyteidl2.core.CatalogCacheStatus + 21, // 19: flyteidl2.event.TaskNodeMetadata.catalog_key:type_name -> flyteidl2.core.CatalogMetadata + 22, // 20: flyteidl2.event.TaskNodeMetadata.reservation_status:type_name -> flyteidl2.core.CatalogReservation.Status + 23, // 21: flyteidl2.event.ParentTaskExecutionMetadata.id:type_name -> flyteidl2.core.TaskExecutionIdentifier + 14, // 22: flyteidl2.event.EventReason.occurred_at:type_name -> google.protobuf.Timestamp + 19, // 23: flyteidl2.event.TaskExecutionEvent.task_id:type_name -> flyteidl2.core.Identifier + 17, // 24: flyteidl2.event.TaskExecutionEvent.parent_node_execution_id:type_name -> flyteidl2.core.NodeExecutionIdentifier + 24, // 25: flyteidl2.event.TaskExecutionEvent.phase:type_name -> flyteidl2.core.TaskExecution.Phase + 25, // 26: flyteidl2.event.TaskExecutionEvent.logs:type_name -> flyteidl2.core.TaskLog + 14, // 27: flyteidl2.event.TaskExecutionEvent.occurred_at:type_name -> google.protobuf.Timestamp + 16, // 28: flyteidl2.event.TaskExecutionEvent.input_data:type_name -> flyteidl2.core.LiteralMap + 15, // 29: flyteidl2.event.TaskExecutionEvent.error:type_name -> flyteidl2.core.ExecutionError + 16, // 30: flyteidl2.event.TaskExecutionEvent.output_data:type_name -> flyteidl2.core.LiteralMap + 26, // 31: flyteidl2.event.TaskExecutionEvent.custom_info:type_name -> google.protobuf.Struct + 7, // 32: flyteidl2.event.TaskExecutionEvent.reasons:type_name -> flyteidl2.event.EventReason + 11, // 33: 
flyteidl2.event.TaskExecutionEvent.metadata:type_name -> flyteidl2.event.TaskExecutionMetadata + 14, // 34: flyteidl2.event.TaskExecutionEvent.reported_at:type_name -> google.protobuf.Timestamp + 27, // 35: flyteidl2.event.TaskExecutionEvent.log_context:type_name -> flyteidl2.core.LogContext + 24, // 36: flyteidl2.event.ExternalResourceInfo.phase:type_name -> flyteidl2.core.TaskExecution.Phase + 20, // 37: flyteidl2.event.ExternalResourceInfo.cache_status:type_name -> flyteidl2.core.CatalogCacheStatus + 25, // 38: flyteidl2.event.ExternalResourceInfo.logs:type_name -> flyteidl2.core.TaskLog + 3, // 39: flyteidl2.event.ExternalResourceInfo.workflow_node_metadata:type_name -> flyteidl2.event.WorkflowNodeMetadata + 26, // 40: flyteidl2.event.ExternalResourceInfo.custom_info:type_name -> google.protobuf.Struct + 27, // 41: flyteidl2.event.ExternalResourceInfo.log_context:type_name -> flyteidl2.core.LogContext + 9, // 42: flyteidl2.event.TaskExecutionMetadata.external_resources:type_name -> flyteidl2.event.ExternalResourceInfo + 10, // 43: flyteidl2.event.TaskExecutionMetadata.resource_pool_info:type_name -> flyteidl2.event.ResourcePoolInfo + 0, // 44: flyteidl2.event.TaskExecutionMetadata.instance_class:type_name -> flyteidl2.event.TaskExecutionMetadata.InstanceClass + 45, // [45:45] is the sub-list for method output_type + 45, // [45:45] is the sub-list for method input_type + 45, // [45:45] is the sub-list for extension type_name + 45, // [45:45] is the sub-list for extension extendee + 0, // [0:45] is the sub-list for field type_name +} + +func init() { file_flyteidl2_event_event_proto_init() } +func file_flyteidl2_event_event_proto_init() { + if File_flyteidl2_event_event_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_flyteidl2_event_event_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WorkflowExecutionEvent); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return 
&v.unknownFields + default: + return nil + } + } + file_flyteidl2_event_event_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NodeExecutionEvent); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_event_event_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*WorkflowNodeMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_event_event_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TaskNodeMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_event_event_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ParentTaskExecutionMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_event_event_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ParentNodeExecutionMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_event_event_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EventReason); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_event_event_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TaskExecutionEvent); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_flyteidl2_event_event_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExternalResourceInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_event_event_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourcePoolInfo); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_event_event_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TaskExecutionMetadata); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_flyteidl2_event_event_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*WorkflowExecutionEvent_OutputUri)(nil), + (*WorkflowExecutionEvent_Error)(nil), + (*WorkflowExecutionEvent_OutputData)(nil), + } + file_flyteidl2_event_event_proto_msgTypes[1].OneofWrappers = []interface{}{ + (*NodeExecutionEvent_InputUri)(nil), + (*NodeExecutionEvent_InputData)(nil), + (*NodeExecutionEvent_OutputUri)(nil), + (*NodeExecutionEvent_Error)(nil), + (*NodeExecutionEvent_OutputData)(nil), + (*NodeExecutionEvent_WorkflowNodeMetadata)(nil), + (*NodeExecutionEvent_TaskNodeMetadata)(nil), + } + file_flyteidl2_event_event_proto_msgTypes[7].OneofWrappers = []interface{}{ + (*TaskExecutionEvent_InputUri)(nil), + (*TaskExecutionEvent_InputData)(nil), + (*TaskExecutionEvent_OutputUri)(nil), + (*TaskExecutionEvent_Error)(nil), + (*TaskExecutionEvent_OutputData)(nil), + } + file_flyteidl2_event_event_proto_msgTypes[8].OneofWrappers = []interface{}{ + (*ExternalResourceInfo_WorkflowNodeMetadata)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: 
file_flyteidl2_event_event_proto_rawDesc, + NumEnums: 1, + NumMessages: 11, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_flyteidl2_event_event_proto_goTypes, + DependencyIndexes: file_flyteidl2_event_event_proto_depIdxs, + EnumInfos: file_flyteidl2_event_event_proto_enumTypes, + MessageInfos: file_flyteidl2_event_event_proto_msgTypes, + }.Build() + File_flyteidl2_event_event_proto = out.File + file_flyteidl2_event_event_proto_rawDesc = nil + file_flyteidl2_event_event_proto_goTypes = nil + file_flyteidl2_event_event_proto_depIdxs = nil +} diff --git a/gen/go/flyteidl2/event/event.pb.validate.go b/gen/go/flyteidl2/event/event.pb.validate.go new file mode 100644 index 0000000000..47f06d139b --- /dev/null +++ b/gen/go/flyteidl2/event/event.pb.validate.go @@ -0,0 +1,2551 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: flyteidl2/event/event.proto + +package event + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" + + core "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort + + _ = core.WorkflowExecution_Phase(0) +) + +// Validate checks the field values on WorkflowExecutionEvent with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *WorkflowExecutionEvent) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on WorkflowExecutionEvent with the rules +// defined in the proto definition for this message. 
If any rules are +// violated, the result is a list of violation errors wrapped in +// WorkflowExecutionEventMultiError, or nil if none found. +func (m *WorkflowExecutionEvent) ValidateAll() error { + return m.validate(true) +} + +func (m *WorkflowExecutionEvent) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetExecutionId()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, WorkflowExecutionEventValidationError{ + field: "ExecutionId", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, WorkflowExecutionEventValidationError{ + field: "ExecutionId", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetExecutionId()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return WorkflowExecutionEventValidationError{ + field: "ExecutionId", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ProducerId + + // no validation rules for Phase + + if all { + switch v := interface{}(m.GetOccurredAt()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, WorkflowExecutionEventValidationError{ + field: "OccurredAt", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, WorkflowExecutionEventValidationError{ + field: "OccurredAt", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOccurredAt()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return WorkflowExecutionEventValidationError{ + field: 
"OccurredAt", + reason: "embedded message failed validation", + cause: err, + } + } + } + + switch v := m.OutputResult.(type) { + case *WorkflowExecutionEvent_OutputUri: + if v == nil { + err := WorkflowExecutionEventValidationError{ + field: "OutputResult", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for OutputUri + case *WorkflowExecutionEvent_Error: + if v == nil { + err := WorkflowExecutionEventValidationError{ + field: "OutputResult", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetError()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, WorkflowExecutionEventValidationError{ + field: "Error", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, WorkflowExecutionEventValidationError{ + field: "Error", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetError()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return WorkflowExecutionEventValidationError{ + field: "Error", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *WorkflowExecutionEvent_OutputData: + if v == nil { + err := WorkflowExecutionEventValidationError{ + field: "OutputResult", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetOutputData()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, WorkflowExecutionEventValidationError{ + field: "OutputData", + reason: "embedded message failed validation", + cause: 
err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, WorkflowExecutionEventValidationError{ + field: "OutputData", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOutputData()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return WorkflowExecutionEventValidationError{ + field: "OutputData", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return WorkflowExecutionEventMultiError(errors) + } + + return nil +} + +// WorkflowExecutionEventMultiError is an error wrapping multiple validation +// errors returned by WorkflowExecutionEvent.ValidateAll() if the designated +// constraints aren't met. +type WorkflowExecutionEventMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m WorkflowExecutionEventMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m WorkflowExecutionEventMultiError) AllErrors() []error { return m } + +// WorkflowExecutionEventValidationError is the validation error returned by +// WorkflowExecutionEvent.Validate if the designated constraints aren't met. +type WorkflowExecutionEventValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e WorkflowExecutionEventValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e WorkflowExecutionEventValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e WorkflowExecutionEventValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e WorkflowExecutionEventValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e WorkflowExecutionEventValidationError) ErrorName() string { + return "WorkflowExecutionEventValidationError" +} + +// Error satisfies the builtin error interface +func (e WorkflowExecutionEventValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sWorkflowExecutionEvent.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = WorkflowExecutionEventValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = WorkflowExecutionEventValidationError{} + +// Validate checks the field values on NodeExecutionEvent with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *NodeExecutionEvent) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on NodeExecutionEvent with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// NodeExecutionEventMultiError, or nil if none found. 
+func (m *NodeExecutionEvent) ValidateAll() error { + return m.validate(true) +} + +func (m *NodeExecutionEvent) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetId()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, NodeExecutionEventValidationError{ + field: "Id", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, NodeExecutionEventValidationError{ + field: "Id", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetId()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return NodeExecutionEventValidationError{ + field: "Id", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ProducerId + + // no validation rules for Phase + + if all { + switch v := interface{}(m.GetOccurredAt()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, NodeExecutionEventValidationError{ + field: "OccurredAt", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, NodeExecutionEventValidationError{ + field: "OccurredAt", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOccurredAt()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return NodeExecutionEventValidationError{ + field: "OccurredAt", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetParentTaskMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := 
v.ValidateAll(); err != nil { + errors = append(errors, NodeExecutionEventValidationError{ + field: "ParentTaskMetadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, NodeExecutionEventValidationError{ + field: "ParentTaskMetadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetParentTaskMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return NodeExecutionEventValidationError{ + field: "ParentTaskMetadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetParentNodeMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, NodeExecutionEventValidationError{ + field: "ParentNodeMetadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, NodeExecutionEventValidationError{ + field: "ParentNodeMetadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetParentNodeMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return NodeExecutionEventValidationError{ + field: "ParentNodeMetadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for RetryGroup + + // no validation rules for SpecNodeId + + // no validation rules for NodeName + + // no validation rules for EventVersion + + // no validation rules for IsParent + + // no validation rules for IsDynamic + + // no validation rules for DeckUri + + if all { + switch v := interface{}(m.GetReportedAt()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); 
err != nil { + errors = append(errors, NodeExecutionEventValidationError{ + field: "ReportedAt", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, NodeExecutionEventValidationError{ + field: "ReportedAt", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetReportedAt()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return NodeExecutionEventValidationError{ + field: "ReportedAt", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for IsArray + + if all { + switch v := interface{}(m.GetTargetEntity()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, NodeExecutionEventValidationError{ + field: "TargetEntity", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, NodeExecutionEventValidationError{ + field: "TargetEntity", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTargetEntity()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return NodeExecutionEventValidationError{ + field: "TargetEntity", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for IsInDynamicChain + + // no validation rules for IsEager + + switch v := m.InputValue.(type) { + case *NodeExecutionEvent_InputUri: + if v == nil { + err := NodeExecutionEventValidationError{ + field: "InputValue", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for InputUri + case *NodeExecutionEvent_InputData: + if v == nil { + err := 
NodeExecutionEventValidationError{ + field: "InputValue", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetInputData()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, NodeExecutionEventValidationError{ + field: "InputData", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, NodeExecutionEventValidationError{ + field: "InputData", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetInputData()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return NodeExecutionEventValidationError{ + field: "InputData", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + switch v := m.OutputResult.(type) { + case *NodeExecutionEvent_OutputUri: + if v == nil { + err := NodeExecutionEventValidationError{ + field: "OutputResult", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for OutputUri + case *NodeExecutionEvent_Error: + if v == nil { + err := NodeExecutionEventValidationError{ + field: "OutputResult", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetError()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, NodeExecutionEventValidationError{ + field: "Error", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, 
NodeExecutionEventValidationError{ + field: "Error", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetError()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return NodeExecutionEventValidationError{ + field: "Error", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *NodeExecutionEvent_OutputData: + if v == nil { + err := NodeExecutionEventValidationError{ + field: "OutputResult", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetOutputData()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, NodeExecutionEventValidationError{ + field: "OutputData", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, NodeExecutionEventValidationError{ + field: "OutputData", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOutputData()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return NodeExecutionEventValidationError{ + field: "OutputData", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + switch v := m.TargetMetadata.(type) { + case *NodeExecutionEvent_WorkflowNodeMetadata: + if v == nil { + err := NodeExecutionEventValidationError{ + field: "TargetMetadata", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetWorkflowNodeMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, NodeExecutionEventValidationError{ + 
field: "WorkflowNodeMetadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, NodeExecutionEventValidationError{ + field: "WorkflowNodeMetadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetWorkflowNodeMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return NodeExecutionEventValidationError{ + field: "WorkflowNodeMetadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *NodeExecutionEvent_TaskNodeMetadata: + if v == nil { + err := NodeExecutionEventValidationError{ + field: "TargetMetadata", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetTaskNodeMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, NodeExecutionEventValidationError{ + field: "TaskNodeMetadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, NodeExecutionEventValidationError{ + field: "TaskNodeMetadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTaskNodeMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return NodeExecutionEventValidationError{ + field: "TaskNodeMetadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return NodeExecutionEventMultiError(errors) + } + + return nil +} + +// NodeExecutionEventMultiError is an error wrapping multiple validation errors +// returned by NodeExecutionEvent.ValidateAll() 
if the designated constraints +// aren't met. +type NodeExecutionEventMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m NodeExecutionEventMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m NodeExecutionEventMultiError) AllErrors() []error { return m } + +// NodeExecutionEventValidationError is the validation error returned by +// NodeExecutionEvent.Validate if the designated constraints aren't met. +type NodeExecutionEventValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e NodeExecutionEventValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e NodeExecutionEventValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e NodeExecutionEventValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e NodeExecutionEventValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e NodeExecutionEventValidationError) ErrorName() string { + return "NodeExecutionEventValidationError" +} + +// Error satisfies the builtin error interface +func (e NodeExecutionEventValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sNodeExecutionEvent.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = NodeExecutionEventValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = NodeExecutionEventValidationError{} + +// Validate checks the field values on WorkflowNodeMetadata with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *WorkflowNodeMetadata) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on WorkflowNodeMetadata with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// WorkflowNodeMetadataMultiError, or nil if none found. 
+func (m *WorkflowNodeMetadata) ValidateAll() error { + return m.validate(true) +} + +func (m *WorkflowNodeMetadata) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetExecutionId()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, WorkflowNodeMetadataValidationError{ + field: "ExecutionId", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, WorkflowNodeMetadataValidationError{ + field: "ExecutionId", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetExecutionId()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return WorkflowNodeMetadataValidationError{ + field: "ExecutionId", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return WorkflowNodeMetadataMultiError(errors) + } + + return nil +} + +// WorkflowNodeMetadataMultiError is an error wrapping multiple validation +// errors returned by WorkflowNodeMetadata.ValidateAll() if the designated +// constraints aren't met. +type WorkflowNodeMetadataMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m WorkflowNodeMetadataMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m WorkflowNodeMetadataMultiError) AllErrors() []error { return m } + +// WorkflowNodeMetadataValidationError is the validation error returned by +// WorkflowNodeMetadata.Validate if the designated constraints aren't met. 
+type WorkflowNodeMetadataValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e WorkflowNodeMetadataValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e WorkflowNodeMetadataValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e WorkflowNodeMetadataValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e WorkflowNodeMetadataValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e WorkflowNodeMetadataValidationError) ErrorName() string { + return "WorkflowNodeMetadataValidationError" +} + +// Error satisfies the builtin error interface +func (e WorkflowNodeMetadataValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sWorkflowNodeMetadata.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = WorkflowNodeMetadataValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = WorkflowNodeMetadataValidationError{} + +// Validate checks the field values on TaskNodeMetadata with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *TaskNodeMetadata) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TaskNodeMetadata with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// TaskNodeMetadataMultiError, or nil if none found. 
+func (m *TaskNodeMetadata) ValidateAll() error { + return m.validate(true) +} + +func (m *TaskNodeMetadata) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for CacheStatus + + if all { + switch v := interface{}(m.GetCatalogKey()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TaskNodeMetadataValidationError{ + field: "CatalogKey", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TaskNodeMetadataValidationError{ + field: "CatalogKey", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCatalogKey()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TaskNodeMetadataValidationError{ + field: "CatalogKey", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for ReservationStatus + + // no validation rules for CheckpointUri + + if len(errors) > 0 { + return TaskNodeMetadataMultiError(errors) + } + + return nil +} + +// TaskNodeMetadataMultiError is an error wrapping multiple validation errors +// returned by TaskNodeMetadata.ValidateAll() if the designated constraints +// aren't met. +type TaskNodeMetadataMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TaskNodeMetadataMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TaskNodeMetadataMultiError) AllErrors() []error { return m } + +// TaskNodeMetadataValidationError is the validation error returned by +// TaskNodeMetadata.Validate if the designated constraints aren't met. 
+type TaskNodeMetadataValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TaskNodeMetadataValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TaskNodeMetadataValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TaskNodeMetadataValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TaskNodeMetadataValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e TaskNodeMetadataValidationError) ErrorName() string { return "TaskNodeMetadataValidationError" } + +// Error satisfies the builtin error interface +func (e TaskNodeMetadataValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTaskNodeMetadata.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TaskNodeMetadataValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TaskNodeMetadataValidationError{} + +// Validate checks the field values on ParentTaskExecutionMetadata with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ParentTaskExecutionMetadata) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ParentTaskExecutionMetadata with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ParentTaskExecutionMetadataMultiError, or nil if none found. 
+func (m *ParentTaskExecutionMetadata) ValidateAll() error { + return m.validate(true) +} + +func (m *ParentTaskExecutionMetadata) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetId()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ParentTaskExecutionMetadataValidationError{ + field: "Id", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ParentTaskExecutionMetadataValidationError{ + field: "Id", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetId()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ParentTaskExecutionMetadataValidationError{ + field: "Id", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return ParentTaskExecutionMetadataMultiError(errors) + } + + return nil +} + +// ParentTaskExecutionMetadataMultiError is an error wrapping multiple +// validation errors returned by ParentTaskExecutionMetadata.ValidateAll() if +// the designated constraints aren't met. +type ParentTaskExecutionMetadataMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ParentTaskExecutionMetadataMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ParentTaskExecutionMetadataMultiError) AllErrors() []error { return m } + +// ParentTaskExecutionMetadataValidationError is the validation error returned +// by ParentTaskExecutionMetadata.Validate if the designated constraints +// aren't met. 
+type ParentTaskExecutionMetadataValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ParentTaskExecutionMetadataValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ParentTaskExecutionMetadataValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ParentTaskExecutionMetadataValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ParentTaskExecutionMetadataValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ParentTaskExecutionMetadataValidationError) ErrorName() string { + return "ParentTaskExecutionMetadataValidationError" +} + +// Error satisfies the builtin error interface +func (e ParentTaskExecutionMetadataValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sParentTaskExecutionMetadata.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ParentTaskExecutionMetadataValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ParentTaskExecutionMetadataValidationError{} + +// Validate checks the field values on ParentNodeExecutionMetadata with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ParentNodeExecutionMetadata) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ParentNodeExecutionMetadata with the +// rules defined in the proto definition for this message. 
If any rules are +// violated, the result is a list of violation errors wrapped in +// ParentNodeExecutionMetadataMultiError, or nil if none found. +func (m *ParentNodeExecutionMetadata) ValidateAll() error { + return m.validate(true) +} + +func (m *ParentNodeExecutionMetadata) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for NodeId + + if len(errors) > 0 { + return ParentNodeExecutionMetadataMultiError(errors) + } + + return nil +} + +// ParentNodeExecutionMetadataMultiError is an error wrapping multiple +// validation errors returned by ParentNodeExecutionMetadata.ValidateAll() if +// the designated constraints aren't met. +type ParentNodeExecutionMetadataMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ParentNodeExecutionMetadataMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ParentNodeExecutionMetadataMultiError) AllErrors() []error { return m } + +// ParentNodeExecutionMetadataValidationError is the validation error returned +// by ParentNodeExecutionMetadata.Validate if the designated constraints +// aren't met. +type ParentNodeExecutionMetadataValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ParentNodeExecutionMetadataValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ParentNodeExecutionMetadataValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ParentNodeExecutionMetadataValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e ParentNodeExecutionMetadataValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ParentNodeExecutionMetadataValidationError) ErrorName() string { + return "ParentNodeExecutionMetadataValidationError" +} + +// Error satisfies the builtin error interface +func (e ParentNodeExecutionMetadataValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sParentNodeExecutionMetadata.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ParentNodeExecutionMetadataValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ParentNodeExecutionMetadataValidationError{} + +// Validate checks the field values on EventReason with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *EventReason) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on EventReason with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in EventReasonMultiError, or +// nil if none found. 
+func (m *EventReason) ValidateAll() error { + return m.validate(true) +} + +func (m *EventReason) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Reason + + if all { + switch v := interface{}(m.GetOccurredAt()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, EventReasonValidationError{ + field: "OccurredAt", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, EventReasonValidationError{ + field: "OccurredAt", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOccurredAt()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return EventReasonValidationError{ + field: "OccurredAt", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return EventReasonMultiError(errors) + } + + return nil +} + +// EventReasonMultiError is an error wrapping multiple validation errors +// returned by EventReason.ValidateAll() if the designated constraints aren't met. +type EventReasonMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m EventReasonMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m EventReasonMultiError) AllErrors() []error { return m } + +// EventReasonValidationError is the validation error returned by +// EventReason.Validate if the designated constraints aren't met. +type EventReasonValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e EventReasonValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e EventReasonValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e EventReasonValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e EventReasonValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e EventReasonValidationError) ErrorName() string { return "EventReasonValidationError" } + +// Error satisfies the builtin error interface +func (e EventReasonValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sEventReason.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = EventReasonValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = EventReasonValidationError{} + +// Validate checks the field values on TaskExecutionEvent with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *TaskExecutionEvent) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TaskExecutionEvent with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// TaskExecutionEventMultiError, or nil if none found. 
+func (m *TaskExecutionEvent) ValidateAll() error { + return m.validate(true) +} + +func (m *TaskExecutionEvent) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetTaskId()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TaskExecutionEventValidationError{ + field: "TaskId", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TaskExecutionEventValidationError{ + field: "TaskId", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetTaskId()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TaskExecutionEventValidationError{ + field: "TaskId", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetParentNodeExecutionId()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TaskExecutionEventValidationError{ + field: "ParentNodeExecutionId", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TaskExecutionEventValidationError{ + field: "ParentNodeExecutionId", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetParentNodeExecutionId()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TaskExecutionEventValidationError{ + field: "ParentNodeExecutionId", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for RetryAttempt + + // no validation rules for Phase + + // no validation rules for ProducerId + + for idx, item := 
range m.GetLogs() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TaskExecutionEventValidationError{ + field: fmt.Sprintf("Logs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TaskExecutionEventValidationError{ + field: fmt.Sprintf("Logs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TaskExecutionEventValidationError{ + field: fmt.Sprintf("Logs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetOccurredAt()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TaskExecutionEventValidationError{ + field: "OccurredAt", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TaskExecutionEventValidationError{ + field: "OccurredAt", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOccurredAt()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TaskExecutionEventValidationError{ + field: "OccurredAt", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetCustomInfo()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TaskExecutionEventValidationError{ + field: "CustomInfo", + reason: "embedded message failed validation", + cause: err, + }) + } + 
case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TaskExecutionEventValidationError{ + field: "CustomInfo", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCustomInfo()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TaskExecutionEventValidationError{ + field: "CustomInfo", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for PhaseVersion + + // no validation rules for Reason + + for idx, item := range m.GetReasons() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TaskExecutionEventValidationError{ + field: fmt.Sprintf("Reasons[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TaskExecutionEventValidationError{ + field: fmt.Sprintf("Reasons[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TaskExecutionEventValidationError{ + field: fmt.Sprintf("Reasons[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for TaskType + + if all { + switch v := interface{}(m.GetMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TaskExecutionEventValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TaskExecutionEventValidationError{ + field: "Metadata", + 
reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TaskExecutionEventValidationError{ + field: "Metadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for EventVersion + + if all { + switch v := interface{}(m.GetReportedAt()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TaskExecutionEventValidationError{ + field: "ReportedAt", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TaskExecutionEventValidationError{ + field: "ReportedAt", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetReportedAt()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TaskExecutionEventValidationError{ + field: "ReportedAt", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLogContext()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TaskExecutionEventValidationError{ + field: "LogContext", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TaskExecutionEventValidationError{ + field: "LogContext", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLogContext()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TaskExecutionEventValidationError{ + field: "LogContext", + reason: "embedded message failed validation", + cause: err, + } + 
} + } + + switch v := m.InputValue.(type) { + case *TaskExecutionEvent_InputUri: + if v == nil { + err := TaskExecutionEventValidationError{ + field: "InputValue", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for InputUri + case *TaskExecutionEvent_InputData: + if v == nil { + err := TaskExecutionEventValidationError{ + field: "InputValue", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetInputData()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TaskExecutionEventValidationError{ + field: "InputData", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TaskExecutionEventValidationError{ + field: "InputData", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetInputData()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TaskExecutionEventValidationError{ + field: "InputData", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + switch v := m.OutputResult.(type) { + case *TaskExecutionEvent_OutputUri: + if v == nil { + err := TaskExecutionEventValidationError{ + field: "OutputResult", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + // no validation rules for OutputUri + case *TaskExecutionEvent_Error: + if v == nil { + err := TaskExecutionEventValidationError{ + field: "OutputResult", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := 
interface{}(m.GetError()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TaskExecutionEventValidationError{ + field: "Error", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TaskExecutionEventValidationError{ + field: "Error", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetError()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TaskExecutionEventValidationError{ + field: "Error", + reason: "embedded message failed validation", + cause: err, + } + } + } + + case *TaskExecutionEvent_OutputData: + if v == nil { + err := TaskExecutionEventValidationError{ + field: "OutputResult", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetOutputData()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TaskExecutionEventValidationError{ + field: "OutputData", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TaskExecutionEventValidationError{ + field: "OutputData", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetOutputData()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TaskExecutionEventValidationError{ + field: "OutputData", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return TaskExecutionEventMultiError(errors) + } + + return nil +} + +// TaskExecutionEventMultiError is an error 
wrapping multiple validation errors +// returned by TaskExecutionEvent.ValidateAll() if the designated constraints +// aren't met. +type TaskExecutionEventMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TaskExecutionEventMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TaskExecutionEventMultiError) AllErrors() []error { return m } + +// TaskExecutionEventValidationError is the validation error returned by +// TaskExecutionEvent.Validate if the designated constraints aren't met. +type TaskExecutionEventValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TaskExecutionEventValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TaskExecutionEventValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e TaskExecutionEventValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TaskExecutionEventValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e TaskExecutionEventValidationError) ErrorName() string { + return "TaskExecutionEventValidationError" +} + +// Error satisfies the builtin error interface +func (e TaskExecutionEventValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTaskExecutionEvent.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TaskExecutionEventValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TaskExecutionEventValidationError{} + +// Validate checks the field values on ExternalResourceInfo with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *ExternalResourceInfo) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ExternalResourceInfo with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ExternalResourceInfoMultiError, or nil if none found. 
+func (m *ExternalResourceInfo) ValidateAll() error { + return m.validate(true) +} + +func (m *ExternalResourceInfo) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for ExternalId + + // no validation rules for Index + + // no validation rules for RetryAttempt + + // no validation rules for Phase + + // no validation rules for CacheStatus + + for idx, item := range m.GetLogs() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ExternalResourceInfoValidationError{ + field: fmt.Sprintf("Logs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ExternalResourceInfoValidationError{ + field: fmt.Sprintf("Logs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ExternalResourceInfoValidationError{ + field: fmt.Sprintf("Logs[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if all { + switch v := interface{}(m.GetCustomInfo()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ExternalResourceInfoValidationError{ + field: "CustomInfo", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ExternalResourceInfoValidationError{ + field: "CustomInfo", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCustomInfo()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return 
ExternalResourceInfoValidationError{ + field: "CustomInfo", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLogContext()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ExternalResourceInfoValidationError{ + field: "LogContext", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ExternalResourceInfoValidationError{ + field: "LogContext", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLogContext()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return ExternalResourceInfoValidationError{ + field: "LogContext", + reason: "embedded message failed validation", + cause: err, + } + } + } + + switch v := m.TargetMetadata.(type) { + case *ExternalResourceInfo_WorkflowNodeMetadata: + if v == nil { + err := ExternalResourceInfoValidationError{ + field: "TargetMetadata", + reason: "oneof value cannot be a typed-nil", + } + if !all { + return err + } + errors = append(errors, err) + } + + if all { + switch v := interface{}(m.GetWorkflowNodeMetadata()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, ExternalResourceInfoValidationError{ + field: "WorkflowNodeMetadata", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, ExternalResourceInfoValidationError{ + field: "WorkflowNodeMetadata", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetWorkflowNodeMetadata()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return 
ExternalResourceInfoValidationError{ + field: "WorkflowNodeMetadata", + reason: "embedded message failed validation", + cause: err, + } + } + } + + default: + _ = v // ensures v is used + } + + if len(errors) > 0 { + return ExternalResourceInfoMultiError(errors) + } + + return nil +} + +// ExternalResourceInfoMultiError is an error wrapping multiple validation +// errors returned by ExternalResourceInfo.ValidateAll() if the designated +// constraints aren't met. +type ExternalResourceInfoMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m ExternalResourceInfoMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ExternalResourceInfoMultiError) AllErrors() []error { return m } + +// ExternalResourceInfoValidationError is the validation error returned by +// ExternalResourceInfo.Validate if the designated constraints aren't met. +type ExternalResourceInfoValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ExternalResourceInfoValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ExternalResourceInfoValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ExternalResourceInfoValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ExternalResourceInfoValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e ExternalResourceInfoValidationError) ErrorName() string { + return "ExternalResourceInfoValidationError" +} + +// Error satisfies the builtin error interface +func (e ExternalResourceInfoValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sExternalResourceInfo.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ExternalResourceInfoValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ExternalResourceInfoValidationError{} + +// Validate checks the field values on ResourcePoolInfo with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *ResourcePoolInfo) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ResourcePoolInfo with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// ResourcePoolInfoMultiError, or nil if none found. +func (m *ResourcePoolInfo) ValidateAll() error { + return m.validate(true) +} + +func (m *ResourcePoolInfo) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for AllocationToken + + // no validation rules for Namespace + + if len(errors) > 0 { + return ResourcePoolInfoMultiError(errors) + } + + return nil +} + +// ResourcePoolInfoMultiError is an error wrapping multiple validation errors +// returned by ResourcePoolInfo.ValidateAll() if the designated constraints +// aren't met. +type ResourcePoolInfoMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m ResourcePoolInfoMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ResourcePoolInfoMultiError) AllErrors() []error { return m } + +// ResourcePoolInfoValidationError is the validation error returned by +// ResourcePoolInfo.Validate if the designated constraints aren't met. +type ResourcePoolInfoValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ResourcePoolInfoValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ResourcePoolInfoValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ResourcePoolInfoValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ResourcePoolInfoValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ResourcePoolInfoValidationError) ErrorName() string { return "ResourcePoolInfoValidationError" } + +// Error satisfies the builtin error interface +func (e ResourcePoolInfoValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sResourcePoolInfo.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ResourcePoolInfoValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ResourcePoolInfoValidationError{} + +// Validate checks the field values on TaskExecutionMetadata with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. 
+func (m *TaskExecutionMetadata) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on TaskExecutionMetadata with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// TaskExecutionMetadataMultiError, or nil if none found. +func (m *TaskExecutionMetadata) ValidateAll() error { + return m.validate(true) +} + +func (m *TaskExecutionMetadata) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for GeneratedName + + for idx, item := range m.GetExternalResources() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TaskExecutionMetadataValidationError{ + field: fmt.Sprintf("ExternalResources[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, TaskExecutionMetadataValidationError{ + field: fmt.Sprintf("ExternalResources[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TaskExecutionMetadataValidationError{ + field: fmt.Sprintf("ExternalResources[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + for idx, item := range m.GetResourcePoolInfo() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, TaskExecutionMetadataValidationError{ + field: fmt.Sprintf("ResourcePoolInfo[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: 
+ if err := v.Validate(); err != nil { + errors = append(errors, TaskExecutionMetadataValidationError{ + field: fmt.Sprintf("ResourcePoolInfo[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return TaskExecutionMetadataValidationError{ + field: fmt.Sprintf("ResourcePoolInfo[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + // no validation rules for PluginIdentifier + + // no validation rules for InstanceClass + + if len(errors) > 0 { + return TaskExecutionMetadataMultiError(errors) + } + + return nil +} + +// TaskExecutionMetadataMultiError is an error wrapping multiple validation +// errors returned by TaskExecutionMetadata.ValidateAll() if the designated +// constraints aren't met. +type TaskExecutionMetadataMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m TaskExecutionMetadataMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m TaskExecutionMetadataMultiError) AllErrors() []error { return m } + +// TaskExecutionMetadataValidationError is the validation error returned by +// TaskExecutionMetadata.Validate if the designated constraints aren't met. +type TaskExecutionMetadataValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e TaskExecutionMetadataValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e TaskExecutionMetadataValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. 
+func (e TaskExecutionMetadataValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e TaskExecutionMetadataValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e TaskExecutionMetadataValidationError) ErrorName() string { + return "TaskExecutionMetadataValidationError" +} + +// Error satisfies the builtin error interface +func (e TaskExecutionMetadataValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sTaskExecutionMetadata.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = TaskExecutionMetadataValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = TaskExecutionMetadataValidationError{} diff --git a/gen/go/flyteidl2/plugins/common.pb.go b/gen/go/flyteidl2/plugins/common.pb.go new file mode 100644 index 0000000000..f57ffc6d14 --- /dev/null +++ b/gen/go/flyteidl2/plugins/common.pb.go @@ -0,0 +1,257 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc (unknown) +// source: flyteidl2/plugins/common.proto + +package plugins + +import ( + core "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type RestartPolicy int32 + +const ( + RestartPolicy_RESTART_POLICY_NEVER RestartPolicy = 0 + RestartPolicy_RESTART_POLICY_ON_FAILURE RestartPolicy = 1 + RestartPolicy_RESTART_POLICY_ALWAYS RestartPolicy = 2 +) + +// Enum value maps for RestartPolicy. +var ( + RestartPolicy_name = map[int32]string{ + 0: "RESTART_POLICY_NEVER", + 1: "RESTART_POLICY_ON_FAILURE", + 2: "RESTART_POLICY_ALWAYS", + } + RestartPolicy_value = map[string]int32{ + "RESTART_POLICY_NEVER": 0, + "RESTART_POLICY_ON_FAILURE": 1, + "RESTART_POLICY_ALWAYS": 2, + } +) + +func (x RestartPolicy) Enum() *RestartPolicy { + p := new(RestartPolicy) + *p = x + return p +} + +func (x RestartPolicy) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (RestartPolicy) Descriptor() protoreflect.EnumDescriptor { + return file_flyteidl2_plugins_common_proto_enumTypes[0].Descriptor() +} + +func (RestartPolicy) Type() protoreflect.EnumType { + return &file_flyteidl2_plugins_common_proto_enumTypes[0] +} + +func (x RestartPolicy) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use RestartPolicy.Descriptor instead. 
+func (RestartPolicy) EnumDescriptor() ([]byte, []int) { + return file_flyteidl2_plugins_common_proto_rawDescGZIP(), []int{0} +} + +type CommonReplicaSpec struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Number of replicas + Replicas int32 `protobuf:"varint,1,opt,name=replicas,proto3" json:"replicas,omitempty"` + // Image used for the replica group + Image string `protobuf:"bytes,2,opt,name=image,proto3" json:"image,omitempty"` + // Resources required for the replica group + Resources *core.Resources `protobuf:"bytes,3,opt,name=resources,proto3" json:"resources,omitempty"` + // RestartPolicy determines whether pods will be restarted when they exit + RestartPolicy RestartPolicy `protobuf:"varint,4,opt,name=restart_policy,json=restartPolicy,proto3,enum=flyteidl2.plugins.RestartPolicy" json:"restart_policy,omitempty"` +} + +func (x *CommonReplicaSpec) Reset() { + *x = CommonReplicaSpec{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_plugins_common_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CommonReplicaSpec) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CommonReplicaSpec) ProtoMessage() {} + +func (x *CommonReplicaSpec) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_plugins_common_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CommonReplicaSpec.ProtoReflect.Descriptor instead. 
+func (*CommonReplicaSpec) Descriptor() ([]byte, []int) { + return file_flyteidl2_plugins_common_proto_rawDescGZIP(), []int{0} +} + +func (x *CommonReplicaSpec) GetReplicas() int32 { + if x != nil { + return x.Replicas + } + return 0 +} + +func (x *CommonReplicaSpec) GetImage() string { + if x != nil { + return x.Image + } + return "" +} + +func (x *CommonReplicaSpec) GetResources() *core.Resources { + if x != nil { + return x.Resources + } + return nil +} + +func (x *CommonReplicaSpec) GetRestartPolicy() RestartPolicy { + if x != nil { + return x.RestartPolicy + } + return RestartPolicy_RESTART_POLICY_NEVER +} + +var File_flyteidl2_plugins_common_proto protoreflect.FileDescriptor + +var file_flyteidl2_plugins_common_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x73, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x11, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x73, 0x1a, 0x1a, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0xc7, 0x01, 0x0a, 0x11, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x53, 0x70, 0x65, 0x63, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x73, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x73, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 
0x75, 0x72, 0x63, 0x65, 0x73, + 0x12, 0x47, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2a, 0x63, 0x0a, 0x0d, 0x52, 0x65, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x18, 0x0a, 0x14, 0x52, 0x45, + 0x53, 0x54, 0x41, 0x52, 0x54, 0x5f, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x4e, 0x45, 0x56, + 0x45, 0x52, 0x10, 0x00, 0x12, 0x1d, 0x0a, 0x19, 0x52, 0x45, 0x53, 0x54, 0x41, 0x52, 0x54, 0x5f, + 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x4f, 0x4e, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, + 0x45, 0x10, 0x01, 0x12, 0x19, 0x0a, 0x15, 0x52, 0x45, 0x53, 0x54, 0x41, 0x52, 0x54, 0x5f, 0x50, + 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x41, 0x4c, 0x57, 0x41, 0x59, 0x53, 0x10, 0x02, 0x42, 0xc2, + 0x01, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, + 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x02, 0x50, 0x01, 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, + 0xa2, 0x02, 0x03, 0x46, 0x50, 0x58, 0xaa, 0x02, 0x11, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x32, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0xca, 0x02, 0x11, 0x46, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0xe2, 0x02, + 0x1d, 
0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x50, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x73, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, + 0x12, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x3a, 0x3a, 0x50, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_flyteidl2_plugins_common_proto_rawDescOnce sync.Once + file_flyteidl2_plugins_common_proto_rawDescData = file_flyteidl2_plugins_common_proto_rawDesc +) + +func file_flyteidl2_plugins_common_proto_rawDescGZIP() []byte { + file_flyteidl2_plugins_common_proto_rawDescOnce.Do(func() { + file_flyteidl2_plugins_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_flyteidl2_plugins_common_proto_rawDescData) + }) + return file_flyteidl2_plugins_common_proto_rawDescData +} + +var file_flyteidl2_plugins_common_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_flyteidl2_plugins_common_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_flyteidl2_plugins_common_proto_goTypes = []interface{}{ + (RestartPolicy)(0), // 0: flyteidl2.plugins.RestartPolicy + (*CommonReplicaSpec)(nil), // 1: flyteidl2.plugins.CommonReplicaSpec + (*core.Resources)(nil), // 2: flyteidl2.core.Resources +} +var file_flyteidl2_plugins_common_proto_depIdxs = []int32{ + 2, // 0: flyteidl2.plugins.CommonReplicaSpec.resources:type_name -> flyteidl2.core.Resources + 0, // 1: flyteidl2.plugins.CommonReplicaSpec.restart_policy:type_name -> flyteidl2.plugins.RestartPolicy + 2, // [2:2] is the sub-list for method output_type + 2, // [2:2] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_flyteidl2_plugins_common_proto_init() } +func file_flyteidl2_plugins_common_proto_init() { + if File_flyteidl2_plugins_common_proto != nil { + return + } + if 
!protoimpl.UnsafeEnabled { + file_flyteidl2_plugins_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CommonReplicaSpec); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_flyteidl2_plugins_common_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_flyteidl2_plugins_common_proto_goTypes, + DependencyIndexes: file_flyteidl2_plugins_common_proto_depIdxs, + EnumInfos: file_flyteidl2_plugins_common_proto_enumTypes, + MessageInfos: file_flyteidl2_plugins_common_proto_msgTypes, + }.Build() + File_flyteidl2_plugins_common_proto = out.File + file_flyteidl2_plugins_common_proto_rawDesc = nil + file_flyteidl2_plugins_common_proto_goTypes = nil + file_flyteidl2_plugins_common_proto_depIdxs = nil +} diff --git a/gen/go/flyteidl2/plugins/common.pb.validate.go b/gen/go/flyteidl2/plugins/common.pb.validate.go new file mode 100644 index 0000000000..77b315809a --- /dev/null +++ b/gen/go/flyteidl2/plugins/common.pb.validate.go @@ -0,0 +1,173 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: flyteidl2/plugins/common.proto + +package plugins + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on CommonReplicaSpec with the rules defined +// in the proto definition for this message. If any rules are violated, the +// first error encountered is returned, or nil if there are no violations. +func (m *CommonReplicaSpec) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on CommonReplicaSpec with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// CommonReplicaSpecMultiError, or nil if none found. 
+func (m *CommonReplicaSpec) ValidateAll() error { + return m.validate(true) +} + +func (m *CommonReplicaSpec) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Replicas + + // no validation rules for Image + + if all { + switch v := interface{}(m.GetResources()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, CommonReplicaSpecValidationError{ + field: "Resources", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, CommonReplicaSpecValidationError{ + field: "Resources", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetResources()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return CommonReplicaSpecValidationError{ + field: "Resources", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for RestartPolicy + + if len(errors) > 0 { + return CommonReplicaSpecMultiError(errors) + } + + return nil +} + +// CommonReplicaSpecMultiError is an error wrapping multiple validation errors +// returned by CommonReplicaSpec.ValidateAll() if the designated constraints +// aren't met. +type CommonReplicaSpecMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m CommonReplicaSpecMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m CommonReplicaSpecMultiError) AllErrors() []error { return m } + +// CommonReplicaSpecValidationError is the validation error returned by +// CommonReplicaSpec.Validate if the designated constraints aren't met. 
+type CommonReplicaSpecValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e CommonReplicaSpecValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e CommonReplicaSpecValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e CommonReplicaSpecValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e CommonReplicaSpecValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e CommonReplicaSpecValidationError) ErrorName() string { + return "CommonReplicaSpecValidationError" +} + +// Error satisfies the builtin error interface +func (e CommonReplicaSpecValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sCommonReplicaSpec.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = CommonReplicaSpecValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = CommonReplicaSpecValidationError{} diff --git a/gen/go/flyteidl2/plugins/kubeflow/common.pb.go b/gen/go/flyteidl2/plugins/kubeflow/common.pb.go new file mode 100644 index 0000000000..7f73a0ac83 --- /dev/null +++ b/gen/go/flyteidl2/plugins/kubeflow/common.pb.go @@ -0,0 +1,261 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc (unknown) +// source: flyteidl2/plugins/kubeflow/common.proto + +package kubeflow + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type CleanPodPolicy int32 + +const ( + CleanPodPolicy_CLEANPOD_POLICY_NONE CleanPodPolicy = 0 + CleanPodPolicy_CLEANPOD_POLICY_RUNNING CleanPodPolicy = 1 + CleanPodPolicy_CLEANPOD_POLICY_ALL CleanPodPolicy = 2 +) + +// Enum value maps for CleanPodPolicy. +var ( + CleanPodPolicy_name = map[int32]string{ + 0: "CLEANPOD_POLICY_NONE", + 1: "CLEANPOD_POLICY_RUNNING", + 2: "CLEANPOD_POLICY_ALL", + } + CleanPodPolicy_value = map[string]int32{ + "CLEANPOD_POLICY_NONE": 0, + "CLEANPOD_POLICY_RUNNING": 1, + "CLEANPOD_POLICY_ALL": 2, + } +) + +func (x CleanPodPolicy) Enum() *CleanPodPolicy { + p := new(CleanPodPolicy) + *p = x + return p +} + +func (x CleanPodPolicy) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (CleanPodPolicy) Descriptor() protoreflect.EnumDescriptor { + return file_flyteidl2_plugins_kubeflow_common_proto_enumTypes[0].Descriptor() +} + +func (CleanPodPolicy) Type() protoreflect.EnumType { + return &file_flyteidl2_plugins_kubeflow_common_proto_enumTypes[0] +} + +func (x CleanPodPolicy) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use CleanPodPolicy.Descriptor instead. +func (CleanPodPolicy) EnumDescriptor() ([]byte, []int) { + return file_flyteidl2_plugins_kubeflow_common_proto_rawDescGZIP(), []int{0} +} + +type RunPolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Defines the policy to kill pods after the job completes. Default to None. + CleanPodPolicy CleanPodPolicy `protobuf:"varint,1,opt,name=clean_pod_policy,json=cleanPodPolicy,proto3,enum=flyteidl2.plugins.kubeflow.CleanPodPolicy" json:"clean_pod_policy,omitempty"` + // TTL to clean up jobs. Default to infinite. 
+ TtlSecondsAfterFinished int32 `protobuf:"varint,2,opt,name=ttl_seconds_after_finished,json=ttlSecondsAfterFinished,proto3" json:"ttl_seconds_after_finished,omitempty"` + // Specifies the duration in seconds relative to the startTime that the job may be active + // before the system tries to terminate it; value must be positive integer. + ActiveDeadlineSeconds int32 `protobuf:"varint,3,opt,name=active_deadline_seconds,json=activeDeadlineSeconds,proto3" json:"active_deadline_seconds,omitempty"` + // Number of retries before marking this job failed. + BackoffLimit int32 `protobuf:"varint,4,opt,name=backoff_limit,json=backoffLimit,proto3" json:"backoff_limit,omitempty"` +} + +func (x *RunPolicy) Reset() { + *x = RunPolicy{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_plugins_kubeflow_common_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RunPolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RunPolicy) ProtoMessage() {} + +func (x *RunPolicy) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_plugins_kubeflow_common_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RunPolicy.ProtoReflect.Descriptor instead. 
+func (*RunPolicy) Descriptor() ([]byte, []int) { + return file_flyteidl2_plugins_kubeflow_common_proto_rawDescGZIP(), []int{0} +} + +func (x *RunPolicy) GetCleanPodPolicy() CleanPodPolicy { + if x != nil { + return x.CleanPodPolicy + } + return CleanPodPolicy_CLEANPOD_POLICY_NONE +} + +func (x *RunPolicy) GetTtlSecondsAfterFinished() int32 { + if x != nil { + return x.TtlSecondsAfterFinished + } + return 0 +} + +func (x *RunPolicy) GetActiveDeadlineSeconds() int32 { + if x != nil { + return x.ActiveDeadlineSeconds + } + return 0 +} + +func (x *RunPolicy) GetBackoffLimit() int32 { + if x != nil { + return x.BackoffLimit + } + return 0 +} + +var File_flyteidl2_plugins_kubeflow_common_proto protoreflect.FileDescriptor + +var file_flyteidl2_plugins_kubeflow_common_proto_rawDesc = []byte{ + 0x0a, 0x27, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x73, 0x2f, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1a, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x6b, 0x75, 0x62, + 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x22, 0xfb, 0x01, 0x0a, 0x09, 0x52, 0x75, 0x6e, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x12, 0x54, 0x0a, 0x10, 0x63, 0x6c, 0x65, 0x61, 0x6e, 0x5f, 0x70, 0x6f, 0x64, + 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2a, 0x2e, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x73, 0x2e, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x43, 0x6c, 0x65, 0x61, 0x6e, + 0x50, 0x6f, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0e, 0x63, 0x6c, 0x65, 0x61, 0x6e, + 0x50, 0x6f, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x3b, 0x0a, 0x1a, 0x74, 0x74, 0x6c, + 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x66, + 0x69, 0x6e, 0x69, 0x73, 
0x68, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x17, 0x74, + 0x74, 0x6c, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x41, 0x66, 0x74, 0x65, 0x72, 0x46, 0x69, + 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x12, 0x36, 0x0a, 0x17, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, + 0x5f, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, + 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x15, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x44, + 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x23, + 0x0a, 0x0d, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x4c, 0x69, + 0x6d, 0x69, 0x74, 0x2a, 0x60, 0x0a, 0x0e, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x50, 0x6f, 0x64, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x18, 0x0a, 0x14, 0x43, 0x4c, 0x45, 0x41, 0x4e, 0x50, 0x4f, + 0x44, 0x5f, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x4e, 0x4f, 0x4e, 0x45, 0x10, 0x00, 0x12, + 0x1b, 0x0a, 0x17, 0x43, 0x4c, 0x45, 0x41, 0x4e, 0x50, 0x4f, 0x44, 0x5f, 0x50, 0x4f, 0x4c, 0x49, + 0x43, 0x59, 0x5f, 0x52, 0x55, 0x4e, 0x4e, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, + 0x43, 0x4c, 0x45, 0x41, 0x4e, 0x50, 0x4f, 0x44, 0x5f, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, + 0x41, 0x4c, 0x4c, 0x10, 0x02, 0x42, 0xf9, 0x01, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, + 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x02, 0x50, 0x01, 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 
0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, + 0x2f, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0xa2, 0x02, 0x03, 0x46, 0x50, 0x4b, 0xaa, + 0x02, 0x1a, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x50, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x73, 0x2e, 0x4b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0xca, 0x02, 0x1a, 0x46, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, + 0x5c, 0x4b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0xe2, 0x02, 0x26, 0x46, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x5c, 0x4b, 0x75, + 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0xea, 0x02, 0x1c, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x3a, 0x3a, + 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x3a, 0x3a, 0x4b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, + 0x77, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_flyteidl2_plugins_kubeflow_common_proto_rawDescOnce sync.Once + file_flyteidl2_plugins_kubeflow_common_proto_rawDescData = file_flyteidl2_plugins_kubeflow_common_proto_rawDesc +) + +func file_flyteidl2_plugins_kubeflow_common_proto_rawDescGZIP() []byte { + file_flyteidl2_plugins_kubeflow_common_proto_rawDescOnce.Do(func() { + file_flyteidl2_plugins_kubeflow_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_flyteidl2_plugins_kubeflow_common_proto_rawDescData) + }) + return file_flyteidl2_plugins_kubeflow_common_proto_rawDescData +} + +var file_flyteidl2_plugins_kubeflow_common_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_flyteidl2_plugins_kubeflow_common_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_flyteidl2_plugins_kubeflow_common_proto_goTypes = []interface{}{ + (CleanPodPolicy)(0), // 0: flyteidl2.plugins.kubeflow.CleanPodPolicy + (*RunPolicy)(nil), // 1: flyteidl2.plugins.kubeflow.RunPolicy +} +var file_flyteidl2_plugins_kubeflow_common_proto_depIdxs = 
[]int32{ + 0, // 0: flyteidl2.plugins.kubeflow.RunPolicy.clean_pod_policy:type_name -> flyteidl2.plugins.kubeflow.CleanPodPolicy + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_flyteidl2_plugins_kubeflow_common_proto_init() } +func file_flyteidl2_plugins_kubeflow_common_proto_init() { + if File_flyteidl2_plugins_kubeflow_common_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_flyteidl2_plugins_kubeflow_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RunPolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_flyteidl2_plugins_kubeflow_common_proto_rawDesc, + NumEnums: 1, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_flyteidl2_plugins_kubeflow_common_proto_goTypes, + DependencyIndexes: file_flyteidl2_plugins_kubeflow_common_proto_depIdxs, + EnumInfos: file_flyteidl2_plugins_kubeflow_common_proto_enumTypes, + MessageInfos: file_flyteidl2_plugins_kubeflow_common_proto_msgTypes, + }.Build() + File_flyteidl2_plugins_kubeflow_common_proto = out.File + file_flyteidl2_plugins_kubeflow_common_proto_rawDesc = nil + file_flyteidl2_plugins_kubeflow_common_proto_goTypes = nil + file_flyteidl2_plugins_kubeflow_common_proto_depIdxs = nil +} diff --git a/gen/go/flyteidl2/plugins/kubeflow/common.pb.validate.go b/gen/go/flyteidl2/plugins/kubeflow/common.pb.validate.go new file mode 100644 index 0000000000..0a84bcec04 --- /dev/null +++ b/gen/go/flyteidl2/plugins/kubeflow/common.pb.validate.go @@ -0,0 +1,143 @@ +// Code 
generated by protoc-gen-validate. DO NOT EDIT. +// source: flyteidl2/plugins/kubeflow/common.proto + +package kubeflow + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on RunPolicy with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *RunPolicy) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on RunPolicy with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in RunPolicyMultiError, or nil +// if none found. +func (m *RunPolicy) ValidateAll() error { + return m.validate(true) +} + +func (m *RunPolicy) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for CleanPodPolicy + + // no validation rules for TtlSecondsAfterFinished + + // no validation rules for ActiveDeadlineSeconds + + // no validation rules for BackoffLimit + + if len(errors) > 0 { + return RunPolicyMultiError(errors) + } + + return nil +} + +// RunPolicyMultiError is an error wrapping multiple validation errors returned +// by RunPolicy.ValidateAll() if the designated constraints aren't met. +type RunPolicyMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m RunPolicyMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m RunPolicyMultiError) AllErrors() []error { return m } + +// RunPolicyValidationError is the validation error returned by +// RunPolicy.Validate if the designated constraints aren't met. +type RunPolicyValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e RunPolicyValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e RunPolicyValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e RunPolicyValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e RunPolicyValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e RunPolicyValidationError) ErrorName() string { return "RunPolicyValidationError" } + +// Error satisfies the builtin error interface +func (e RunPolicyValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sRunPolicy.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = RunPolicyValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = RunPolicyValidationError{} diff --git a/gen/go/flyteidl2/plugins/kubeflow/mpi.pb.go b/gen/go/flyteidl2/plugins/kubeflow/mpi.pb.go new file mode 100644 index 0000000000..34d4042e43 --- /dev/null +++ b/gen/go/flyteidl2/plugins/kubeflow/mpi.pb.go @@ -0,0 +1,368 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc (unknown) +// source: flyteidl2/plugins/kubeflow/mpi.proto + +package kubeflow + +import ( + core "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" + plugins "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Proto for plugin that enables distributed training using https://github.com/kubeflow/mpi-operator +type DistributedMPITrainingTask struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Worker replicas spec + WorkerReplicas *DistributedMPITrainingReplicaSpec `protobuf:"bytes,1,opt,name=worker_replicas,json=workerReplicas,proto3" json:"worker_replicas,omitempty"` + // Master replicas spec + LauncherReplicas *DistributedMPITrainingReplicaSpec `protobuf:"bytes,2,opt,name=launcher_replicas,json=launcherReplicas,proto3" json:"launcher_replicas,omitempty"` + // RunPolicy encapsulates various runtime policies of the distributed training + // job, for example how to clean up resources and how long the job can stay + // active. 
+ RunPolicy *RunPolicy `protobuf:"bytes,3,opt,name=run_policy,json=runPolicy,proto3" json:"run_policy,omitempty"` + // Number of slots per worker + Slots int32 `protobuf:"varint,4,opt,name=slots,proto3" json:"slots,omitempty"` +} + +func (x *DistributedMPITrainingTask) Reset() { + *x = DistributedMPITrainingTask{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_plugins_kubeflow_mpi_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DistributedMPITrainingTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DistributedMPITrainingTask) ProtoMessage() {} + +func (x *DistributedMPITrainingTask) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_plugins_kubeflow_mpi_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DistributedMPITrainingTask.ProtoReflect.Descriptor instead. 
+func (*DistributedMPITrainingTask) Descriptor() ([]byte, []int) { + return file_flyteidl2_plugins_kubeflow_mpi_proto_rawDescGZIP(), []int{0} +} + +func (x *DistributedMPITrainingTask) GetWorkerReplicas() *DistributedMPITrainingReplicaSpec { + if x != nil { + return x.WorkerReplicas + } + return nil +} + +func (x *DistributedMPITrainingTask) GetLauncherReplicas() *DistributedMPITrainingReplicaSpec { + if x != nil { + return x.LauncherReplicas + } + return nil +} + +func (x *DistributedMPITrainingTask) GetRunPolicy() *RunPolicy { + if x != nil { + return x.RunPolicy + } + return nil +} + +func (x *DistributedMPITrainingTask) GetSlots() int32 { + if x != nil { + return x.Slots + } + return 0 +} + +// Replica specification for distributed MPI training +type DistributedMPITrainingReplicaSpec struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // 1~4 deprecated. Use common instead. + // Number of replicas + // + // Deprecated: Marked as deprecated in flyteidl2/plugins/kubeflow/mpi.proto. + Replicas int32 `protobuf:"varint,1,opt,name=replicas,proto3" json:"replicas,omitempty"` + // Image used for the replica group + // + // Deprecated: Marked as deprecated in flyteidl2/plugins/kubeflow/mpi.proto. + Image string `protobuf:"bytes,2,opt,name=image,proto3" json:"image,omitempty"` + // Resources required for the replica group + // + // Deprecated: Marked as deprecated in flyteidl2/plugins/kubeflow/mpi.proto. + Resources *core.Resources `protobuf:"bytes,3,opt,name=resources,proto3" json:"resources,omitempty"` + // Restart policy determines whether pods will be restarted when they exit + // + // Deprecated: Marked as deprecated in flyteidl2/plugins/kubeflow/mpi.proto. 
+ RestartPolicy plugins.RestartPolicy `protobuf:"varint,4,opt,name=restart_policy,json=restartPolicy,proto3,enum=flyteidl2.plugins.RestartPolicy" json:"restart_policy,omitempty"` + // MPI sometimes requires different command set for different replica groups + Command []string `protobuf:"bytes,5,rep,name=command,proto3" json:"command,omitempty"` + // The common replica spec + Common *plugins.CommonReplicaSpec `protobuf:"bytes,6,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *DistributedMPITrainingReplicaSpec) Reset() { + *x = DistributedMPITrainingReplicaSpec{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_plugins_kubeflow_mpi_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DistributedMPITrainingReplicaSpec) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DistributedMPITrainingReplicaSpec) ProtoMessage() {} + +func (x *DistributedMPITrainingReplicaSpec) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_plugins_kubeflow_mpi_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DistributedMPITrainingReplicaSpec.ProtoReflect.Descriptor instead. +func (*DistributedMPITrainingReplicaSpec) Descriptor() ([]byte, []int) { + return file_flyteidl2_plugins_kubeflow_mpi_proto_rawDescGZIP(), []int{1} +} + +// Deprecated: Marked as deprecated in flyteidl2/plugins/kubeflow/mpi.proto. +func (x *DistributedMPITrainingReplicaSpec) GetReplicas() int32 { + if x != nil { + return x.Replicas + } + return 0 +} + +// Deprecated: Marked as deprecated in flyteidl2/plugins/kubeflow/mpi.proto. 
+func (x *DistributedMPITrainingReplicaSpec) GetImage() string { + if x != nil { + return x.Image + } + return "" +} + +// Deprecated: Marked as deprecated in flyteidl2/plugins/kubeflow/mpi.proto. +func (x *DistributedMPITrainingReplicaSpec) GetResources() *core.Resources { + if x != nil { + return x.Resources + } + return nil +} + +// Deprecated: Marked as deprecated in flyteidl2/plugins/kubeflow/mpi.proto. +func (x *DistributedMPITrainingReplicaSpec) GetRestartPolicy() plugins.RestartPolicy { + if x != nil { + return x.RestartPolicy + } + return plugins.RestartPolicy(0) +} + +func (x *DistributedMPITrainingReplicaSpec) GetCommand() []string { + if x != nil { + return x.Command + } + return nil +} + +func (x *DistributedMPITrainingReplicaSpec) GetCommon() *plugins.CommonReplicaSpec { + if x != nil { + return x.Common + } + return nil +} + +var File_flyteidl2_plugins_kubeflow_mpi_proto protoreflect.FileDescriptor + +var file_flyteidl2_plugins_kubeflow_mpi_proto_rawDesc = []byte{ + 0x0a, 0x24, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x73, 0x2f, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x6d, 0x70, 0x69, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1a, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, + 0x6f, 0x77, 0x1a, 0x1a, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, 0x6f, + 0x72, 0x65, 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x73, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x73, 0x2f, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x2e, 0x70, 0x72, 
0x6f, 0x74, 0x6f, 0x22, 0xcc, 0x02, 0x0a, 0x1a, 0x44, 0x69, 0x73, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x4d, 0x50, 0x49, 0x54, 0x72, 0x61, 0x69, 0x6e, 0x69, + 0x6e, 0x67, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x66, 0x0a, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, + 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x3d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x73, 0x2e, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x69, 0x73, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x4d, 0x50, 0x49, 0x54, 0x72, 0x61, 0x69, 0x6e, + 0x69, 0x6e, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, 0x70, 0x65, 0x63, 0x52, 0x0e, + 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x6a, + 0x0a, 0x11, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x6b, 0x75, + 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x64, 0x4d, 0x50, 0x49, 0x54, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x53, 0x70, 0x65, 0x63, 0x52, 0x10, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, + 0x65, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x44, 0x0a, 0x0a, 0x72, 0x75, + 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, + 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x73, 0x2e, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x52, 0x75, 0x6e, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x09, 0x72, 0x75, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x73, 0x6c, 0x6f, 0x74, 0x73, 0x18, 
0x04, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x05, 0x73, 0x6c, 0x6f, 0x74, 0x73, 0x22, 0xbf, 0x02, 0x0a, 0x21, 0x44, 0x69, 0x73, 0x74, 0x72, + 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x4d, 0x50, 0x49, 0x54, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, + 0x67, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, 0x70, 0x65, 0x63, 0x12, 0x1e, 0x0a, 0x08, + 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x42, 0x02, + 0x18, 0x01, 0x52, 0x08, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x18, 0x0a, 0x05, + 0x69, 0x6d, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, + 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x12, 0x3b, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x73, 0x42, 0x02, 0x18, 0x01, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x73, 0x12, 0x4b, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, + 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x02, 0x18, + 0x01, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x05, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x3c, 0x0a, 0x06, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x43, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, 0x70, 0x65, 0x63, + 
0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x42, 0xf6, 0x01, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, + 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x73, 0x2e, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x42, 0x08, 0x4d, 0x70, 0x69, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x02, 0x50, 0x01, 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, + 0x2f, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0xa2, 0x02, 0x03, 0x46, 0x50, 0x4b, 0xaa, + 0x02, 0x1a, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x50, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x73, 0x2e, 0x4b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0xca, 0x02, 0x1a, 0x46, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, + 0x5c, 0x4b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0xe2, 0x02, 0x26, 0x46, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x5c, 0x4b, 0x75, + 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0xea, 0x02, 0x1c, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x3a, 0x3a, + 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x3a, 0x3a, 0x4b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, + 0x77, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_flyteidl2_plugins_kubeflow_mpi_proto_rawDescOnce sync.Once + file_flyteidl2_plugins_kubeflow_mpi_proto_rawDescData = file_flyteidl2_plugins_kubeflow_mpi_proto_rawDesc +) + +func file_flyteidl2_plugins_kubeflow_mpi_proto_rawDescGZIP() []byte { + file_flyteidl2_plugins_kubeflow_mpi_proto_rawDescOnce.Do(func() { + 
file_flyteidl2_plugins_kubeflow_mpi_proto_rawDescData = protoimpl.X.CompressGZIP(file_flyteidl2_plugins_kubeflow_mpi_proto_rawDescData) + }) + return file_flyteidl2_plugins_kubeflow_mpi_proto_rawDescData +} + +var file_flyteidl2_plugins_kubeflow_mpi_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_flyteidl2_plugins_kubeflow_mpi_proto_goTypes = []interface{}{ + (*DistributedMPITrainingTask)(nil), // 0: flyteidl2.plugins.kubeflow.DistributedMPITrainingTask + (*DistributedMPITrainingReplicaSpec)(nil), // 1: flyteidl2.plugins.kubeflow.DistributedMPITrainingReplicaSpec + (*RunPolicy)(nil), // 2: flyteidl2.plugins.kubeflow.RunPolicy + (*core.Resources)(nil), // 3: flyteidl2.core.Resources + (plugins.RestartPolicy)(0), // 4: flyteidl2.plugins.RestartPolicy + (*plugins.CommonReplicaSpec)(nil), // 5: flyteidl2.plugins.CommonReplicaSpec +} +var file_flyteidl2_plugins_kubeflow_mpi_proto_depIdxs = []int32{ + 1, // 0: flyteidl2.plugins.kubeflow.DistributedMPITrainingTask.worker_replicas:type_name -> flyteidl2.plugins.kubeflow.DistributedMPITrainingReplicaSpec + 1, // 1: flyteidl2.plugins.kubeflow.DistributedMPITrainingTask.launcher_replicas:type_name -> flyteidl2.plugins.kubeflow.DistributedMPITrainingReplicaSpec + 2, // 2: flyteidl2.plugins.kubeflow.DistributedMPITrainingTask.run_policy:type_name -> flyteidl2.plugins.kubeflow.RunPolicy + 3, // 3: flyteidl2.plugins.kubeflow.DistributedMPITrainingReplicaSpec.resources:type_name -> flyteidl2.core.Resources + 4, // 4: flyteidl2.plugins.kubeflow.DistributedMPITrainingReplicaSpec.restart_policy:type_name -> flyteidl2.plugins.RestartPolicy + 5, // 5: flyteidl2.plugins.kubeflow.DistributedMPITrainingReplicaSpec.common:type_name -> flyteidl2.plugins.CommonReplicaSpec + 6, // [6:6] is the sub-list for method output_type + 6, // [6:6] is the sub-list for method input_type + 6, // [6:6] is the sub-list for extension type_name + 6, // [6:6] is the sub-list for extension extendee + 0, // [0:6] is the sub-list for field type_name 
+} + +func init() { file_flyteidl2_plugins_kubeflow_mpi_proto_init() } +func file_flyteidl2_plugins_kubeflow_mpi_proto_init() { + if File_flyteidl2_plugins_kubeflow_mpi_proto != nil { + return + } + file_flyteidl2_plugins_kubeflow_common_proto_init() + if !protoimpl.UnsafeEnabled { + file_flyteidl2_plugins_kubeflow_mpi_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DistributedMPITrainingTask); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_plugins_kubeflow_mpi_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DistributedMPITrainingReplicaSpec); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_flyteidl2_plugins_kubeflow_mpi_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_flyteidl2_plugins_kubeflow_mpi_proto_goTypes, + DependencyIndexes: file_flyteidl2_plugins_kubeflow_mpi_proto_depIdxs, + MessageInfos: file_flyteidl2_plugins_kubeflow_mpi_proto_msgTypes, + }.Build() + File_flyteidl2_plugins_kubeflow_mpi_proto = out.File + file_flyteidl2_plugins_kubeflow_mpi_proto_rawDesc = nil + file_flyteidl2_plugins_kubeflow_mpi_proto_goTypes = nil + file_flyteidl2_plugins_kubeflow_mpi_proto_depIdxs = nil +} diff --git a/gen/go/flyteidl2/plugins/kubeflow/mpi.pb.validate.go b/gen/go/flyteidl2/plugins/kubeflow/mpi.pb.validate.go new file mode 100644 index 0000000000..766d07b14f --- /dev/null +++ b/gen/go/flyteidl2/plugins/kubeflow/mpi.pb.validate.go @@ -0,0 +1,400 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: flyteidl2/plugins/kubeflow/mpi.proto + +package kubeflow + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" + + plugins "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort + + _ = plugins.RestartPolicy(0) +) + +// Validate checks the field values on DistributedMPITrainingTask with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *DistributedMPITrainingTask) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DistributedMPITrainingTask with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// DistributedMPITrainingTaskMultiError, or nil if none found. 
+func (m *DistributedMPITrainingTask) ValidateAll() error { + return m.validate(true) +} + +func (m *DistributedMPITrainingTask) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetWorkerReplicas()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DistributedMPITrainingTaskValidationError{ + field: "WorkerReplicas", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DistributedMPITrainingTaskValidationError{ + field: "WorkerReplicas", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetWorkerReplicas()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DistributedMPITrainingTaskValidationError{ + field: "WorkerReplicas", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetLauncherReplicas()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DistributedMPITrainingTaskValidationError{ + field: "LauncherReplicas", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DistributedMPITrainingTaskValidationError{ + field: "LauncherReplicas", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetLauncherReplicas()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DistributedMPITrainingTaskValidationError{ + field: "LauncherReplicas", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetRunPolicy()).(type) { + 
case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DistributedMPITrainingTaskValidationError{ + field: "RunPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DistributedMPITrainingTaskValidationError{ + field: "RunPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRunPolicy()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DistributedMPITrainingTaskValidationError{ + field: "RunPolicy", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for Slots + + if len(errors) > 0 { + return DistributedMPITrainingTaskMultiError(errors) + } + + return nil +} + +// DistributedMPITrainingTaskMultiError is an error wrapping multiple +// validation errors returned by DistributedMPITrainingTask.ValidateAll() if +// the designated constraints aren't met. +type DistributedMPITrainingTaskMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DistributedMPITrainingTaskMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DistributedMPITrainingTaskMultiError) AllErrors() []error { return m } + +// DistributedMPITrainingTaskValidationError is the validation error returned +// by DistributedMPITrainingTask.Validate if the designated constraints aren't met. +type DistributedMPITrainingTaskValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. 
+func (e DistributedMPITrainingTaskValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DistributedMPITrainingTaskValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DistributedMPITrainingTaskValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DistributedMPITrainingTaskValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e DistributedMPITrainingTaskValidationError) ErrorName() string { + return "DistributedMPITrainingTaskValidationError" +} + +// Error satisfies the builtin error interface +func (e DistributedMPITrainingTaskValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDistributedMPITrainingTask.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DistributedMPITrainingTaskValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DistributedMPITrainingTaskValidationError{} + +// Validate checks the field values on DistributedMPITrainingReplicaSpec with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *DistributedMPITrainingReplicaSpec) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DistributedMPITrainingReplicaSpec +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// DistributedMPITrainingReplicaSpecMultiError, or nil if none found. 
+func (m *DistributedMPITrainingReplicaSpec) ValidateAll() error { + return m.validate(true) +} + +func (m *DistributedMPITrainingReplicaSpec) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Replicas + + // no validation rules for Image + + if all { + switch v := interface{}(m.GetResources()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DistributedMPITrainingReplicaSpecValidationError{ + field: "Resources", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DistributedMPITrainingReplicaSpecValidationError{ + field: "Resources", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetResources()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DistributedMPITrainingReplicaSpecValidationError{ + field: "Resources", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for RestartPolicy + + if all { + switch v := interface{}(m.GetCommon()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DistributedMPITrainingReplicaSpecValidationError{ + field: "Common", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DistributedMPITrainingReplicaSpecValidationError{ + field: "Common", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCommon()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DistributedMPITrainingReplicaSpecValidationError{ + field: "Common", + reason: "embedded message failed 
validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return DistributedMPITrainingReplicaSpecMultiError(errors) + } + + return nil +} + +// DistributedMPITrainingReplicaSpecMultiError is an error wrapping multiple +// validation errors returned by +// DistributedMPITrainingReplicaSpec.ValidateAll() if the designated +// constraints aren't met. +type DistributedMPITrainingReplicaSpecMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DistributedMPITrainingReplicaSpecMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DistributedMPITrainingReplicaSpecMultiError) AllErrors() []error { return m } + +// DistributedMPITrainingReplicaSpecValidationError is the validation error +// returned by DistributedMPITrainingReplicaSpec.Validate if the designated +// constraints aren't met. +type DistributedMPITrainingReplicaSpecValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DistributedMPITrainingReplicaSpecValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DistributedMPITrainingReplicaSpecValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DistributedMPITrainingReplicaSpecValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DistributedMPITrainingReplicaSpecValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e DistributedMPITrainingReplicaSpecValidationError) ErrorName() string { + return "DistributedMPITrainingReplicaSpecValidationError" +} + +// Error satisfies the builtin error interface +func (e DistributedMPITrainingReplicaSpecValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDistributedMPITrainingReplicaSpec.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DistributedMPITrainingReplicaSpecValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DistributedMPITrainingReplicaSpecValidationError{} diff --git a/gen/go/flyteidl2/plugins/kubeflow/pytorch.pb.go b/gen/go/flyteidl2/plugins/kubeflow/pytorch.pb.go new file mode 100644 index 0000000000..d011795e90 --- /dev/null +++ b/gen/go/flyteidl2/plugins/kubeflow/pytorch.pb.go @@ -0,0 +1,469 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc (unknown) +// source: flyteidl2/plugins/kubeflow/pytorch.proto + +package kubeflow + +import ( + core "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" + plugins "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Custom proto for torch elastic config for distributed training using +// https://github.com/kubeflow/training-operator/blob/master/pkg/apis/kubeflow.org/v1/pytorch_types.go +type ElasticConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RdzvBackend string `protobuf:"bytes,1,opt,name=rdzv_backend,json=rdzvBackend,proto3" json:"rdzv_backend,omitempty"` + MinReplicas int32 `protobuf:"varint,2,opt,name=min_replicas,json=minReplicas,proto3" json:"min_replicas,omitempty"` + MaxReplicas int32 `protobuf:"varint,3,opt,name=max_replicas,json=maxReplicas,proto3" json:"max_replicas,omitempty"` + NprocPerNode int32 `protobuf:"varint,4,opt,name=nproc_per_node,json=nprocPerNode,proto3" json:"nproc_per_node,omitempty"` + MaxRestarts int32 `protobuf:"varint,5,opt,name=max_restarts,json=maxRestarts,proto3" json:"max_restarts,omitempty"` +} + +func (x *ElasticConfig) Reset() { + *x = ElasticConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_plugins_kubeflow_pytorch_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ElasticConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ElasticConfig) ProtoMessage() {} + +func (x *ElasticConfig) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_plugins_kubeflow_pytorch_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ElasticConfig.ProtoReflect.Descriptor instead. 
+func (*ElasticConfig) Descriptor() ([]byte, []int) { + return file_flyteidl2_plugins_kubeflow_pytorch_proto_rawDescGZIP(), []int{0} +} + +func (x *ElasticConfig) GetRdzvBackend() string { + if x != nil { + return x.RdzvBackend + } + return "" +} + +func (x *ElasticConfig) GetMinReplicas() int32 { + if x != nil { + return x.MinReplicas + } + return 0 +} + +func (x *ElasticConfig) GetMaxReplicas() int32 { + if x != nil { + return x.MaxReplicas + } + return 0 +} + +func (x *ElasticConfig) GetNprocPerNode() int32 { + if x != nil { + return x.NprocPerNode + } + return 0 +} + +func (x *ElasticConfig) GetMaxRestarts() int32 { + if x != nil { + return x.MaxRestarts + } + return 0 +} + +// Proto for plugin that enables distributed training using https://github.com/kubeflow/pytorch-operator +type DistributedPyTorchTrainingTask struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Worker replicas spec + WorkerReplicas *DistributedPyTorchTrainingReplicaSpec `protobuf:"bytes,1,opt,name=worker_replicas,json=workerReplicas,proto3" json:"worker_replicas,omitempty"` + // Master replicas spec, master replicas can only have 1 replica + MasterReplicas *DistributedPyTorchTrainingReplicaSpec `protobuf:"bytes,2,opt,name=master_replicas,json=masterReplicas,proto3" json:"master_replicas,omitempty"` + // RunPolicy encapsulates various runtime policies of the distributed training + // job, for example how to clean up resources and how long the job can stay + // active. 
+ RunPolicy *RunPolicy `protobuf:"bytes,3,opt,name=run_policy,json=runPolicy,proto3" json:"run_policy,omitempty"` + // config for an elastic pytorch job + ElasticConfig *ElasticConfig `protobuf:"bytes,4,opt,name=elastic_config,json=elasticConfig,proto3" json:"elastic_config,omitempty"` +} + +func (x *DistributedPyTorchTrainingTask) Reset() { + *x = DistributedPyTorchTrainingTask{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_plugins_kubeflow_pytorch_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DistributedPyTorchTrainingTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DistributedPyTorchTrainingTask) ProtoMessage() {} + +func (x *DistributedPyTorchTrainingTask) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_plugins_kubeflow_pytorch_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DistributedPyTorchTrainingTask.ProtoReflect.Descriptor instead. 
+func (*DistributedPyTorchTrainingTask) Descriptor() ([]byte, []int) { + return file_flyteidl2_plugins_kubeflow_pytorch_proto_rawDescGZIP(), []int{1} +} + +func (x *DistributedPyTorchTrainingTask) GetWorkerReplicas() *DistributedPyTorchTrainingReplicaSpec { + if x != nil { + return x.WorkerReplicas + } + return nil +} + +func (x *DistributedPyTorchTrainingTask) GetMasterReplicas() *DistributedPyTorchTrainingReplicaSpec { + if x != nil { + return x.MasterReplicas + } + return nil +} + +func (x *DistributedPyTorchTrainingTask) GetRunPolicy() *RunPolicy { + if x != nil { + return x.RunPolicy + } + return nil +} + +func (x *DistributedPyTorchTrainingTask) GetElasticConfig() *ElasticConfig { + if x != nil { + return x.ElasticConfig + } + return nil +} + +type DistributedPyTorchTrainingReplicaSpec struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // 1~4 deprecated. Use common instead. + // Number of replicas + // + // Deprecated: Marked as deprecated in flyteidl2/plugins/kubeflow/pytorch.proto. + Replicas int32 `protobuf:"varint,1,opt,name=replicas,proto3" json:"replicas,omitempty"` + // Image used for the replica group + // + // Deprecated: Marked as deprecated in flyteidl2/plugins/kubeflow/pytorch.proto. + Image string `protobuf:"bytes,2,opt,name=image,proto3" json:"image,omitempty"` + // Resources required for the replica group + // + // Deprecated: Marked as deprecated in flyteidl2/plugins/kubeflow/pytorch.proto. + Resources *core.Resources `protobuf:"bytes,3,opt,name=resources,proto3" json:"resources,omitempty"` + // Restart policy determines whether pods will be restarted when they exit + // + // Deprecated: Marked as deprecated in flyteidl2/plugins/kubeflow/pytorch.proto. 
+ RestartPolicy plugins.RestartPolicy `protobuf:"varint,4,opt,name=restart_policy,json=restartPolicy,proto3,enum=flyteidl2.plugins.RestartPolicy" json:"restart_policy,omitempty"` + // The common replica spec + Common *plugins.CommonReplicaSpec `protobuf:"bytes,5,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *DistributedPyTorchTrainingReplicaSpec) Reset() { + *x = DistributedPyTorchTrainingReplicaSpec{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_plugins_kubeflow_pytorch_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DistributedPyTorchTrainingReplicaSpec) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DistributedPyTorchTrainingReplicaSpec) ProtoMessage() {} + +func (x *DistributedPyTorchTrainingReplicaSpec) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_plugins_kubeflow_pytorch_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DistributedPyTorchTrainingReplicaSpec.ProtoReflect.Descriptor instead. +func (*DistributedPyTorchTrainingReplicaSpec) Descriptor() ([]byte, []int) { + return file_flyteidl2_plugins_kubeflow_pytorch_proto_rawDescGZIP(), []int{2} +} + +// Deprecated: Marked as deprecated in flyteidl2/plugins/kubeflow/pytorch.proto. +func (x *DistributedPyTorchTrainingReplicaSpec) GetReplicas() int32 { + if x != nil { + return x.Replicas + } + return 0 +} + +// Deprecated: Marked as deprecated in flyteidl2/plugins/kubeflow/pytorch.proto. +func (x *DistributedPyTorchTrainingReplicaSpec) GetImage() string { + if x != nil { + return x.Image + } + return "" +} + +// Deprecated: Marked as deprecated in flyteidl2/plugins/kubeflow/pytorch.proto. 
+func (x *DistributedPyTorchTrainingReplicaSpec) GetResources() *core.Resources { + if x != nil { + return x.Resources + } + return nil +} + +// Deprecated: Marked as deprecated in flyteidl2/plugins/kubeflow/pytorch.proto. +func (x *DistributedPyTorchTrainingReplicaSpec) GetRestartPolicy() plugins.RestartPolicy { + if x != nil { + return x.RestartPolicy + } + return plugins.RestartPolicy(0) +} + +func (x *DistributedPyTorchTrainingReplicaSpec) GetCommon() *plugins.CommonReplicaSpec { + if x != nil { + return x.Common + } + return nil +} + +var File_flyteidl2_plugins_kubeflow_pytorch_proto protoreflect.FileDescriptor + +var file_flyteidl2_plugins_kubeflow_pytorch_proto_rawDesc = []byte{ + 0x0a, 0x28, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x73, 0x2f, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x70, 0x79, 0x74, + 0x6f, 0x72, 0x63, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1a, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x6b, 0x75, + 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x1a, 0x1a, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x27, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2f, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc1, 0x01, 0x0a, 0x0d, + 0x45, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x21, 0x0a, + 0x0c, 0x72, 0x64, 0x7a, 0x76, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 
0x0b, 0x72, 0x64, 0x7a, 0x76, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, + 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x6d, 0x69, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6e, 0x70, 0x72, 0x6f, 0x63, 0x5f, + 0x70, 0x65, 0x72, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, + 0x6e, 0x70, 0x72, 0x6f, 0x63, 0x50, 0x65, 0x72, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x73, 0x22, + 0x90, 0x03, 0x0a, 0x1e, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x50, + 0x79, 0x54, 0x6f, 0x72, 0x63, 0x68, 0x54, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x54, 0x61, + 0x73, 0x6b, 0x12, 0x6a, 0x0a, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, + 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, + 0x75, 0x74, 0x65, 0x64, 0x50, 0x79, 0x54, 0x6f, 0x72, 0x63, 0x68, 0x54, 0x72, 0x61, 0x69, 0x6e, + 0x69, 0x6e, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, 0x70, 0x65, 0x63, 0x52, 0x0e, + 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x6a, + 0x0a, 0x0f, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x41, 0x2e, 
0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x6b, 0x75, 0x62, 0x65, + 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, + 0x50, 0x79, 0x54, 0x6f, 0x72, 0x63, 0x68, 0x54, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, 0x70, 0x65, 0x63, 0x52, 0x0e, 0x6d, 0x61, 0x73, 0x74, + 0x65, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x44, 0x0a, 0x0a, 0x72, 0x75, + 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, + 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x73, 0x2e, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x52, 0x75, 0x6e, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x09, 0x72, 0x75, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x12, 0x50, 0x0a, 0x0e, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x6b, 0x75, 0x62, + 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x45, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x52, 0x0d, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x22, 0xa9, 0x02, 0x0a, 0x25, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x64, 0x50, 0x79, 0x54, 0x6f, 0x72, 0x63, 0x68, 0x54, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, + 0x67, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, 0x70, 0x65, 0x63, 0x12, 0x1e, 0x0a, 0x08, + 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x42, 0x02, + 0x18, 0x01, 0x52, 0x08, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x18, 0x0a, 0x05, + 0x69, 0x6d, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, + 
0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x12, 0x3b, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x73, 0x42, 0x02, 0x18, 0x01, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x73, 0x12, 0x4b, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, + 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x02, 0x18, + 0x01, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x12, 0x3c, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x24, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x53, 0x70, 0x65, 0x63, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x42, 0xfa, + 0x01, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, + 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, + 0x77, 0x42, 0x0c, 0x50, 0x79, 0x74, 0x6f, 0x72, 0x63, 0x68, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, + 0x02, 0x50, 0x01, 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x76, + 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2f, 0x6b, 0x75, 0x62, 0x65, 0x66, + 0x6c, 0x6f, 0x77, 0xa2, 0x02, 0x03, 
0x46, 0x50, 0x4b, 0xaa, 0x02, 0x1a, 0x46, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x4b, 0x75, + 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0xca, 0x02, 0x1a, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x32, 0x5c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x5c, 0x4b, 0x75, 0x62, 0x65, 0x66, + 0x6c, 0x6f, 0x77, 0xe2, 0x02, 0x26, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, + 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x5c, 0x4b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, + 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x1c, 0x46, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x3a, 0x3a, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x73, 0x3a, 0x3a, 0x4b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_flyteidl2_plugins_kubeflow_pytorch_proto_rawDescOnce sync.Once + file_flyteidl2_plugins_kubeflow_pytorch_proto_rawDescData = file_flyteidl2_plugins_kubeflow_pytorch_proto_rawDesc +) + +func file_flyteidl2_plugins_kubeflow_pytorch_proto_rawDescGZIP() []byte { + file_flyteidl2_plugins_kubeflow_pytorch_proto_rawDescOnce.Do(func() { + file_flyteidl2_plugins_kubeflow_pytorch_proto_rawDescData = protoimpl.X.CompressGZIP(file_flyteidl2_plugins_kubeflow_pytorch_proto_rawDescData) + }) + return file_flyteidl2_plugins_kubeflow_pytorch_proto_rawDescData +} + +var file_flyteidl2_plugins_kubeflow_pytorch_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_flyteidl2_plugins_kubeflow_pytorch_proto_goTypes = []interface{}{ + (*ElasticConfig)(nil), // 0: flyteidl2.plugins.kubeflow.ElasticConfig + (*DistributedPyTorchTrainingTask)(nil), // 1: flyteidl2.plugins.kubeflow.DistributedPyTorchTrainingTask + (*DistributedPyTorchTrainingReplicaSpec)(nil), // 2: flyteidl2.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpec + (*RunPolicy)(nil), // 3: flyteidl2.plugins.kubeflow.RunPolicy + (*core.Resources)(nil), // 
4: flyteidl2.core.Resources + (plugins.RestartPolicy)(0), // 5: flyteidl2.plugins.RestartPolicy + (*plugins.CommonReplicaSpec)(nil), // 6: flyteidl2.plugins.CommonReplicaSpec +} +var file_flyteidl2_plugins_kubeflow_pytorch_proto_depIdxs = []int32{ + 2, // 0: flyteidl2.plugins.kubeflow.DistributedPyTorchTrainingTask.worker_replicas:type_name -> flyteidl2.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpec + 2, // 1: flyteidl2.plugins.kubeflow.DistributedPyTorchTrainingTask.master_replicas:type_name -> flyteidl2.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpec + 3, // 2: flyteidl2.plugins.kubeflow.DistributedPyTorchTrainingTask.run_policy:type_name -> flyteidl2.plugins.kubeflow.RunPolicy + 0, // 3: flyteidl2.plugins.kubeflow.DistributedPyTorchTrainingTask.elastic_config:type_name -> flyteidl2.plugins.kubeflow.ElasticConfig + 4, // 4: flyteidl2.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpec.resources:type_name -> flyteidl2.core.Resources + 5, // 5: flyteidl2.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpec.restart_policy:type_name -> flyteidl2.plugins.RestartPolicy + 6, // 6: flyteidl2.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpec.common:type_name -> flyteidl2.plugins.CommonReplicaSpec + 7, // [7:7] is the sub-list for method output_type + 7, // [7:7] is the sub-list for method input_type + 7, // [7:7] is the sub-list for extension type_name + 7, // [7:7] is the sub-list for extension extendee + 0, // [0:7] is the sub-list for field type_name +} + +func init() { file_flyteidl2_plugins_kubeflow_pytorch_proto_init() } +func file_flyteidl2_plugins_kubeflow_pytorch_proto_init() { + if File_flyteidl2_plugins_kubeflow_pytorch_proto != nil { + return + } + file_flyteidl2_plugins_kubeflow_common_proto_init() + if !protoimpl.UnsafeEnabled { + file_flyteidl2_plugins_kubeflow_pytorch_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ElasticConfig); i { + case 0: + return &v.state + case 1: + return 
&v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_plugins_kubeflow_pytorch_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DistributedPyTorchTrainingTask); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_plugins_kubeflow_pytorch_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DistributedPyTorchTrainingReplicaSpec); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_flyteidl2_plugins_kubeflow_pytorch_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_flyteidl2_plugins_kubeflow_pytorch_proto_goTypes, + DependencyIndexes: file_flyteidl2_plugins_kubeflow_pytorch_proto_depIdxs, + MessageInfos: file_flyteidl2_plugins_kubeflow_pytorch_proto_msgTypes, + }.Build() + File_flyteidl2_plugins_kubeflow_pytorch_proto = out.File + file_flyteidl2_plugins_kubeflow_pytorch_proto_rawDesc = nil + file_flyteidl2_plugins_kubeflow_pytorch_proto_goTypes = nil + file_flyteidl2_plugins_kubeflow_pytorch_proto_depIdxs = nil +} diff --git a/gen/go/flyteidl2/plugins/kubeflow/pytorch.pb.validate.go b/gen/go/flyteidl2/plugins/kubeflow/pytorch.pb.validate.go new file mode 100644 index 0000000000..254a30603a --- /dev/null +++ b/gen/go/flyteidl2/plugins/kubeflow/pytorch.pb.validate.go @@ -0,0 +1,538 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: flyteidl2/plugins/kubeflow/pytorch.proto + +package kubeflow + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" + + plugins "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort + + _ = plugins.RestartPolicy(0) +) + +// Validate checks the field values on ElasticConfig with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *ElasticConfig) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on ElasticConfig with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in ElasticConfigMultiError, or +// nil if none found. +func (m *ElasticConfig) ValidateAll() error { + return m.validate(true) +} + +func (m *ElasticConfig) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for RdzvBackend + + // no validation rules for MinReplicas + + // no validation rules for MaxReplicas + + // no validation rules for NprocPerNode + + // no validation rules for MaxRestarts + + if len(errors) > 0 { + return ElasticConfigMultiError(errors) + } + + return nil +} + +// ElasticConfigMultiError is an error wrapping multiple validation errors +// returned by ElasticConfig.ValidateAll() if the designated constraints +// aren't met. +type ElasticConfigMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m ElasticConfigMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m ElasticConfigMultiError) AllErrors() []error { return m } + +// ElasticConfigValidationError is the validation error returned by +// ElasticConfig.Validate if the designated constraints aren't met. +type ElasticConfigValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e ElasticConfigValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e ElasticConfigValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e ElasticConfigValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e ElasticConfigValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e ElasticConfigValidationError) ErrorName() string { return "ElasticConfigValidationError" } + +// Error satisfies the builtin error interface +func (e ElasticConfigValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sElasticConfig.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = ElasticConfigValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = ElasticConfigValidationError{} + +// Validate checks the field values on DistributedPyTorchTrainingTask with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. 
+func (m *DistributedPyTorchTrainingTask) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DistributedPyTorchTrainingTask with +// the rules defined in the proto definition for this message. If any rules +// are violated, the result is a list of violation errors wrapped in +// DistributedPyTorchTrainingTaskMultiError, or nil if none found. +func (m *DistributedPyTorchTrainingTask) ValidateAll() error { + return m.validate(true) +} + +func (m *DistributedPyTorchTrainingTask) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetWorkerReplicas()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DistributedPyTorchTrainingTaskValidationError{ + field: "WorkerReplicas", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DistributedPyTorchTrainingTaskValidationError{ + field: "WorkerReplicas", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetWorkerReplicas()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DistributedPyTorchTrainingTaskValidationError{ + field: "WorkerReplicas", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetMasterReplicas()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DistributedPyTorchTrainingTaskValidationError{ + field: "MasterReplicas", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DistributedPyTorchTrainingTaskValidationError{ + field: "MasterReplicas", + reason: "embedded 
message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetMasterReplicas()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DistributedPyTorchTrainingTaskValidationError{ + field: "MasterReplicas", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetRunPolicy()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DistributedPyTorchTrainingTaskValidationError{ + field: "RunPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DistributedPyTorchTrainingTaskValidationError{ + field: "RunPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRunPolicy()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DistributedPyTorchTrainingTaskValidationError{ + field: "RunPolicy", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetElasticConfig()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DistributedPyTorchTrainingTaskValidationError{ + field: "ElasticConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DistributedPyTorchTrainingTaskValidationError{ + field: "ElasticConfig", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetElasticConfig()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DistributedPyTorchTrainingTaskValidationError{ + field: "ElasticConfig", + reason: "embedded 
message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return DistributedPyTorchTrainingTaskMultiError(errors) + } + + return nil +} + +// DistributedPyTorchTrainingTaskMultiError is an error wrapping multiple +// validation errors returned by DistributedPyTorchTrainingTask.ValidateAll() +// if the designated constraints aren't met. +type DistributedPyTorchTrainingTaskMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DistributedPyTorchTrainingTaskMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DistributedPyTorchTrainingTaskMultiError) AllErrors() []error { return m } + +// DistributedPyTorchTrainingTaskValidationError is the validation error +// returned by DistributedPyTorchTrainingTask.Validate if the designated +// constraints aren't met. +type DistributedPyTorchTrainingTaskValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DistributedPyTorchTrainingTaskValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DistributedPyTorchTrainingTaskValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DistributedPyTorchTrainingTaskValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DistributedPyTorchTrainingTaskValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e DistributedPyTorchTrainingTaskValidationError) ErrorName() string { + return "DistributedPyTorchTrainingTaskValidationError" +} + +// Error satisfies the builtin error interface +func (e DistributedPyTorchTrainingTaskValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDistributedPyTorchTrainingTask.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DistributedPyTorchTrainingTaskValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DistributedPyTorchTrainingTaskValidationError{} + +// Validate checks the field values on DistributedPyTorchTrainingReplicaSpec +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *DistributedPyTorchTrainingReplicaSpec) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DistributedPyTorchTrainingReplicaSpec +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// DistributedPyTorchTrainingReplicaSpecMultiError, or nil if none found. 
+func (m *DistributedPyTorchTrainingReplicaSpec) ValidateAll() error { + return m.validate(true) +} + +func (m *DistributedPyTorchTrainingReplicaSpec) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Replicas + + // no validation rules for Image + + if all { + switch v := interface{}(m.GetResources()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DistributedPyTorchTrainingReplicaSpecValidationError{ + field: "Resources", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DistributedPyTorchTrainingReplicaSpecValidationError{ + field: "Resources", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetResources()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DistributedPyTorchTrainingReplicaSpecValidationError{ + field: "Resources", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for RestartPolicy + + if all { + switch v := interface{}(m.GetCommon()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DistributedPyTorchTrainingReplicaSpecValidationError{ + field: "Common", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DistributedPyTorchTrainingReplicaSpecValidationError{ + field: "Common", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCommon()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DistributedPyTorchTrainingReplicaSpecValidationError{ + field: "Common", + 
reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return DistributedPyTorchTrainingReplicaSpecMultiError(errors) + } + + return nil +} + +// DistributedPyTorchTrainingReplicaSpecMultiError is an error wrapping +// multiple validation errors returned by +// DistributedPyTorchTrainingReplicaSpec.ValidateAll() if the designated +// constraints aren't met. +type DistributedPyTorchTrainingReplicaSpecMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DistributedPyTorchTrainingReplicaSpecMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DistributedPyTorchTrainingReplicaSpecMultiError) AllErrors() []error { return m } + +// DistributedPyTorchTrainingReplicaSpecValidationError is the validation error +// returned by DistributedPyTorchTrainingReplicaSpec.Validate if the +// designated constraints aren't met. +type DistributedPyTorchTrainingReplicaSpecValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DistributedPyTorchTrainingReplicaSpecValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DistributedPyTorchTrainingReplicaSpecValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DistributedPyTorchTrainingReplicaSpecValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DistributedPyTorchTrainingReplicaSpecValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e DistributedPyTorchTrainingReplicaSpecValidationError) ErrorName() string { + return "DistributedPyTorchTrainingReplicaSpecValidationError" +} + +// Error satisfies the builtin error interface +func (e DistributedPyTorchTrainingReplicaSpecValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDistributedPyTorchTrainingReplicaSpec.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DistributedPyTorchTrainingReplicaSpecValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DistributedPyTorchTrainingReplicaSpecValidationError{} diff --git a/gen/go/flyteidl2/plugins/kubeflow/tensorflow.pb.go b/gen/go/flyteidl2/plugins/kubeflow/tensorflow.pb.go new file mode 100644 index 0000000000..c75b58fda1 --- /dev/null +++ b/gen/go/flyteidl2/plugins/kubeflow/tensorflow.pb.go @@ -0,0 +1,382 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc (unknown) +// source: flyteidl2/plugins/kubeflow/tensorflow.proto + +package kubeflow + +import ( + core "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core" + plugins "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Proto for plugin that enables distributed training using https://github.com/kubeflow/tf-operator +type DistributedTensorflowTrainingTask struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Worker replicas spec + WorkerReplicas *DistributedTensorflowTrainingReplicaSpec `protobuf:"bytes,1,opt,name=worker_replicas,json=workerReplicas,proto3" json:"worker_replicas,omitempty"` + // Parameter server replicas spec + PsReplicas *DistributedTensorflowTrainingReplicaSpec `protobuf:"bytes,2,opt,name=ps_replicas,json=psReplicas,proto3" json:"ps_replicas,omitempty"` + // Chief replicas spec + ChiefReplicas *DistributedTensorflowTrainingReplicaSpec `protobuf:"bytes,3,opt,name=chief_replicas,json=chiefReplicas,proto3" json:"chief_replicas,omitempty"` + // RunPolicy encapsulates various runtime policies of the distributed training + // job, for example how to clean up resources and how long the job can stay + // active. 
+ RunPolicy *RunPolicy `protobuf:"bytes,4,opt,name=run_policy,json=runPolicy,proto3" json:"run_policy,omitempty"` + // Evaluator replicas spec + EvaluatorReplicas *DistributedTensorflowTrainingReplicaSpec `protobuf:"bytes,5,opt,name=evaluator_replicas,json=evaluatorReplicas,proto3" json:"evaluator_replicas,omitempty"` +} + +func (x *DistributedTensorflowTrainingTask) Reset() { + *x = DistributedTensorflowTrainingTask{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_plugins_kubeflow_tensorflow_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DistributedTensorflowTrainingTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DistributedTensorflowTrainingTask) ProtoMessage() {} + +func (x *DistributedTensorflowTrainingTask) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_plugins_kubeflow_tensorflow_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DistributedTensorflowTrainingTask.ProtoReflect.Descriptor instead. 
+func (*DistributedTensorflowTrainingTask) Descriptor() ([]byte, []int) { + return file_flyteidl2_plugins_kubeflow_tensorflow_proto_rawDescGZIP(), []int{0} +} + +func (x *DistributedTensorflowTrainingTask) GetWorkerReplicas() *DistributedTensorflowTrainingReplicaSpec { + if x != nil { + return x.WorkerReplicas + } + return nil +} + +func (x *DistributedTensorflowTrainingTask) GetPsReplicas() *DistributedTensorflowTrainingReplicaSpec { + if x != nil { + return x.PsReplicas + } + return nil +} + +func (x *DistributedTensorflowTrainingTask) GetChiefReplicas() *DistributedTensorflowTrainingReplicaSpec { + if x != nil { + return x.ChiefReplicas + } + return nil +} + +func (x *DistributedTensorflowTrainingTask) GetRunPolicy() *RunPolicy { + if x != nil { + return x.RunPolicy + } + return nil +} + +func (x *DistributedTensorflowTrainingTask) GetEvaluatorReplicas() *DistributedTensorflowTrainingReplicaSpec { + if x != nil { + return x.EvaluatorReplicas + } + return nil +} + +type DistributedTensorflowTrainingReplicaSpec struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // 1~4 deprecated. Use common instead. + // Number of replicas + // + // Deprecated: Marked as deprecated in flyteidl2/plugins/kubeflow/tensorflow.proto. + Replicas int32 `protobuf:"varint,1,opt,name=replicas,proto3" json:"replicas,omitempty"` + // Image used for the replica group + // + // Deprecated: Marked as deprecated in flyteidl2/plugins/kubeflow/tensorflow.proto. + Image string `protobuf:"bytes,2,opt,name=image,proto3" json:"image,omitempty"` + // Resources required for the replica group + // + // Deprecated: Marked as deprecated in flyteidl2/plugins/kubeflow/tensorflow.proto. 
+ Resources *core.Resources `protobuf:"bytes,3,opt,name=resources,proto3" json:"resources,omitempty"` + // Restart policy determines whether pods will be restarted when they exit + // + // Deprecated: Marked as deprecated in flyteidl2/plugins/kubeflow/tensorflow.proto. + RestartPolicy plugins.RestartPolicy `protobuf:"varint,4,opt,name=restart_policy,json=restartPolicy,proto3,enum=flyteidl2.plugins.RestartPolicy" json:"restart_policy,omitempty"` + // The common replica spec + Common *plugins.CommonReplicaSpec `protobuf:"bytes,5,opt,name=common,proto3" json:"common,omitempty"` +} + +func (x *DistributedTensorflowTrainingReplicaSpec) Reset() { + *x = DistributedTensorflowTrainingReplicaSpec{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_plugins_kubeflow_tensorflow_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DistributedTensorflowTrainingReplicaSpec) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DistributedTensorflowTrainingReplicaSpec) ProtoMessage() {} + +func (x *DistributedTensorflowTrainingReplicaSpec) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_plugins_kubeflow_tensorflow_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DistributedTensorflowTrainingReplicaSpec.ProtoReflect.Descriptor instead. +func (*DistributedTensorflowTrainingReplicaSpec) Descriptor() ([]byte, []int) { + return file_flyteidl2_plugins_kubeflow_tensorflow_proto_rawDescGZIP(), []int{1} +} + +// Deprecated: Marked as deprecated in flyteidl2/plugins/kubeflow/tensorflow.proto. 
+func (x *DistributedTensorflowTrainingReplicaSpec) GetReplicas() int32 { + if x != nil { + return x.Replicas + } + return 0 +} + +// Deprecated: Marked as deprecated in flyteidl2/plugins/kubeflow/tensorflow.proto. +func (x *DistributedTensorflowTrainingReplicaSpec) GetImage() string { + if x != nil { + return x.Image + } + return "" +} + +// Deprecated: Marked as deprecated in flyteidl2/plugins/kubeflow/tensorflow.proto. +func (x *DistributedTensorflowTrainingReplicaSpec) GetResources() *core.Resources { + if x != nil { + return x.Resources + } + return nil +} + +// Deprecated: Marked as deprecated in flyteidl2/plugins/kubeflow/tensorflow.proto. +func (x *DistributedTensorflowTrainingReplicaSpec) GetRestartPolicy() plugins.RestartPolicy { + if x != nil { + return x.RestartPolicy + } + return plugins.RestartPolicy(0) +} + +func (x *DistributedTensorflowTrainingReplicaSpec) GetCommon() *plugins.CommonReplicaSpec { + if x != nil { + return x.Common + } + return nil +} + +var File_flyteidl2_plugins_kubeflow_tensorflow_proto protoreflect.FileDescriptor + +var file_flyteidl2_plugins_kubeflow_tensorflow_proto_rawDesc = []byte{ + 0x0a, 0x2b, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x73, 0x2f, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x74, 0x65, 0x6e, + 0x73, 0x6f, 0x72, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1a, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, + 0x2e, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x1a, 0x1a, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, + 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x66, 0x6c, 0x79, 0x74, 0x65, 
0x69, 0x64, 0x6c, 0x32, + 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2f, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, + 0x77, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa1, + 0x04, 0x0a, 0x21, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x54, 0x65, + 0x6e, 0x73, 0x6f, 0x72, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, + 0x54, 0x61, 0x73, 0x6b, 0x12, 0x6d, 0x0a, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x72, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x44, 0x2e, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x73, 0x2e, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x69, 0x73, 0x74, 0x72, + 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x54, 0x65, 0x6e, 0x73, 0x6f, 0x72, 0x66, 0x6c, 0x6f, 0x77, + 0x54, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, + 0x70, 0x65, 0x63, 0x52, 0x0e, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x73, 0x12, 0x65, 0x0a, 0x0b, 0x70, 0x73, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x6b, 0x75, 0x62, + 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x64, 0x54, 0x65, 0x6e, 0x73, 0x6f, 0x72, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x72, 0x61, 0x69, 0x6e, + 0x69, 0x6e, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, 0x70, 0x65, 0x63, 0x52, 0x0a, + 0x70, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x6b, 0x0a, 0x0e, 0x63, 0x68, + 0x69, 0x65, 0x66, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, + 0x6c, 0x75, 
0x67, 0x69, 0x6e, 0x73, 0x2e, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2e, + 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x54, 0x65, 0x6e, 0x73, 0x6f, + 0x72, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x53, 0x70, 0x65, 0x63, 0x52, 0x0d, 0x63, 0x68, 0x69, 0x65, 0x66, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x44, 0x0a, 0x0a, 0x72, 0x75, 0x6e, 0x5f, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, + 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x52, 0x75, 0x6e, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x52, 0x09, 0x72, 0x75, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x73, 0x0a, + 0x12, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x6b, 0x75, + 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x64, 0x54, 0x65, 0x6e, 0x73, 0x6f, 0x72, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x72, 0x61, 0x69, + 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, 0x70, 0x65, 0x63, 0x52, + 0x11, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x73, 0x22, 0xac, 0x02, 0x0a, 0x28, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x64, 0x54, 0x65, 0x6e, 0x73, 0x6f, 0x72, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x72, 0x61, 0x69, + 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, 0x70, 0x65, 0x63, 0x12, + 0x1e, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x05, 0x42, 0x02, 0x18, 0x01, 0x52, 0x08, 0x72, 
0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, + 0x18, 0x0a, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, + 0x18, 0x01, 0x52, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x12, 0x3b, 0x0a, 0x09, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x42, 0x02, 0x18, 0x01, 0x52, 0x09, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x4b, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x74, 0x61, 0x72, + 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, + 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x42, 0x02, 0x18, 0x01, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x12, 0x3c, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, 0x70, 0x65, 0x63, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x42, 0xfd, 0x01, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x6b, 0x75, 0x62, 0x65, + 0x66, 0x6c, 0x6f, 0x77, 0x42, 0x0f, 0x54, 0x65, 0x6e, 0x73, 0x6f, 0x72, 0x66, 0x6c, 0x6f, 0x77, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x02, 0x50, 0x01, 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 
0x2f, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, + 0x2f, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0xa2, 0x02, 0x03, 0x46, 0x50, 0x4b, 0xaa, + 0x02, 0x1a, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x50, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x73, 0x2e, 0x4b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0xca, 0x02, 0x1a, 0x46, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, + 0x5c, 0x4b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0xe2, 0x02, 0x26, 0x46, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x5c, 0x4b, 0x75, + 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0xea, 0x02, 0x1c, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x3a, 0x3a, + 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x3a, 0x3a, 0x4b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, + 0x77, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_flyteidl2_plugins_kubeflow_tensorflow_proto_rawDescOnce sync.Once + file_flyteidl2_plugins_kubeflow_tensorflow_proto_rawDescData = file_flyteidl2_plugins_kubeflow_tensorflow_proto_rawDesc +) + +func file_flyteidl2_plugins_kubeflow_tensorflow_proto_rawDescGZIP() []byte { + file_flyteidl2_plugins_kubeflow_tensorflow_proto_rawDescOnce.Do(func() { + file_flyteidl2_plugins_kubeflow_tensorflow_proto_rawDescData = protoimpl.X.CompressGZIP(file_flyteidl2_plugins_kubeflow_tensorflow_proto_rawDescData) + }) + return file_flyteidl2_plugins_kubeflow_tensorflow_proto_rawDescData +} + +var file_flyteidl2_plugins_kubeflow_tensorflow_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_flyteidl2_plugins_kubeflow_tensorflow_proto_goTypes = []interface{}{ + (*DistributedTensorflowTrainingTask)(nil), // 0: flyteidl2.plugins.kubeflow.DistributedTensorflowTrainingTask + (*DistributedTensorflowTrainingReplicaSpec)(nil), // 1: 
flyteidl2.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpec + (*RunPolicy)(nil), // 2: flyteidl2.plugins.kubeflow.RunPolicy + (*core.Resources)(nil), // 3: flyteidl2.core.Resources + (plugins.RestartPolicy)(0), // 4: flyteidl2.plugins.RestartPolicy + (*plugins.CommonReplicaSpec)(nil), // 5: flyteidl2.plugins.CommonReplicaSpec +} +var file_flyteidl2_plugins_kubeflow_tensorflow_proto_depIdxs = []int32{ + 1, // 0: flyteidl2.plugins.kubeflow.DistributedTensorflowTrainingTask.worker_replicas:type_name -> flyteidl2.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpec + 1, // 1: flyteidl2.plugins.kubeflow.DistributedTensorflowTrainingTask.ps_replicas:type_name -> flyteidl2.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpec + 1, // 2: flyteidl2.plugins.kubeflow.DistributedTensorflowTrainingTask.chief_replicas:type_name -> flyteidl2.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpec + 2, // 3: flyteidl2.plugins.kubeflow.DistributedTensorflowTrainingTask.run_policy:type_name -> flyteidl2.plugins.kubeflow.RunPolicy + 1, // 4: flyteidl2.plugins.kubeflow.DistributedTensorflowTrainingTask.evaluator_replicas:type_name -> flyteidl2.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpec + 3, // 5: flyteidl2.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpec.resources:type_name -> flyteidl2.core.Resources + 4, // 6: flyteidl2.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpec.restart_policy:type_name -> flyteidl2.plugins.RestartPolicy + 5, // 7: flyteidl2.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpec.common:type_name -> flyteidl2.plugins.CommonReplicaSpec + 8, // [8:8] is the sub-list for method output_type + 8, // [8:8] is the sub-list for method input_type + 8, // [8:8] is the sub-list for extension type_name + 8, // [8:8] is the sub-list for extension extendee + 0, // [0:8] is the sub-list for field type_name +} + +func init() { file_flyteidl2_plugins_kubeflow_tensorflow_proto_init() } +func 
file_flyteidl2_plugins_kubeflow_tensorflow_proto_init() { + if File_flyteidl2_plugins_kubeflow_tensorflow_proto != nil { + return + } + file_flyteidl2_plugins_kubeflow_common_proto_init() + if !protoimpl.UnsafeEnabled { + file_flyteidl2_plugins_kubeflow_tensorflow_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DistributedTensorflowTrainingTask); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_plugins_kubeflow_tensorflow_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DistributedTensorflowTrainingReplicaSpec); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_flyteidl2_plugins_kubeflow_tensorflow_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_flyteidl2_plugins_kubeflow_tensorflow_proto_goTypes, + DependencyIndexes: file_flyteidl2_plugins_kubeflow_tensorflow_proto_depIdxs, + MessageInfos: file_flyteidl2_plugins_kubeflow_tensorflow_proto_msgTypes, + }.Build() + File_flyteidl2_plugins_kubeflow_tensorflow_proto = out.File + file_flyteidl2_plugins_kubeflow_tensorflow_proto_rawDesc = nil + file_flyteidl2_plugins_kubeflow_tensorflow_proto_goTypes = nil + file_flyteidl2_plugins_kubeflow_tensorflow_proto_depIdxs = nil +} diff --git a/gen/go/flyteidl2/plugins/kubeflow/tensorflow.pb.validate.go b/gen/go/flyteidl2/plugins/kubeflow/tensorflow.pb.validate.go new file mode 100644 index 0000000000..eece03600b --- /dev/null +++ b/gen/go/flyteidl2/plugins/kubeflow/tensorflow.pb.validate.go @@ -0,0 +1,460 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: flyteidl2/plugins/kubeflow/tensorflow.proto + +package kubeflow + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" + + plugins "github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort + + _ = plugins.RestartPolicy(0) +) + +// Validate checks the field values on DistributedTensorflowTrainingTask with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *DistributedTensorflowTrainingTask) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DistributedTensorflowTrainingTask +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// DistributedTensorflowTrainingTaskMultiError, or nil if none found. 
+func (m *DistributedTensorflowTrainingTask) ValidateAll() error { + return m.validate(true) +} + +func (m *DistributedTensorflowTrainingTask) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + if all { + switch v := interface{}(m.GetWorkerReplicas()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DistributedTensorflowTrainingTaskValidationError{ + field: "WorkerReplicas", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DistributedTensorflowTrainingTaskValidationError{ + field: "WorkerReplicas", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetWorkerReplicas()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DistributedTensorflowTrainingTaskValidationError{ + field: "WorkerReplicas", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetPsReplicas()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DistributedTensorflowTrainingTaskValidationError{ + field: "PsReplicas", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DistributedTensorflowTrainingTaskValidationError{ + field: "PsReplicas", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetPsReplicas()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DistributedTensorflowTrainingTaskValidationError{ + field: "PsReplicas", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := 
interface{}(m.GetChiefReplicas()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DistributedTensorflowTrainingTaskValidationError{ + field: "ChiefReplicas", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DistributedTensorflowTrainingTaskValidationError{ + field: "ChiefReplicas", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetChiefReplicas()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DistributedTensorflowTrainingTaskValidationError{ + field: "ChiefReplicas", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetRunPolicy()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DistributedTensorflowTrainingTaskValidationError{ + field: "RunPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DistributedTensorflowTrainingTaskValidationError{ + field: "RunPolicy", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetRunPolicy()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DistributedTensorflowTrainingTaskValidationError{ + field: "RunPolicy", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetEvaluatorReplicas()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DistributedTensorflowTrainingTaskValidationError{ + field: "EvaluatorReplicas", + reason: "embedded message failed 
validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DistributedTensorflowTrainingTaskValidationError{ + field: "EvaluatorReplicas", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetEvaluatorReplicas()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DistributedTensorflowTrainingTaskValidationError{ + field: "EvaluatorReplicas", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return DistributedTensorflowTrainingTaskMultiError(errors) + } + + return nil +} + +// DistributedTensorflowTrainingTaskMultiError is an error wrapping multiple +// validation errors returned by +// DistributedTensorflowTrainingTask.ValidateAll() if the designated +// constraints aren't met. +type DistributedTensorflowTrainingTaskMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DistributedTensorflowTrainingTaskMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DistributedTensorflowTrainingTaskMultiError) AllErrors() []error { return m } + +// DistributedTensorflowTrainingTaskValidationError is the validation error +// returned by DistributedTensorflowTrainingTask.Validate if the designated +// constraints aren't met. +type DistributedTensorflowTrainingTaskValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DistributedTensorflowTrainingTaskValidationError) Field() string { return e.field } + +// Reason function returns reason value. 
+func (e DistributedTensorflowTrainingTaskValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DistributedTensorflowTrainingTaskValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DistributedTensorflowTrainingTaskValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e DistributedTensorflowTrainingTaskValidationError) ErrorName() string { + return "DistributedTensorflowTrainingTaskValidationError" +} + +// Error satisfies the builtin error interface +func (e DistributedTensorflowTrainingTaskValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDistributedTensorflowTrainingTask.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DistributedTensorflowTrainingTaskValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DistributedTensorflowTrainingTaskValidationError{} + +// Validate checks the field values on DistributedTensorflowTrainingReplicaSpec +// with the rules defined in the proto definition for this message. If any +// rules are violated, the first error encountered is returned, or nil if +// there are no violations. +func (m *DistributedTensorflowTrainingReplicaSpec) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on +// DistributedTensorflowTrainingReplicaSpec with the rules defined in the +// proto definition for this message. If any rules are violated, the result is +// a list of violation errors wrapped in +// DistributedTensorflowTrainingReplicaSpecMultiError, or nil if none found. 
+func (m *DistributedTensorflowTrainingReplicaSpec) ValidateAll() error { + return m.validate(true) +} + +func (m *DistributedTensorflowTrainingReplicaSpec) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Replicas + + // no validation rules for Image + + if all { + switch v := interface{}(m.GetResources()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DistributedTensorflowTrainingReplicaSpecValidationError{ + field: "Resources", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DistributedTensorflowTrainingReplicaSpecValidationError{ + field: "Resources", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetResources()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DistributedTensorflowTrainingReplicaSpecValidationError{ + field: "Resources", + reason: "embedded message failed validation", + cause: err, + } + } + } + + // no validation rules for RestartPolicy + + if all { + switch v := interface{}(m.GetCommon()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, DistributedTensorflowTrainingReplicaSpecValidationError{ + field: "Common", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, DistributedTensorflowTrainingReplicaSpecValidationError{ + field: "Common", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetCommon()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return DistributedTensorflowTrainingReplicaSpecValidationError{ + 
field: "Common", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return DistributedTensorflowTrainingReplicaSpecMultiError(errors) + } + + return nil +} + +// DistributedTensorflowTrainingReplicaSpecMultiError is an error wrapping +// multiple validation errors returned by +// DistributedTensorflowTrainingReplicaSpec.ValidateAll() if the designated +// constraints aren't met. +type DistributedTensorflowTrainingReplicaSpecMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DistributedTensorflowTrainingReplicaSpecMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DistributedTensorflowTrainingReplicaSpecMultiError) AllErrors() []error { return m } + +// DistributedTensorflowTrainingReplicaSpecValidationError is the validation +// error returned by DistributedTensorflowTrainingReplicaSpec.Validate if the +// designated constraints aren't met. +type DistributedTensorflowTrainingReplicaSpecValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DistributedTensorflowTrainingReplicaSpecValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DistributedTensorflowTrainingReplicaSpecValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DistributedTensorflowTrainingReplicaSpecValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DistributedTensorflowTrainingReplicaSpecValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e DistributedTensorflowTrainingReplicaSpecValidationError) ErrorName() string { + return "DistributedTensorflowTrainingReplicaSpecValidationError" +} + +// Error satisfies the builtin error interface +func (e DistributedTensorflowTrainingReplicaSpecValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDistributedTensorflowTrainingReplicaSpec.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DistributedTensorflowTrainingReplicaSpecValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DistributedTensorflowTrainingReplicaSpecValidationError{} diff --git a/gen/go/flyteidl2/plugins/mpi.pb.go b/gen/go/flyteidl2/plugins/mpi.pb.go new file mode 100644 index 0000000000..33dd0c8f51 --- /dev/null +++ b/gen/go/flyteidl2/plugins/mpi.pb.go @@ -0,0 +1,184 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc (unknown) +// source: flyteidl2/plugins/mpi.proto + +package plugins + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// MPI operator proposal https://github.com/kubeflow/community/blob/master/proposals/mpi-operator-proposal.md +// Custom proto for plugin that enables distributed training using https://github.com/kubeflow/mpi-operator +type DistributedMPITrainingTask struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // number of worker spawned in the cluster for this job + NumWorkers int32 `protobuf:"varint,1,opt,name=num_workers,json=numWorkers,proto3" json:"num_workers,omitempty"` + // number of launcher replicas spawned in the cluster for this job + // The launcher pod invokes mpirun and communicates with worker pods through MPI. + NumLauncherReplicas int32 `protobuf:"varint,2,opt,name=num_launcher_replicas,json=numLauncherReplicas,proto3" json:"num_launcher_replicas,omitempty"` + // number of slots per worker used in hostfile. + // The available slots (GPUs) in each pod. + Slots int32 `protobuf:"varint,3,opt,name=slots,proto3" json:"slots,omitempty"` +} + +func (x *DistributedMPITrainingTask) Reset() { + *x = DistributedMPITrainingTask{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_plugins_mpi_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DistributedMPITrainingTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DistributedMPITrainingTask) ProtoMessage() {} + +func (x *DistributedMPITrainingTask) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_plugins_mpi_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DistributedMPITrainingTask.ProtoReflect.Descriptor instead. 
+func (*DistributedMPITrainingTask) Descriptor() ([]byte, []int) { + return file_flyteidl2_plugins_mpi_proto_rawDescGZIP(), []int{0} +} + +func (x *DistributedMPITrainingTask) GetNumWorkers() int32 { + if x != nil { + return x.NumWorkers + } + return 0 +} + +func (x *DistributedMPITrainingTask) GetNumLauncherReplicas() int32 { + if x != nil { + return x.NumLauncherReplicas + } + return 0 +} + +func (x *DistributedMPITrainingTask) GetSlots() int32 { + if x != nil { + return x.Slots + } + return 0 +} + +var File_flyteidl2_plugins_mpi_proto protoreflect.FileDescriptor + +var file_flyteidl2_plugins_mpi_proto_rawDesc = []byte{ + 0x0a, 0x1b, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x73, 0x2f, 0x6d, 0x70, 0x69, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, + 0x22, 0x87, 0x01, 0x0a, 0x1a, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, + 0x4d, 0x50, 0x49, 0x54, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x73, 0x6b, 0x12, + 0x1f, 0x0a, 0x0b, 0x6e, 0x75, 0x6d, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6e, 0x75, 0x6d, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, + 0x12, 0x32, 0x0a, 0x15, 0x6e, 0x75, 0x6d, 0x5f, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x65, 0x72, + 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x13, 0x6e, 0x75, 0x6d, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x65, 0x72, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x6c, 0x6f, 0x74, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, 0x6c, 0x6f, 0x74, 0x73, 0x42, 0xbf, 0x01, 0x0a, 0x15, 0x63, + 0x6f, 0x6d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x73, 0x42, 0x08, 0x4d, 0x70, 0x69, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 
0x02, + 0x50, 0x01, 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x76, 0x32, + 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0xa2, 0x02, 0x03, 0x46, 0x50, 0x58, 0xaa, + 0x02, 0x11, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x50, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x73, 0xca, 0x02, 0x11, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, + 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0xe2, 0x02, 0x1d, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x5c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x5c, 0x47, 0x50, 0x42, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x12, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x3a, 0x3a, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_flyteidl2_plugins_mpi_proto_rawDescOnce sync.Once + file_flyteidl2_plugins_mpi_proto_rawDescData = file_flyteidl2_plugins_mpi_proto_rawDesc +) + +func file_flyteidl2_plugins_mpi_proto_rawDescGZIP() []byte { + file_flyteidl2_plugins_mpi_proto_rawDescOnce.Do(func() { + file_flyteidl2_plugins_mpi_proto_rawDescData = protoimpl.X.CompressGZIP(file_flyteidl2_plugins_mpi_proto_rawDescData) + }) + return file_flyteidl2_plugins_mpi_proto_rawDescData +} + +var file_flyteidl2_plugins_mpi_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_flyteidl2_plugins_mpi_proto_goTypes = []interface{}{ + (*DistributedMPITrainingTask)(nil), // 0: flyteidl2.plugins.DistributedMPITrainingTask +} +var file_flyteidl2_plugins_mpi_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // 
[0:0] is the sub-list for field type_name +} + +func init() { file_flyteidl2_plugins_mpi_proto_init() } +func file_flyteidl2_plugins_mpi_proto_init() { + if File_flyteidl2_plugins_mpi_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_flyteidl2_plugins_mpi_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DistributedMPITrainingTask); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_flyteidl2_plugins_mpi_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_flyteidl2_plugins_mpi_proto_goTypes, + DependencyIndexes: file_flyteidl2_plugins_mpi_proto_depIdxs, + MessageInfos: file_flyteidl2_plugins_mpi_proto_msgTypes, + }.Build() + File_flyteidl2_plugins_mpi_proto = out.File + file_flyteidl2_plugins_mpi_proto_rawDesc = nil + file_flyteidl2_plugins_mpi_proto_goTypes = nil + file_flyteidl2_plugins_mpi_proto_depIdxs = nil +} diff --git a/gen/go/flyteidl2/plugins/mpi.pb.validate.go b/gen/go/flyteidl2/plugins/mpi.pb.validate.go new file mode 100644 index 0000000000..a00f4d991d --- /dev/null +++ b/gen/go/flyteidl2/plugins/mpi.pb.validate.go @@ -0,0 +1,144 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: flyteidl2/plugins/mpi.proto + +package plugins + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on DistributedMPITrainingTask with the +// rules defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. +func (m *DistributedMPITrainingTask) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DistributedMPITrainingTask with the +// rules defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// DistributedMPITrainingTaskMultiError, or nil if none found. +func (m *DistributedMPITrainingTask) ValidateAll() error { + return m.validate(true) +} + +func (m *DistributedMPITrainingTask) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for NumWorkers + + // no validation rules for NumLauncherReplicas + + // no validation rules for Slots + + if len(errors) > 0 { + return DistributedMPITrainingTaskMultiError(errors) + } + + return nil +} + +// DistributedMPITrainingTaskMultiError is an error wrapping multiple +// validation errors returned by DistributedMPITrainingTask.ValidateAll() if +// the designated constraints aren't met. +type DistributedMPITrainingTaskMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m DistributedMPITrainingTaskMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DistributedMPITrainingTaskMultiError) AllErrors() []error { return m } + +// DistributedMPITrainingTaskValidationError is the validation error returned +// by DistributedMPITrainingTask.Validate if the designated constraints aren't met. +type DistributedMPITrainingTaskValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DistributedMPITrainingTaskValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DistributedMPITrainingTaskValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DistributedMPITrainingTaskValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e DistributedMPITrainingTaskValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e DistributedMPITrainingTaskValidationError) ErrorName() string { + return "DistributedMPITrainingTaskValidationError" +} + +// Error satisfies the builtin error interface +func (e DistributedMPITrainingTaskValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDistributedMPITrainingTask.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DistributedMPITrainingTaskValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DistributedMPITrainingTaskValidationError{} diff --git a/gen/go/flyteidl2/plugins/presto.pb.go b/gen/go/flyteidl2/plugins/presto.pb.go new file mode 100644 index 0000000000..f3ddacbc3b --- /dev/null +++ b/gen/go/flyteidl2/plugins/presto.pb.go @@ -0,0 +1,187 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc (unknown) +// source: flyteidl2/plugins/presto.proto + +package plugins + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This message works with the 'presto' task type in the SDK and is the object that will be in the 'custom' field +// of a Presto task's TaskTemplate +type PrestoQuery struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + RoutingGroup string `protobuf:"bytes,1,opt,name=routing_group,json=routingGroup,proto3" json:"routing_group,omitempty"` + Catalog string `protobuf:"bytes,2,opt,name=catalog,proto3" json:"catalog,omitempty"` + Schema string `protobuf:"bytes,3,opt,name=schema,proto3" json:"schema,omitempty"` + Statement string `protobuf:"bytes,4,opt,name=statement,proto3" json:"statement,omitempty"` +} + +func (x *PrestoQuery) Reset() { + *x = PrestoQuery{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_plugins_presto_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PrestoQuery) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PrestoQuery) ProtoMessage() {} + +func (x *PrestoQuery) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_plugins_presto_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PrestoQuery.ProtoReflect.Descriptor instead. 
+func (*PrestoQuery) Descriptor() ([]byte, []int) { + return file_flyteidl2_plugins_presto_proto_rawDescGZIP(), []int{0} +} + +func (x *PrestoQuery) GetRoutingGroup() string { + if x != nil { + return x.RoutingGroup + } + return "" +} + +func (x *PrestoQuery) GetCatalog() string { + if x != nil { + return x.Catalog + } + return "" +} + +func (x *PrestoQuery) GetSchema() string { + if x != nil { + return x.Schema + } + return "" +} + +func (x *PrestoQuery) GetStatement() string { + if x != nil { + return x.Statement + } + return "" +} + +var File_flyteidl2_plugins_presto_proto protoreflect.FileDescriptor + +var file_flyteidl2_plugins_presto_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x73, 0x2f, 0x70, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x11, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x73, 0x22, 0x82, 0x01, 0x0a, 0x0b, 0x50, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x51, 0x75, + 0x65, 0x72, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x5f, 0x67, + 0x72, 0x6f, 0x75, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x72, 0x6f, 0x75, 0x74, + 0x69, 0x6e, 0x67, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x61, 0x74, 0x61, + 0x6c, 0x6f, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x61, 0x74, 0x61, 0x6c, + 0x6f, 0x67, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x74, + 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0xc2, 0x01, 0x0a, 0x15, 0x63, 0x6f, 0x6d, + 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x73, 0x42, 0x0b, 0x50, 0x72, 0x65, 0x73, 
0x74, 0x6f, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, + 0x02, 0x50, 0x01, 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x76, + 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0xa2, 0x02, 0x03, 0x46, 0x50, 0x58, + 0xaa, 0x02, 0x11, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x50, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x73, 0xca, 0x02, 0x11, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, + 0x5c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0xe2, 0x02, 0x1d, 0x46, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x5c, 0x47, 0x50, 0x42, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x12, 0x46, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x3a, 0x3a, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_flyteidl2_plugins_presto_proto_rawDescOnce sync.Once + file_flyteidl2_plugins_presto_proto_rawDescData = file_flyteidl2_plugins_presto_proto_rawDesc +) + +func file_flyteidl2_plugins_presto_proto_rawDescGZIP() []byte { + file_flyteidl2_plugins_presto_proto_rawDescOnce.Do(func() { + file_flyteidl2_plugins_presto_proto_rawDescData = protoimpl.X.CompressGZIP(file_flyteidl2_plugins_presto_proto_rawDescData) + }) + return file_flyteidl2_plugins_presto_proto_rawDescData +} + +var file_flyteidl2_plugins_presto_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_flyteidl2_plugins_presto_proto_goTypes = []interface{}{ + (*PrestoQuery)(nil), // 0: flyteidl2.plugins.PrestoQuery +} +var file_flyteidl2_plugins_presto_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] 
is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_flyteidl2_plugins_presto_proto_init() } +func file_flyteidl2_plugins_presto_proto_init() { + if File_flyteidl2_plugins_presto_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_flyteidl2_plugins_presto_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PrestoQuery); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_flyteidl2_plugins_presto_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_flyteidl2_plugins_presto_proto_goTypes, + DependencyIndexes: file_flyteidl2_plugins_presto_proto_depIdxs, + MessageInfos: file_flyteidl2_plugins_presto_proto_msgTypes, + }.Build() + File_flyteidl2_plugins_presto_proto = out.File + file_flyteidl2_plugins_presto_proto_rawDesc = nil + file_flyteidl2_plugins_presto_proto_goTypes = nil + file_flyteidl2_plugins_presto_proto_depIdxs = nil +} diff --git a/gen/go/flyteidl2/plugins/presto.pb.validate.go b/gen/go/flyteidl2/plugins/presto.pb.validate.go new file mode 100644 index 0000000000..5f3a87353e --- /dev/null +++ b/gen/go/flyteidl2/plugins/presto.pb.validate.go @@ -0,0 +1,143 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. 
+// source: flyteidl2/plugins/presto.proto + +package plugins + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on PrestoQuery with the rules defined in +// the proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *PrestoQuery) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on PrestoQuery with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in PrestoQueryMultiError, or +// nil if none found. +func (m *PrestoQuery) ValidateAll() error { + return m.validate(true) +} + +func (m *PrestoQuery) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for RoutingGroup + + // no validation rules for Catalog + + // no validation rules for Schema + + // no validation rules for Statement + + if len(errors) > 0 { + return PrestoQueryMultiError(errors) + } + + return nil +} + +// PrestoQueryMultiError is an error wrapping multiple validation errors +// returned by PrestoQuery.ValidateAll() if the designated constraints aren't met. +type PrestoQueryMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m PrestoQueryMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m PrestoQueryMultiError) AllErrors() []error { return m } + +// PrestoQueryValidationError is the validation error returned by +// PrestoQuery.Validate if the designated constraints aren't met. +type PrestoQueryValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e PrestoQueryValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e PrestoQueryValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e PrestoQueryValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e PrestoQueryValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e PrestoQueryValidationError) ErrorName() string { return "PrestoQueryValidationError" } + +// Error satisfies the builtin error interface +func (e PrestoQueryValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sPrestoQuery.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = PrestoQueryValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = PrestoQueryValidationError{} diff --git a/gen/go/flyteidl2/plugins/qubole.pb.go b/gen/go/flyteidl2/plugins/qubole.pb.go new file mode 100644 index 0000000000..fa182c16cf --- /dev/null +++ b/gen/go/flyteidl2/plugins/qubole.pb.go @@ -0,0 +1,346 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.30.0 +// protoc (unknown) +// source: flyteidl2/plugins/qubole.proto + +package plugins + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Defines a query to execute on a hive cluster. +type HiveQuery struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Query string `protobuf:"bytes,1,opt,name=query,proto3" json:"query,omitempty"` + TimeoutSec uint32 `protobuf:"varint,2,opt,name=timeout_sec,json=timeoutSec,proto3" json:"timeout_sec,omitempty"` + RetryCount uint32 `protobuf:"varint,3,opt,name=retryCount,proto3" json:"retryCount,omitempty"` +} + +func (x *HiveQuery) Reset() { + *x = HiveQuery{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_plugins_qubole_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HiveQuery) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HiveQuery) ProtoMessage() {} + +func (x *HiveQuery) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_plugins_qubole_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HiveQuery.ProtoReflect.Descriptor instead. 
+func (*HiveQuery) Descriptor() ([]byte, []int) { + return file_flyteidl2_plugins_qubole_proto_rawDescGZIP(), []int{0} +} + +func (x *HiveQuery) GetQuery() string { + if x != nil { + return x.Query + } + return "" +} + +func (x *HiveQuery) GetTimeoutSec() uint32 { + if x != nil { + return x.TimeoutSec + } + return 0 +} + +func (x *HiveQuery) GetRetryCount() uint32 { + if x != nil { + return x.RetryCount + } + return 0 +} + +// Defines a collection of hive queries. +type HiveQueryCollection struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Queries []*HiveQuery `protobuf:"bytes,2,rep,name=queries,proto3" json:"queries,omitempty"` +} + +func (x *HiveQueryCollection) Reset() { + *x = HiveQueryCollection{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_plugins_qubole_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HiveQueryCollection) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HiveQueryCollection) ProtoMessage() {} + +func (x *HiveQueryCollection) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_plugins_qubole_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HiveQueryCollection.ProtoReflect.Descriptor instead. 
+func (*HiveQueryCollection) Descriptor() ([]byte, []int) { + return file_flyteidl2_plugins_qubole_proto_rawDescGZIP(), []int{1} +} + +func (x *HiveQueryCollection) GetQueries() []*HiveQuery { + if x != nil { + return x.Queries + } + return nil +} + +// This message works with the 'hive' task type in the SDK and is the object that will be in the 'custom' field +// of a hive task's TaskTemplate +type QuboleHiveJob struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ClusterLabel string `protobuf:"bytes,1,opt,name=cluster_label,json=clusterLabel,proto3" json:"cluster_label,omitempty"` + // Deprecated: Marked as deprecated in flyteidl2/plugins/qubole.proto. + QueryCollection *HiveQueryCollection `protobuf:"bytes,2,opt,name=query_collection,json=queryCollection,proto3" json:"query_collection,omitempty"` + Tags []string `protobuf:"bytes,3,rep,name=tags,proto3" json:"tags,omitempty"` + Query *HiveQuery `protobuf:"bytes,4,opt,name=query,proto3" json:"query,omitempty"` +} + +func (x *QuboleHiveJob) Reset() { + *x = QuboleHiveJob{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_plugins_qubole_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QuboleHiveJob) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QuboleHiveJob) ProtoMessage() {} + +func (x *QuboleHiveJob) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_plugins_qubole_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QuboleHiveJob.ProtoReflect.Descriptor instead. 
+func (*QuboleHiveJob) Descriptor() ([]byte, []int) { + return file_flyteidl2_plugins_qubole_proto_rawDescGZIP(), []int{2} +} + +func (x *QuboleHiveJob) GetClusterLabel() string { + if x != nil { + return x.ClusterLabel + } + return "" +} + +// Deprecated: Marked as deprecated in flyteidl2/plugins/qubole.proto. +func (x *QuboleHiveJob) GetQueryCollection() *HiveQueryCollection { + if x != nil { + return x.QueryCollection + } + return nil +} + +func (x *QuboleHiveJob) GetTags() []string { + if x != nil { + return x.Tags + } + return nil +} + +func (x *QuboleHiveJob) GetQuery() *HiveQuery { + if x != nil { + return x.Query + } + return nil +} + +var File_flyteidl2_plugins_qubole_proto protoreflect.FileDescriptor + +var file_flyteidl2_plugins_qubole_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x73, 0x2f, 0x71, 0x75, 0x62, 0x6f, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x11, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x73, 0x22, 0x62, 0x0a, 0x09, 0x48, 0x69, 0x76, 0x65, 0x51, 0x75, 0x65, 0x72, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, + 0x74, 0x5f, 0x73, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x74, 0x69, 0x6d, + 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x12, 0x1e, 0x0a, 0x0a, 0x72, 0x65, 0x74, 0x72, 0x79, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x72, 0x65, 0x74, + 0x72, 0x79, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x4d, 0x0a, 0x13, 0x48, 0x69, 0x76, 0x65, 0x51, + 0x75, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x36, + 0x0a, 0x07, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x66, 0x6c, 0x79, 0x74, 
0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x73, 0x2e, 0x48, 0x69, 0x76, 0x65, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x07, 0x71, + 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x22, 0xd3, 0x01, 0x0a, 0x0d, 0x51, 0x75, 0x62, 0x6f, 0x6c, + 0x65, 0x48, 0x69, 0x76, 0x65, 0x4a, 0x6f, 0x62, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x55, 0x0a, + 0x10, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x48, 0x69, 0x76, 0x65, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x42, + 0x02, 0x18, 0x01, 0x52, 0x0f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x32, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x48, 0x69, 0x76, 0x65, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x42, 0xc2, 0x01, 0x0a, + 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x42, 0x0b, 0x51, 0x75, 0x62, 0x6f, 0x6c, 0x65, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x48, 0x02, 0x50, 0x01, 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 
0x2f, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0xa2, 0x02, + 0x03, 0x46, 0x50, 0x58, 0xaa, 0x02, 0x11, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, + 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0xca, 0x02, 0x11, 0x46, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0xe2, 0x02, 0x1d, 0x46, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, + 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x12, 0x46, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x3a, 0x3a, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_flyteidl2_plugins_qubole_proto_rawDescOnce sync.Once + file_flyteidl2_plugins_qubole_proto_rawDescData = file_flyteidl2_plugins_qubole_proto_rawDesc +) + +func file_flyteidl2_plugins_qubole_proto_rawDescGZIP() []byte { + file_flyteidl2_plugins_qubole_proto_rawDescOnce.Do(func() { + file_flyteidl2_plugins_qubole_proto_rawDescData = protoimpl.X.CompressGZIP(file_flyteidl2_plugins_qubole_proto_rawDescData) + }) + return file_flyteidl2_plugins_qubole_proto_rawDescData +} + +var file_flyteidl2_plugins_qubole_proto_msgTypes = make([]protoimpl.MessageInfo, 3) +var file_flyteidl2_plugins_qubole_proto_goTypes = []interface{}{ + (*HiveQuery)(nil), // 0: flyteidl2.plugins.HiveQuery + (*HiveQueryCollection)(nil), // 1: flyteidl2.plugins.HiveQueryCollection + (*QuboleHiveJob)(nil), // 2: flyteidl2.plugins.QuboleHiveJob +} +var file_flyteidl2_plugins_qubole_proto_depIdxs = []int32{ + 0, // 0: flyteidl2.plugins.HiveQueryCollection.queries:type_name -> flyteidl2.plugins.HiveQuery + 1, // 1: flyteidl2.plugins.QuboleHiveJob.query_collection:type_name -> flyteidl2.plugins.HiveQueryCollection + 0, // 2: flyteidl2.plugins.QuboleHiveJob.query:type_name -> flyteidl2.plugins.HiveQuery + 3, // [3:3] is the sub-list for 
method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_flyteidl2_plugins_qubole_proto_init() } +func file_flyteidl2_plugins_qubole_proto_init() { + if File_flyteidl2_plugins_qubole_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_flyteidl2_plugins_qubole_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HiveQuery); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_plugins_qubole_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HiveQueryCollection); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_flyteidl2_plugins_qubole_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QuboleHiveJob); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_flyteidl2_plugins_qubole_proto_rawDesc, + NumEnums: 0, + NumMessages: 3, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_flyteidl2_plugins_qubole_proto_goTypes, + DependencyIndexes: file_flyteidl2_plugins_qubole_proto_depIdxs, + MessageInfos: file_flyteidl2_plugins_qubole_proto_msgTypes, + }.Build() + File_flyteidl2_plugins_qubole_proto = out.File + file_flyteidl2_plugins_qubole_proto_rawDesc = nil + file_flyteidl2_plugins_qubole_proto_goTypes = nil + file_flyteidl2_plugins_qubole_proto_depIdxs = nil +} diff --git a/gen/go/flyteidl2/plugins/qubole.pb.validate.go 
b/gen/go/flyteidl2/plugins/qubole.pb.validate.go new file mode 100644 index 0000000000..aaf7ffbc51 --- /dev/null +++ b/gen/go/flyteidl2/plugins/qubole.pb.validate.go @@ -0,0 +1,437 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: flyteidl2/plugins/qubole.proto + +package plugins + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on HiveQuery with the rules defined in the +// proto definition for this message. If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *HiveQuery) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HiveQuery with the rules defined in +// the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in HiveQueryMultiError, or nil +// if none found. +func (m *HiveQuery) ValidateAll() error { + return m.validate(true) +} + +func (m *HiveQuery) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Query + + // no validation rules for TimeoutSec + + // no validation rules for RetryCount + + if len(errors) > 0 { + return HiveQueryMultiError(errors) + } + + return nil +} + +// HiveQueryMultiError is an error wrapping multiple validation errors returned +// by HiveQuery.ValidateAll() if the designated constraints aren't met. +type HiveQueryMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m HiveQueryMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HiveQueryMultiError) AllErrors() []error { return m } + +// HiveQueryValidationError is the validation error returned by +// HiveQuery.Validate if the designated constraints aren't met. +type HiveQueryValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HiveQueryValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HiveQueryValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HiveQueryValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HiveQueryValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HiveQueryValidationError) ErrorName() string { return "HiveQueryValidationError" } + +// Error satisfies the builtin error interface +func (e HiveQueryValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHiveQuery.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HiveQueryValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HiveQueryValidationError{} + +// Validate checks the field values on HiveQueryCollection with the rules +// defined in the proto definition for this message. If any rules are +// violated, the first error encountered is returned, or nil if there are no violations. 
+func (m *HiveQueryCollection) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on HiveQueryCollection with the rules +// defined in the proto definition for this message. If any rules are +// violated, the result is a list of violation errors wrapped in +// HiveQueryCollectionMultiError, or nil if none found. +func (m *HiveQueryCollection) ValidateAll() error { + return m.validate(true) +} + +func (m *HiveQueryCollection) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + for idx, item := range m.GetQueries() { + _, _ = idx, item + + if all { + switch v := interface{}(item).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, HiveQueryCollectionValidationError{ + field: fmt.Sprintf("Queries[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, HiveQueryCollectionValidationError{ + field: fmt.Sprintf("Queries[%v]", idx), + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(item).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return HiveQueryCollectionValidationError{ + field: fmt.Sprintf("Queries[%v]", idx), + reason: "embedded message failed validation", + cause: err, + } + } + } + + } + + if len(errors) > 0 { + return HiveQueryCollectionMultiError(errors) + } + + return nil +} + +// HiveQueryCollectionMultiError is an error wrapping multiple validation +// errors returned by HiveQueryCollection.ValidateAll() if the designated +// constraints aren't met. +type HiveQueryCollectionMultiError []error + +// Error returns a concatenation of all the error messages it wraps. 
+func (m HiveQueryCollectionMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m HiveQueryCollectionMultiError) AllErrors() []error { return m } + +// HiveQueryCollectionValidationError is the validation error returned by +// HiveQueryCollection.Validate if the designated constraints aren't met. +type HiveQueryCollectionValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e HiveQueryCollectionValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e HiveQueryCollectionValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e HiveQueryCollectionValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e HiveQueryCollectionValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e HiveQueryCollectionValidationError) ErrorName() string { + return "HiveQueryCollectionValidationError" +} + +// Error satisfies the builtin error interface +func (e HiveQueryCollectionValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sHiveQueryCollection.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = HiveQueryCollectionValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = HiveQueryCollectionValidationError{} + +// Validate checks the field values on QuboleHiveJob with the rules defined in +// the proto definition for this message. 
If any rules are violated, the first +// error encountered is returned, or nil if there are no violations. +func (m *QuboleHiveJob) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on QuboleHiveJob with the rules defined +// in the proto definition for this message. If any rules are violated, the +// result is a list of violation errors wrapped in QuboleHiveJobMultiError, or +// nil if none found. +func (m *QuboleHiveJob) ValidateAll() error { + return m.validate(true) +} + +func (m *QuboleHiveJob) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for ClusterLabel + + if all { + switch v := interface{}(m.GetQueryCollection()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, QuboleHiveJobValidationError{ + field: "QueryCollection", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, QuboleHiveJobValidationError{ + field: "QueryCollection", + reason: "embedded message failed validation", + cause: err, + }) + } + } + } else if v, ok := interface{}(m.GetQueryCollection()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return QuboleHiveJobValidationError{ + field: "QueryCollection", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if all { + switch v := interface{}(m.GetQuery()).(type) { + case interface{ ValidateAll() error }: + if err := v.ValidateAll(); err != nil { + errors = append(errors, QuboleHiveJobValidationError{ + field: "Query", + reason: "embedded message failed validation", + cause: err, + }) + } + case interface{ Validate() error }: + if err := v.Validate(); err != nil { + errors = append(errors, QuboleHiveJobValidationError{ + field: "Query", + reason: "embedded message failed validation", + cause: err, + 
}) + } + } + } else if v, ok := interface{}(m.GetQuery()).(interface{ Validate() error }); ok { + if err := v.Validate(); err != nil { + return QuboleHiveJobValidationError{ + field: "Query", + reason: "embedded message failed validation", + cause: err, + } + } + } + + if len(errors) > 0 { + return QuboleHiveJobMultiError(errors) + } + + return nil +} + +// QuboleHiveJobMultiError is an error wrapping multiple validation errors +// returned by QuboleHiveJob.ValidateAll() if the designated constraints +// aren't met. +type QuboleHiveJobMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m QuboleHiveJobMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m QuboleHiveJobMultiError) AllErrors() []error { return m } + +// QuboleHiveJobValidationError is the validation error returned by +// QuboleHiveJob.Validate if the designated constraints aren't met. +type QuboleHiveJobValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e QuboleHiveJobValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e QuboleHiveJobValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e QuboleHiveJobValidationError) Cause() error { return e.cause } + +// Key function returns key value. +func (e QuboleHiveJobValidationError) Key() bool { return e.key } + +// ErrorName returns error name. 
+func (e QuboleHiveJobValidationError) ErrorName() string { return "QuboleHiveJobValidationError" } + +// Error satisfies the builtin error interface +func (e QuboleHiveJobValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sQuboleHiveJob.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = QuboleHiveJobValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = QuboleHiveJobValidationError{} diff --git a/gen/go/flyteidl2/plugins/tensorflow.pb.go b/gen/go/flyteidl2/plugins/tensorflow.pb.go new file mode 100644 index 0000000000..1d76275d01 --- /dev/null +++ b/gen/go/flyteidl2/plugins/tensorflow.pb.go @@ -0,0 +1,194 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.30.0 +// protoc (unknown) +// source: flyteidl2/plugins/tensorflow.proto + +package plugins + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Custom proto for plugin that enables distributed training using https://github.com/kubeflow/tf-operator +type DistributedTensorflowTrainingTask struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // number of worker replicas spawned in the cluster for this job + Workers int32 `protobuf:"varint,1,opt,name=workers,proto3" json:"workers,omitempty"` + // PS -> Parameter server + // number of ps replicas spawned in the cluster for this job + PsReplicas int32 `protobuf:"varint,2,opt,name=ps_replicas,json=psReplicas,proto3" json:"ps_replicas,omitempty"` + // number of chief replicas spawned in the cluster for this job + ChiefReplicas int32 `protobuf:"varint,3,opt,name=chief_replicas,json=chiefReplicas,proto3" json:"chief_replicas,omitempty"` + // number of evaluator replicas spawned in the cluster for this job + EvaluatorReplicas int32 `protobuf:"varint,4,opt,name=evaluator_replicas,json=evaluatorReplicas,proto3" json:"evaluator_replicas,omitempty"` +} + +func (x *DistributedTensorflowTrainingTask) Reset() { + *x = DistributedTensorflowTrainingTask{} + if protoimpl.UnsafeEnabled { + mi := &file_flyteidl2_plugins_tensorflow_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DistributedTensorflowTrainingTask) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DistributedTensorflowTrainingTask) ProtoMessage() {} + +func (x *DistributedTensorflowTrainingTask) ProtoReflect() protoreflect.Message { + mi := &file_flyteidl2_plugins_tensorflow_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DistributedTensorflowTrainingTask.ProtoReflect.Descriptor instead. 
+func (*DistributedTensorflowTrainingTask) Descriptor() ([]byte, []int) { + return file_flyteidl2_plugins_tensorflow_proto_rawDescGZIP(), []int{0} +} + +func (x *DistributedTensorflowTrainingTask) GetWorkers() int32 { + if x != nil { + return x.Workers + } + return 0 +} + +func (x *DistributedTensorflowTrainingTask) GetPsReplicas() int32 { + if x != nil { + return x.PsReplicas + } + return 0 +} + +func (x *DistributedTensorflowTrainingTask) GetChiefReplicas() int32 { + if x != nil { + return x.ChiefReplicas + } + return 0 +} + +func (x *DistributedTensorflowTrainingTask) GetEvaluatorReplicas() int32 { + if x != nil { + return x.EvaluatorReplicas + } + return 0 +} + +var File_flyteidl2_plugins_tensorflow_proto protoreflect.FileDescriptor + +var file_flyteidl2_plugins_tensorflow_proto_rawDesc = []byte{ + 0x0a, 0x22, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x73, 0x2f, 0x74, 0x65, 0x6e, 0x73, 0x6f, 0x72, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x22, 0xb4, 0x01, 0x0a, 0x21, 0x44, 0x69, 0x73, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x54, 0x65, 0x6e, 0x73, 0x6f, 0x72, 0x66, 0x6c, 0x6f, + 0x77, 0x54, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x18, 0x0a, + 0x07, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, + 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x73, 0x5f, 0x72, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x70, 0x73, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x68, 0x69, 0x65, + 0x66, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x0d, 0x63, 0x68, 0x69, 0x65, 0x66, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, + 0x2d, 
0x0a, 0x12, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x11, 0x65, 0x76, 0x61, + 0x6c, 0x75, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x42, 0xc6, + 0x01, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, + 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x42, 0x0f, 0x54, 0x65, 0x6e, 0x73, 0x6f, 0x72, + 0x66, 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x48, 0x02, 0x50, 0x01, 0x5a, 0x35, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, + 0x72, 0x67, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, + 0x67, 0x6f, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x73, 0xa2, 0x02, 0x03, 0x46, 0x50, 0x58, 0xaa, 0x02, 0x11, 0x46, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0xca, 0x02, + 0x11, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x50, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x73, 0xe2, 0x02, 0x1d, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x50, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0xea, 0x02, 0x12, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x3a, 0x3a, + 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_flyteidl2_plugins_tensorflow_proto_rawDescOnce sync.Once + file_flyteidl2_plugins_tensorflow_proto_rawDescData = file_flyteidl2_plugins_tensorflow_proto_rawDesc +) + +func file_flyteidl2_plugins_tensorflow_proto_rawDescGZIP() []byte { + file_flyteidl2_plugins_tensorflow_proto_rawDescOnce.Do(func() { + file_flyteidl2_plugins_tensorflow_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_flyteidl2_plugins_tensorflow_proto_rawDescData) + }) + return file_flyteidl2_plugins_tensorflow_proto_rawDescData +} + +var file_flyteidl2_plugins_tensorflow_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_flyteidl2_plugins_tensorflow_proto_goTypes = []interface{}{ + (*DistributedTensorflowTrainingTask)(nil), // 0: flyteidl2.plugins.DistributedTensorflowTrainingTask +} +var file_flyteidl2_plugins_tensorflow_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_flyteidl2_plugins_tensorflow_proto_init() } +func file_flyteidl2_plugins_tensorflow_proto_init() { + if File_flyteidl2_plugins_tensorflow_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_flyteidl2_plugins_tensorflow_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DistributedTensorflowTrainingTask); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_flyteidl2_plugins_tensorflow_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_flyteidl2_plugins_tensorflow_proto_goTypes, + DependencyIndexes: file_flyteidl2_plugins_tensorflow_proto_depIdxs, + MessageInfos: file_flyteidl2_plugins_tensorflow_proto_msgTypes, + }.Build() + File_flyteidl2_plugins_tensorflow_proto = out.File + file_flyteidl2_plugins_tensorflow_proto_rawDesc = nil + file_flyteidl2_plugins_tensorflow_proto_goTypes = nil + file_flyteidl2_plugins_tensorflow_proto_depIdxs = nil +} diff --git 
a/gen/go/flyteidl2/plugins/tensorflow.pb.validate.go b/gen/go/flyteidl2/plugins/tensorflow.pb.validate.go new file mode 100644 index 0000000000..2a77a24097 --- /dev/null +++ b/gen/go/flyteidl2/plugins/tensorflow.pb.validate.go @@ -0,0 +1,149 @@ +// Code generated by protoc-gen-validate. DO NOT EDIT. +// source: flyteidl2/plugins/tensorflow.proto + +package plugins + +import ( + "bytes" + "errors" + "fmt" + "net" + "net/mail" + "net/url" + "regexp" + "sort" + "strings" + "time" + "unicode/utf8" + + "google.golang.org/protobuf/types/known/anypb" +) + +// ensure the imports are used +var ( + _ = bytes.MinRead + _ = errors.New("") + _ = fmt.Print + _ = utf8.UTFMax + _ = (*regexp.Regexp)(nil) + _ = (*strings.Reader)(nil) + _ = net.IPv4len + _ = time.Duration(0) + _ = (*url.URL)(nil) + _ = (*mail.Address)(nil) + _ = anypb.Any{} + _ = sort.Sort +) + +// Validate checks the field values on DistributedTensorflowTrainingTask with +// the rules defined in the proto definition for this message. If any rules +// are violated, the first error encountered is returned, or nil if there are +// no violations. +func (m *DistributedTensorflowTrainingTask) Validate() error { + return m.validate(false) +} + +// ValidateAll checks the field values on DistributedTensorflowTrainingTask +// with the rules defined in the proto definition for this message. If any +// rules are violated, the result is a list of violation errors wrapped in +// DistributedTensorflowTrainingTaskMultiError, or nil if none found. 
+func (m *DistributedTensorflowTrainingTask) ValidateAll() error { + return m.validate(true) +} + +func (m *DistributedTensorflowTrainingTask) validate(all bool) error { + if m == nil { + return nil + } + + var errors []error + + // no validation rules for Workers + + // no validation rules for PsReplicas + + // no validation rules for ChiefReplicas + + // no validation rules for EvaluatorReplicas + + if len(errors) > 0 { + return DistributedTensorflowTrainingTaskMultiError(errors) + } + + return nil +} + +// DistributedTensorflowTrainingTaskMultiError is an error wrapping multiple +// validation errors returned by +// DistributedTensorflowTrainingTask.ValidateAll() if the designated +// constraints aren't met. +type DistributedTensorflowTrainingTaskMultiError []error + +// Error returns a concatenation of all the error messages it wraps. +func (m DistributedTensorflowTrainingTaskMultiError) Error() string { + msgs := make([]string, 0, len(m)) + for _, err := range m { + msgs = append(msgs, err.Error()) + } + return strings.Join(msgs, "; ") +} + +// AllErrors returns a list of validation violation errors. +func (m DistributedTensorflowTrainingTaskMultiError) AllErrors() []error { return m } + +// DistributedTensorflowTrainingTaskValidationError is the validation error +// returned by DistributedTensorflowTrainingTask.Validate if the designated +// constraints aren't met. +type DistributedTensorflowTrainingTaskValidationError struct { + field string + reason string + cause error + key bool +} + +// Field function returns field value. +func (e DistributedTensorflowTrainingTaskValidationError) Field() string { return e.field } + +// Reason function returns reason value. +func (e DistributedTensorflowTrainingTaskValidationError) Reason() string { return e.reason } + +// Cause function returns cause value. +func (e DistributedTensorflowTrainingTaskValidationError) Cause() error { return e.cause } + +// Key function returns key value. 
+func (e DistributedTensorflowTrainingTaskValidationError) Key() bool { return e.key } + +// ErrorName returns error name. +func (e DistributedTensorflowTrainingTaskValidationError) ErrorName() string { + return "DistributedTensorflowTrainingTaskValidationError" +} + +// Error satisfies the builtin error interface +func (e DistributedTensorflowTrainingTaskValidationError) Error() string { + cause := "" + if e.cause != nil { + cause = fmt.Sprintf(" | caused by: %v", e.cause) + } + + key := "" + if e.key { + key = "key for " + } + + return fmt.Sprintf( + "invalid %sDistributedTensorflowTrainingTask.%s: %s%s", + key, + e.field, + e.reason, + cause) +} + +var _ error = DistributedTensorflowTrainingTaskValidationError{} + +var _ interface { + Field() string + Reason() string + Key() bool + Cause() error + ErrorName() string +} = DistributedTensorflowTrainingTaskValidationError{} diff --git a/gen/go/gateway/flyteidl2/app/app_service.swagger.json b/gen/go/gateway/flyteidl2/app/app_service.swagger.json index d1dcfdcec1..55e0c07de1 100644 --- a/gen/go/gateway/flyteidl2/app/app_service.swagger.json +++ b/gen/go/gateway/flyteidl2/app/app_service.swagger.json @@ -1099,7 +1099,7 @@ "type": "array", "items": { "type": "object", - "$ref": "#/definitions/coreKeyValuePair" + "$ref": "#/definitions/flyteidl2coreKeyValuePair" }, "description": "Environment variables will be set as the container is starting up." }, @@ -1107,7 +1107,7 @@ "type": "array", "items": { "type": "object", - "$ref": "#/definitions/coreKeyValuePair" + "$ref": "#/definitions/flyteidl2coreKeyValuePair" }, "description": "Allows extra configs to be available for the container.\nTODO: elaborate on how configs will become available.\nDeprecated, please use TaskTemplate.config instead." }, @@ -1278,20 +1278,6 @@ }, "description": "Defines a pod spec and additional pod metadata that is created when a task is executed." 
}, - "coreKeyValuePair": { - "type": "object", - "properties": { - "key": { - "type": "string", - "description": "required." - }, - "value": { - "type": "string", - "description": "+optional." - } - }, - "description": "A generic key value pair." - }, "coreLabelValue": { "type": "object", "properties": { @@ -1682,6 +1668,20 @@ }, "description": "Identity encapsulates the various security identities a task can run as. It's up to the underlying plugin to pick the\nright identity for the execution environment." }, + "flyteidl2coreKeyValuePair": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "required." + }, + "value": { + "type": "string", + "description": "+optional." + } + }, + "description": "A generic key value pair." + }, "flyteidl2coreSecret": { "type": "object", "properties": { diff --git a/gen/go/gateway/flyteidl2/cacheservice/cacheservice.swagger.json b/gen/go/gateway/flyteidl2/cacheservice/cacheservice.swagger.json new file mode 100644 index 0000000000..8b17041ebe --- /dev/null +++ b/gen/go/gateway/flyteidl2/cacheservice/cacheservice.swagger.json @@ -0,0 +1,686 @@ +{ + "swagger": "2.0", + "info": { + "title": "flyteidl2/cacheservice/cacheservice.proto", + "version": "version not set" + }, + "tags": [ + { + "name": "CacheService" + } + ], + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "paths": {}, + "definitions": { + "BlobTypeBlobDimensionality": { + "type": "string", + "enum": [ + "SINGLE", + "MULTIPART" + ], + "default": "SINGLE" + }, + "SchemaColumnSchemaColumnType": { + "type": "string", + "enum": [ + "INTEGER", + "FLOAT", + "STRING", + "BOOLEAN", + "DATETIME", + "DURATION" + ], + "default": "INTEGER" + }, + "SchemaTypeSchemaColumn": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "A unique name -within the schema type- for the column" + }, + "type": { + "$ref": "#/definitions/SchemaColumnSchemaColumnType", + "description": "The column type. 
This allows a limited set of types currently." + } + } + }, + "StructuredDatasetTypeDatasetColumn": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "A unique name within the schema type for the column." + }, + "literal_type": { + "$ref": "#/definitions/coreLiteralType", + "description": "The column type." + } + } + }, + "cacheserviceCachedOutput": { + "type": "object", + "properties": { + "output_literals": { + "$ref": "#/definitions/coreLiteralMap", + "title": "Output literals" + }, + "output_uri": { + "type": "string", + "title": "URI to output data" + }, + "metadata": { + "$ref": "#/definitions/flyteidl2cacheserviceMetadata", + "title": "Associated metadata" + } + }, + "description": "Represents cached output, either as literals or an URI, with associated metadata." + }, + "cacheserviceDeleteCacheResponse": { + "type": "object", + "description": "Response message of cache deletion operation.\n\nEmpty, success indicated by no errors" + }, + "cacheserviceGetCacheResponse": { + "type": "object", + "properties": { + "output": { + "$ref": "#/definitions/cacheserviceCachedOutput", + "title": "Cached output" + } + }, + "description": "Response with cached data for a given key." 
+ }, + "cacheserviceKeyMapMetadata": { + "type": "object", + "properties": { + "values": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "title": "Additional metadata as key-value pairs" + } + }, + "title": "Additional metadata as key-value pairs" + }, + "cacheserviceOverwriteOutput": { + "type": "object", + "properties": { + "overwrite": { + "type": "boolean", + "title": "Overwrite flag" + }, + "delete_blob": { + "type": "boolean", + "title": "Delete existing blob" + }, + "max_age": { + "type": "string", + "title": "Maximum age of the cached output since last update" + } + } + }, + "cacheservicePutCacheResponse": { + "type": "object", + "description": "Response message of cache store/update operation.\n\nEmpty, success indicated by no errors" + }, + "coreBinary": { + "type": "object", + "properties": { + "value": { + "type": "string", + "format": "byte", + "description": "Serialized data (MessagePack) for supported types like Dataclass, Pydantic BaseModel, and untyped dict." + }, + "tag": { + "type": "string", + "description": "The serialization format identifier (e.g., MessagePack). Consumers must define unique tags and validate them before deserialization." + } + }, + "description": "A simple byte array with a tag to help different parts of the system communicate about what is in the byte array.\nIt's strongly advisable that consumers of this type define a unique tag and validate the tag before parsing the data." + }, + "coreBlob": { + "type": "object", + "properties": { + "metadata": { + "$ref": "#/definitions/coreBlobMetadata" + }, + "uri": { + "type": "string" + } + }, + "description": "Refers to an offloaded set of files. It encapsulates the type of the store and a unique uri for where the data is.\nThere are no restrictions on how the uri is formatted since it will depend on how to interact with the store." 
+ }, + "coreBlobMetadata": { + "type": "object", + "properties": { + "type": { + "$ref": "#/definitions/coreBlobType" + } + } + }, + "coreBlobType": { + "type": "object", + "properties": { + "format": { + "type": "string", + "title": "Format can be a free form string understood by SDK/UI etc like\ncsv, parquet etc" + }, + "dimensionality": { + "$ref": "#/definitions/BlobTypeBlobDimensionality" + } + }, + "title": "Defines type behavior for blob objects" + }, + "coreError": { + "type": "object", + "properties": { + "failed_node_id": { + "type": "string", + "description": "The node id that threw the error." + }, + "message": { + "type": "string", + "description": "Error message thrown." + } + }, + "description": "Represents an error thrown from a node." + }, + "coreLiteral": { + "type": "object", + "properties": { + "scalar": { + "$ref": "#/definitions/coreScalar", + "description": "A simple value." + }, + "collection": { + "$ref": "#/definitions/coreLiteralCollection", + "description": "A collection of literals to allow nesting." + }, + "map": { + "$ref": "#/definitions/coreLiteralMap", + "description": "A map of strings to literals." + }, + "offloaded_metadata": { + "$ref": "#/definitions/coreLiteralOffloadedMetadata", + "description": "Offloaded literal metadata\nWhen you deserialize the offloaded metadata, it would be of Literal and its type would be defined by LiteralType stored in offloaded_metadata." + }, + "hash": { + "type": "string", + "title": "A hash representing this literal.\nThis is used for caching purposes. For more details refer to RFC 1893\n(https://github.com/flyteorg/flyte/blob/master/rfc/system/1893-caching-of-offloaded-objects.md)" + }, + "metadata": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "description": "Additional metadata for literals." + } + }, + "description": "A simple value. This supports any level of nesting (e.g. array of array of array of Blobs) as well as simple primitives." 
+ }, + "coreLiteralCollection": { + "type": "object", + "properties": { + "literals": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/coreLiteral" + } + } + }, + "description": "A collection of literals. This is a workaround since oneofs in proto messages cannot contain a repeated field." + }, + "coreLiteralMap": { + "type": "object", + "properties": { + "literals": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/coreLiteral" + } + } + }, + "description": "A map of literals. This is a workaround since oneofs in proto messages cannot contain a repeated field." + }, + "coreLiteralOffloadedMetadata": { + "type": "object", + "properties": { + "uri": { + "type": "string", + "description": "The location of the offloaded core.Literal." + }, + "size_bytes": { + "type": "string", + "format": "uint64", + "description": "The size of the offloaded data." + }, + "inferred_type": { + "$ref": "#/definitions/coreLiteralType", + "description": "The inferred literal type of the offloaded data." + } + }, + "description": "A message that contains the metadata of the offloaded data." + }, + "coreLiteralType": { + "type": "object", + "properties": { + "simple": { + "$ref": "#/definitions/coreSimpleType", + "description": "A simple type that can be compared one-to-one with another." + }, + "schema": { + "$ref": "#/definitions/coreSchemaType", + "description": "A complex type that requires matching of inner fields." + }, + "collection_type": { + "$ref": "#/definitions/coreLiteralType", + "description": "Defines the type of the value of a collection. Only homogeneous collections are allowed." + }, + "map_value_type": { + "$ref": "#/definitions/coreLiteralType", + "description": "Defines the type of the value of a map type. The type of the key is always a string." + }, + "blob": { + "$ref": "#/definitions/coreBlobType", + "description": "A blob might have specialized implementation details depending on associated metadata." 
+ }, + "enum_type": { + "$ref": "#/definitions/flyteidl2coreEnumType", + "description": "Defines an enum with pre-defined string values." + }, + "structured_dataset_type": { + "$ref": "#/definitions/coreStructuredDatasetType", + "title": "Generalized schema support" + }, + "union_type": { + "$ref": "#/definitions/coreUnionType", + "description": "Defines an union type with pre-defined LiteralTypes." + }, + "metadata": { + "type": "object", + "description": "This field contains type metadata that is descriptive of the type, but is NOT considered in type-checking. This might be used by\nconsumers to identify special behavior or display extended information for the type." + }, + "annotation": { + "$ref": "#/definitions/coreTypeAnnotation", + "description": "This field contains arbitrary data that might have special semantic\nmeaning for the client but does not effect internal flyte behavior." + }, + "structure": { + "$ref": "#/definitions/coreTypeStructure", + "description": "Hints to improve type matching." + } + }, + "description": "Defines a strong type to allow type checking between interfaces." + }, + "corePrimitive": { + "type": "object", + "properties": { + "integer": { + "type": "string", + "format": "int64" + }, + "float_value": { + "type": "number", + "format": "double" + }, + "string_value": { + "type": "string" + }, + "boolean": { + "type": "boolean" + }, + "datetime": { + "type": "string", + "format": "date-time" + }, + "duration": { + "type": "string" + } + }, + "title": "Primitive Types" + }, + "coreResourceType": { + "type": "string", + "enum": [ + "UNSPECIFIED", + "TASK", + "WORKFLOW", + "LAUNCH_PLAN", + "DATASET" + ], + "default": "UNSPECIFIED", + "description": "Indicates a resource type within Flyte.\n\n - DATASET: A dataset represents an entity modeled in Flyte DataCatalog. A Dataset is also a versioned entity and can be a compilation of multiple individual objects.\nEventually all Catalog objects should be modeled similar to Flyte Objects. 
The Dataset entities makes it possible for the UI and CLI to act on the objects\nin a similar manner to other Flyte objects" + }, + "coreScalar": { + "type": "object", + "properties": { + "primitive": { + "$ref": "#/definitions/corePrimitive" + }, + "blob": { + "$ref": "#/definitions/coreBlob" + }, + "binary": { + "$ref": "#/definitions/coreBinary" + }, + "schema": { + "$ref": "#/definitions/coreSchema" + }, + "none_type": { + "$ref": "#/definitions/coreVoid" + }, + "error": { + "$ref": "#/definitions/coreError" + }, + "generic": { + "type": "object" + }, + "structured_dataset": { + "$ref": "#/definitions/coreStructuredDataset" + }, + "union": { + "$ref": "#/definitions/coreUnion" + } + } + }, + "coreSchema": { + "type": "object", + "properties": { + "uri": { + "type": "string" + }, + "type": { + "$ref": "#/definitions/coreSchemaType" + } + }, + "description": "A strongly typed schema that defines the interface of data retrieved from the underlying storage medium." + }, + "coreSchemaType": { + "type": "object", + "properties": { + "columns": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/SchemaTypeSchemaColumn" + }, + "description": "A list of ordered columns this schema comprises of." + } + }, + "description": "Defines schema columns and types to strongly type-validate schemas interoperability." + }, + "coreSimpleType": { + "type": "string", + "enum": [ + "NONE", + "INTEGER", + "FLOAT", + "STRING", + "BOOLEAN", + "DATETIME", + "DURATION", + "BINARY", + "ERROR", + "STRUCT" + ], + "default": "NONE", + "description": "Define a set of simple types." + }, + "coreStructuredDataset": { + "type": "object", + "properties": { + "uri": { + "type": "string", + "title": "String location uniquely identifying where the data is.\nShould start with the storage location (e.g. 
s3://, gs://, bq://, etc.)" + }, + "metadata": { + "$ref": "#/definitions/coreStructuredDatasetMetadata" + } + } + }, + "coreStructuredDatasetMetadata": { + "type": "object", + "properties": { + "structured_dataset_type": { + "$ref": "#/definitions/coreStructuredDatasetType", + "description": "Bundle the type information along with the literal.\nThis is here because StructuredDatasets can often be more defined at run time than at compile time.\nThat is, at compile time you might only declare a task to return a pandas dataframe or a StructuredDataset,\nwithout any column information, but at run time, you might have that column information.\nflytekit python will copy this type information into the literal, from the type information, if not provided by\nthe various plugins (encoders).\nSince this field is run time generated, it's not used for any type checking." + } + } + }, + "coreStructuredDatasetType": { + "type": "object", + "properties": { + "columns": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/StructuredDatasetTypeDatasetColumn" + }, + "description": "A list of ordered columns this schema comprises of." + }, + "format": { + "type": "string", + "description": "This is the storage format, the format of the bits at rest\nparquet, feather, csv, etc.\nFor two types to be compatible, the format will need to be an exact match." + }, + "external_schema_type": { + "type": "string", + "description": "This is a string representing the type that the bytes in external_schema_bytes are formatted in.\nThis is an optional field that will not be used for type checking." + }, + "external_schema_bytes": { + "type": "string", + "format": "byte", + "description": "The serialized bytes of a third-party schema library like Arrow.\nThis is an optional field that will not be used for type checking." 
+ } + } + }, + "coreTypeAnnotation": { + "type": "object", + "properties": { + "annotations": { + "type": "object", + "description": "An arbitrary JSON payload to describe a type." + } + }, + "description": "TypeAnnotation encapsulates registration time information about a type. This can be used for various control-plane operations. TypeAnnotation will not be available at runtime when a task runs." + }, + "coreTypeStructure": { + "type": "object", + "properties": { + "tag": { + "type": "string", + "title": "Must exactly match for types to be castable" + }, + "dataclass_type": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/coreLiteralType" + }, + "title": "dataclass_type only exists for dataclasses.\nThis is used to resolve the type of the fields of dataclass\nThe key is the field name, and the value is the literal type of the field\ne.g. For dataclass Foo, with fields a, and a is a string\nFoo.a will be resolved as a literal type of string from dataclass_type" + } + }, + "description": "Hints to improve type matching\ne.g. allows distinguishing output from custom type transformers\neven if the underlying IDL serialization matches." + }, + "coreUnion": { + "type": "object", + "properties": { + "value": { + "$ref": "#/definitions/coreLiteral" + }, + "type": { + "$ref": "#/definitions/coreLiteralType" + } + }, + "description": "The runtime representation of a tagged union value. See `UnionType` for more details." + }, + "coreUnionType": { + "type": "object", + "properties": { + "variants": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/coreLiteralType" + }, + "description": "Predefined set of variants in union." + } + }, + "description": "Defines a tagged union type, also known as a variant (and formally as the sum type).\n\nA sum type S is defined by a sequence of types (A, B, C, ...), each tagged by a string tag\nA value of type S is constructed from a value of any of the variant types. 
The specific choice of type is recorded by\nstoring the variant's tag with the literal value and can be examined in runtime.\n\nType S is typically written as\nS := Apple A | Banana B | Cantaloupe C | ...\n\nNotably, a nullable (optional) type is a sum type between some type X and the singleton type representing a null-value:\nOptional X := X | Null\n\nSee also: https://en.wikipedia.org/wiki/Tagged_union" + }, + "coreVoid": { + "type": "object", + "description": "Used to denote a nil/null/None assignment to a scalar value. The underlying LiteralType for Void is intentionally\nundefined since it can be assigned to a scalar of any LiteralType." + }, + "flyteidl2cacheserviceGetOrExtendReservationResponse": { + "type": "object", + "properties": { + "reservation": { + "$ref": "#/definitions/flyteidl2cacheserviceReservation", + "title": "The reservation that was created or extended" + } + }, + "title": "Request to get or extend a reservation for a cache key" + }, + "flyteidl2cacheserviceMetadata": { + "type": "object", + "properties": { + "source_identifier": { + "$ref": "#/definitions/flyteidl2coreIdentifier", + "title": "Source task or workflow identifier" + }, + "key_map": { + "$ref": "#/definitions/cacheserviceKeyMapMetadata", + "title": "Additional metadata as key-value pairs" + }, + "created_at": { + "type": "string", + "format": "date-time", + "title": "Creation timestamp" + }, + "last_updated_at": { + "type": "string", + "format": "date-time", + "title": "Last update timestamp" + } + }, + "description": "Metadata for cached outputs, including the source identifier and timestamps." 
+ }, + "flyteidl2cacheserviceReleaseReservationResponse": { + "type": "object", + "description": "Response message of release reservation operation.\n\nEmpty, success indicated by no errors" + }, + "flyteidl2cacheserviceReservation": { + "type": "object", + "properties": { + "key": { + "type": "string", + "title": "The unique ID for the reservation - same as the cache key" + }, + "owner_id": { + "type": "string", + "title": "The unique ID of the owner for the reservation" + }, + "heartbeat_interval": { + "type": "string", + "title": "Requested reservation extension heartbeat interval" + }, + "expires_at": { + "type": "string", + "format": "date-time", + "title": "Expiration timestamp of this reservation" + } + }, + "description": "A reservation including owner, heartbeat interval, expiration timestamp, and various metadata." + }, + "flyteidl2coreEnumType": { + "type": "object", + "properties": { + "values": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Predefined set of enum values." + } + }, + "description": "Enables declaring enum types, with predefined string values\nFor len(values) \u003e 0, the first value in the ordered list is regarded as the default value. If you wish\nTo provide no defaults, make the first value as undefined." + }, + "flyteidl2coreIdentifier": { + "type": "object", + "properties": { + "resource_type": { + "$ref": "#/definitions/coreResourceType", + "description": "Identifies the specific type of resource that this identifier corresponds to." + }, + "project": { + "type": "string", + "description": "Name of the project the resource belongs to." + }, + "domain": { + "type": "string", + "description": "Name of the domain the resource belongs to.\nA domain can be considered as a subset within a specific project." + }, + "name": { + "type": "string", + "description": "User provided value for the resource." + }, + "version": { + "type": "string", + "description": "Specific version of the resource." 
+ }, + "org": { + "type": "string", + "description": "Optional, org key applied to the resource." + } + }, + "description": "Encapsulation of fields that uniquely identifies a Flyte resource." + }, + "googlerpcStatus": { + "type": "object", + "properties": { + "code": { + "type": "integer", + "format": "int32", + "description": "The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]." + }, + "message": { + "type": "string", + "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\n[google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client." + }, + "details": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/protobufAny" + }, + "description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use." + } + }, + "description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors)." + }, + "protobufAny": { + "type": "object", + "properties": { + "@type": { + "type": "string", + "description": "A URL/resource name that uniquely identifies the type of the serialized\nprotocol buffer message. This string must contain at least\none \"/\" character. The last segment of the URL's path must represent\nthe fully qualified name of the type (as in\n`path/google.protobuf.Duration`). The name should be in a canonical form\n(e.g., leading \".\" is not accepted).\n\nIn practice, teams usually precompile into the binary all types that they\nexpect it to use in the context of Any. 
However, for URLs which use the\nscheme `http`, `https`, or no scheme, one can optionally set up a type\nserver that maps type URLs to message definitions as follows:\n\n* If no scheme is provided, `https` is assumed.\n* An HTTP GET on the URL must yield a [google.protobuf.Type][]\n value in binary format, or produce an error.\n* Applications are allowed to cache lookup results based on the\n URL, or have them precompiled into a binary to avoid any\n lookup. Therefore, binary compatibility needs to be preserved\n on changes to types. (Use versioned type names to manage\n breaking changes.)\n\nNote: this functionality is not currently available in the official\nprotobuf release, and it is not used for type URLs beginning with\ntype.googleapis.com. As of May 2023, there are no widely used type server\nimplementations and no plans to implement one.\n\nSchemes other than `http`, `https` (or the empty scheme) might be\nused with implementation specific semantics." + } + }, + "additionalProperties": {}, + "description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n // or ...\n if (any.isSameTypeAs(Foo.getDefaultInstance())) {\n foo = any.unpack(Foo.getDefaultInstance());\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := 
anypb.New(foo)\n if err != nil {\n ...\n }\n ...\n foo := \u0026pb.Foo{}\n if err := any.UnmarshalTo(foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }" + }, + "protobufNullValue": { + "type": "string", + "enum": [ + "NULL_VALUE" + ], + "default": "NULL_VALUE", + "description": "`NullValue` is a singleton enumeration to represent the null value for the\n`Value` type union.\n\nThe JSON representation for `NullValue` is JSON `null`.\n\n - NULL_VALUE: Null value." 
+ } + } +} diff --git a/gen/go/gateway/flyteidl2/cacheservice/v2/cacheservice.swagger.json b/gen/go/gateway/flyteidl2/cacheservice/v2/cacheservice.swagger.json new file mode 100644 index 0000000000..97a0c86d60 --- /dev/null +++ b/gen/go/gateway/flyteidl2/cacheservice/v2/cacheservice.swagger.json @@ -0,0 +1,774 @@ +{ + "swagger": "2.0", + "info": { + "title": "flyteidl2/cacheservice/v2/cacheservice.proto", + "version": "version not set" + }, + "tags": [ + { + "name": "CacheService" + } + ], + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "paths": {}, + "definitions": { + "BlobTypeBlobDimensionality": { + "type": "string", + "enum": [ + "SINGLE", + "MULTIPART" + ], + "default": "SINGLE" + }, + "SchemaColumnSchemaColumnType": { + "type": "string", + "enum": [ + "INTEGER", + "FLOAT", + "STRING", + "BOOLEAN", + "DATETIME", + "DURATION" + ], + "default": "INTEGER" + }, + "SchemaTypeSchemaColumn": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "A unique name -within the schema type- for the column" + }, + "type": { + "$ref": "#/definitions/SchemaColumnSchemaColumnType", + "description": "The column type. This allows a limited set of types currently." + } + } + }, + "StructuredDatasetTypeDatasetColumn": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "A unique name within the schema type for the column." + }, + "literal_type": { + "$ref": "#/definitions/coreLiteralType", + "description": "The column type." 
+ } + } + }, + "cacheserviceCachedOutput": { + "type": "object", + "properties": { + "output_literals": { + "$ref": "#/definitions/coreLiteralMap", + "title": "Output literals" + }, + "output_uri": { + "type": "string", + "title": "URI to output data" + }, + "metadata": { + "$ref": "#/definitions/flyteidl2cacheserviceMetadata", + "title": "Associated metadata" + } + }, + "description": "Represents cached output, either as literals or an URI, with associated metadata." + }, + "cacheserviceDeleteCacheResponse": { + "type": "object", + "description": "Response message of cache deletion operation.\n\nEmpty, success indicated by no errors" + }, + "cacheserviceGetCacheResponse": { + "type": "object", + "properties": { + "output": { + "$ref": "#/definitions/cacheserviceCachedOutput", + "title": "Cached output" + } + }, + "description": "Response with cached data for a given key." + }, + "cacheserviceKeyMapMetadata": { + "type": "object", + "properties": { + "values": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "title": "Additional metadata as key-value pairs" + } + }, + "title": "Additional metadata as key-value pairs" + }, + "cacheserviceOverwriteOutput": { + "type": "object", + "properties": { + "overwrite": { + "type": "boolean", + "title": "Overwrite flag" + }, + "delete_blob": { + "type": "boolean", + "title": "Delete existing blob" + }, + "max_age": { + "type": "string", + "title": "Maximum age of the cached output since last update" + } + } + }, + "cacheservicePutCacheResponse": { + "type": "object", + "description": "Response message of cache store/update operation.\n\nEmpty, success indicated by no errors" + }, + "cacheservicev2Identifier": { + "type": "object", + "properties": { + "org": { + "type": "string", + "title": "Organization identifier" + }, + "project": { + "type": "string", + "title": "Project identifier" + }, + "domain": { + "type": "string", + "title": "Domain identifier" + } + }, + "description": "Identifier for 
cache operations, including org, project, and domain.\nThis is used to scope cache operations to specific organizational contexts." + }, + "coreBinary": { + "type": "object", + "properties": { + "value": { + "type": "string", + "format": "byte", + "description": "Serialized data (MessagePack) for supported types like Dataclass, Pydantic BaseModel, and untyped dict." + }, + "tag": { + "type": "string", + "description": "The serialization format identifier (e.g., MessagePack). Consumers must define unique tags and validate them before deserialization." + } + }, + "description": "A simple byte array with a tag to help different parts of the system communicate about what is in the byte array.\nIt's strongly advisable that consumers of this type define a unique tag and validate the tag before parsing the data." + }, + "coreBlob": { + "type": "object", + "properties": { + "metadata": { + "$ref": "#/definitions/coreBlobMetadata" + }, + "uri": { + "type": "string" + } + }, + "description": "Refers to an offloaded set of files. It encapsulates the type of the store and a unique uri for where the data is.\nThere are no restrictions on how the uri is formatted since it will depend on how to interact with the store." + }, + "coreBlobMetadata": { + "type": "object", + "properties": { + "type": { + "$ref": "#/definitions/coreBlobType" + } + } + }, + "coreBlobType": { + "type": "object", + "properties": { + "format": { + "type": "string", + "title": "Format can be a free form string understood by SDK/UI etc like\ncsv, parquet etc" + }, + "dimensionality": { + "$ref": "#/definitions/BlobTypeBlobDimensionality" + } + }, + "title": "Defines type behavior for blob objects" + }, + "coreError": { + "type": "object", + "properties": { + "failed_node_id": { + "type": "string", + "description": "The node id that threw the error." + }, + "message": { + "type": "string", + "description": "Error message thrown." + } + }, + "description": "Represents an error thrown from a node." 
+ }, + "coreLiteral": { + "type": "object", + "properties": { + "scalar": { + "$ref": "#/definitions/coreScalar", + "description": "A simple value." + }, + "collection": { + "$ref": "#/definitions/coreLiteralCollection", + "description": "A collection of literals to allow nesting." + }, + "map": { + "$ref": "#/definitions/coreLiteralMap", + "description": "A map of strings to literals." + }, + "offloaded_metadata": { + "$ref": "#/definitions/coreLiteralOffloadedMetadata", + "description": "Offloaded literal metadata\nWhen you deserialize the offloaded metadata, it would be of Literal and its type would be defined by LiteralType stored in offloaded_metadata." + }, + "hash": { + "type": "string", + "title": "A hash representing this literal.\nThis is used for caching purposes. For more details refer to RFC 1893\n(https://github.com/flyteorg/flyte/blob/master/rfc/system/1893-caching-of-offloaded-objects.md)" + }, + "metadata": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "description": "Additional metadata for literals." + } + }, + "description": "A simple value. This supports any level of nesting (e.g. array of array of array of Blobs) as well as simple primitives." + }, + "coreLiteralCollection": { + "type": "object", + "properties": { + "literals": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/coreLiteral" + } + } + }, + "description": "A collection of literals. This is a workaround since oneofs in proto messages cannot contain a repeated field." + }, + "coreLiteralMap": { + "type": "object", + "properties": { + "literals": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/coreLiteral" + } + } + }, + "description": "A map of literals. This is a workaround since oneofs in proto messages cannot contain a repeated field." 
+ }, + "coreLiteralOffloadedMetadata": { + "type": "object", + "properties": { + "uri": { + "type": "string", + "description": "The location of the offloaded core.Literal." + }, + "size_bytes": { + "type": "string", + "format": "uint64", + "description": "The size of the offloaded data." + }, + "inferred_type": { + "$ref": "#/definitions/coreLiteralType", + "description": "The inferred literal type of the offloaded data." + } + }, + "description": "A message that contains the metadata of the offloaded data." + }, + "coreLiteralType": { + "type": "object", + "properties": { + "simple": { + "$ref": "#/definitions/coreSimpleType", + "description": "A simple type that can be compared one-to-one with another." + }, + "schema": { + "$ref": "#/definitions/coreSchemaType", + "description": "A complex type that requires matching of inner fields." + }, + "collection_type": { + "$ref": "#/definitions/coreLiteralType", + "description": "Defines the type of the value of a collection. Only homogeneous collections are allowed." + }, + "map_value_type": { + "$ref": "#/definitions/coreLiteralType", + "description": "Defines the type of the value of a map type. The type of the key is always a string." + }, + "blob": { + "$ref": "#/definitions/coreBlobType", + "description": "A blob might have specialized implementation details depending on associated metadata." + }, + "enum_type": { + "$ref": "#/definitions/flyteidl2coreEnumType", + "description": "Defines an enum with pre-defined string values." + }, + "structured_dataset_type": { + "$ref": "#/definitions/coreStructuredDatasetType", + "title": "Generalized schema support" + }, + "union_type": { + "$ref": "#/definitions/coreUnionType", + "description": "Defines a union type with pre-defined LiteralTypes." + }, + "metadata": { + "type": "object", + "description": "This field contains type metadata that is descriptive of the type, but is NOT considered in type-checking. 
This might be used by\nconsumers to identify special behavior or display extended information for the type." + }, + "annotation": { + "$ref": "#/definitions/coreTypeAnnotation", + "description": "This field contains arbitrary data that might have special semantic\nmeaning for the client but does not affect internal flyte behavior." + }, + "structure": { + "$ref": "#/definitions/coreTypeStructure", + "description": "Hints to improve type matching." + } + }, + "description": "Defines a strong type to allow type checking between interfaces." + }, + "corePrimitive": { + "type": "object", + "properties": { + "integer": { + "type": "string", + "format": "int64" + }, + "float_value": { + "type": "number", + "format": "double" + }, + "string_value": { + "type": "string" + }, + "boolean": { + "type": "boolean" + }, + "datetime": { + "type": "string", + "format": "date-time" + }, + "duration": { + "type": "string" + } + }, + "title": "Primitive Types" + }, + "coreResourceType": { + "type": "string", + "enum": [ + "UNSPECIFIED", + "TASK", + "WORKFLOW", + "LAUNCH_PLAN", + "DATASET" + ], + "default": "UNSPECIFIED", + "description": "Indicates a resource type within Flyte.\n\n - DATASET: A dataset represents an entity modeled in Flyte DataCatalog. A Dataset is also a versioned entity and can be a compilation of multiple individual objects.\nEventually all Catalog objects should be modeled similar to Flyte Objects. 
The Dataset entities makes it possible for the UI and CLI to act on the objects\nin a similar manner to other Flyte objects" + }, + "coreScalar": { + "type": "object", + "properties": { + "primitive": { + "$ref": "#/definitions/corePrimitive" + }, + "blob": { + "$ref": "#/definitions/coreBlob" + }, + "binary": { + "$ref": "#/definitions/coreBinary" + }, + "schema": { + "$ref": "#/definitions/coreSchema" + }, + "none_type": { + "$ref": "#/definitions/coreVoid" + }, + "error": { + "$ref": "#/definitions/coreError" + }, + "generic": { + "type": "object" + }, + "structured_dataset": { + "$ref": "#/definitions/coreStructuredDataset" + }, + "union": { + "$ref": "#/definitions/coreUnion" + } + } + }, + "coreSchema": { + "type": "object", + "properties": { + "uri": { + "type": "string" + }, + "type": { + "$ref": "#/definitions/coreSchemaType" + } + }, + "description": "A strongly typed schema that defines the interface of data retrieved from the underlying storage medium." + }, + "coreSchemaType": { + "type": "object", + "properties": { + "columns": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/SchemaTypeSchemaColumn" + }, + "description": "A list of ordered columns this schema comprises of." + } + }, + "description": "Defines schema columns and types to strongly type-validate schemas interoperability." + }, + "coreSimpleType": { + "type": "string", + "enum": [ + "NONE", + "INTEGER", + "FLOAT", + "STRING", + "BOOLEAN", + "DATETIME", + "DURATION", + "BINARY", + "ERROR", + "STRUCT" + ], + "default": "NONE", + "description": "Define a set of simple types." + }, + "coreStructuredDataset": { + "type": "object", + "properties": { + "uri": { + "type": "string", + "title": "String location uniquely identifying where the data is.\nShould start with the storage location (e.g. 
s3://, gs://, bq://, etc.)" + }, + "metadata": { + "$ref": "#/definitions/coreStructuredDatasetMetadata" + } + } + }, + "coreStructuredDatasetMetadata": { + "type": "object", + "properties": { + "structured_dataset_type": { + "$ref": "#/definitions/coreStructuredDatasetType", + "description": "Bundle the type information along with the literal.\nThis is here because StructuredDatasets can often be more defined at run time than at compile time.\nThat is, at compile time you might only declare a task to return a pandas dataframe or a StructuredDataset,\nwithout any column information, but at run time, you might have that column information.\nflytekit python will copy this type information into the literal, from the type information, if not provided by\nthe various plugins (encoders).\nSince this field is run time generated, it's not used for any type checking." + } + } + }, + "coreStructuredDatasetType": { + "type": "object", + "properties": { + "columns": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/StructuredDatasetTypeDatasetColumn" + }, + "description": "A list of ordered columns this schema comprises of." + }, + "format": { + "type": "string", + "description": "This is the storage format, the format of the bits at rest\nparquet, feather, csv, etc.\nFor two types to be compatible, the format will need to be an exact match." + }, + "external_schema_type": { + "type": "string", + "description": "This is a string representing the type that the bytes in external_schema_bytes are formatted in.\nThis is an optional field that will not be used for type checking." + }, + "external_schema_bytes": { + "type": "string", + "format": "byte", + "description": "The serialized bytes of a third-party schema library like Arrow.\nThis is an optional field that will not be used for type checking." 
+ } + } + }, + "coreTypeAnnotation": { + "type": "object", + "properties": { + "annotations": { + "type": "object", + "description": "An arbitrary JSON payload to describe a type." + } + }, + "description": "TypeAnnotation encapsulates registration time information about a type. This can be used for various control-plane operations. TypeAnnotation will not be available at runtime when a task runs." + }, + "coreTypeStructure": { + "type": "object", + "properties": { + "tag": { + "type": "string", + "title": "Must exactly match for types to be castable" + }, + "dataclass_type": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/coreLiteralType" + }, + "title": "dataclass_type only exists for dataclasses.\nThis is used to resolve the type of the fields of dataclass\nThe key is the field name, and the value is the literal type of the field\ne.g. For dataclass Foo, with fields a, and a is a string\nFoo.a will be resolved as a literal type of string from dataclass_type" + } + }, + "description": "Hints to improve type matching\ne.g. allows distinguishing output from custom type transformers\neven if the underlying IDL serialization matches." + }, + "coreUnion": { + "type": "object", + "properties": { + "value": { + "$ref": "#/definitions/coreLiteral" + }, + "type": { + "$ref": "#/definitions/coreLiteralType" + } + }, + "description": "The runtime representation of a tagged union value. See `UnionType` for more details." + }, + "coreUnionType": { + "type": "object", + "properties": { + "variants": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/coreLiteralType" + }, + "description": "Predefined set of variants in union." + } + }, + "description": "Defines a tagged union type, also known as a variant (and formally as the sum type).\n\nA sum type S is defined by a sequence of types (A, B, C, ...), each tagged by a string tag\nA value of type S is constructed from a value of any of the variant types. 
The specific choice of type is recorded by\nstoring the variant's tag with the literal value and can be examined in runtime.\n\nType S is typically written as\nS := Apple A | Banana B | Cantaloupe C | ...\n\nNotably, a nullable (optional) type is a sum type between some type X and the singleton type representing a null-value:\nOptional X := X | Null\n\nSee also: https://en.wikipedia.org/wiki/Tagged_union" + }, + "coreVoid": { + "type": "object", + "description": "Used to denote a nil/null/None assignment to a scalar value. The underlying LiteralType for Void is intentionally\nundefined since it can be assigned to a scalar of any LiteralType." + }, + "flyteidl2cacheserviceDeleteCacheRequest": { + "type": "object", + "properties": { + "key": { + "type": "string", + "title": "Cache key" + } + }, + "description": "Request to delete cached data by key." + }, + "flyteidl2cacheserviceGetCacheRequest": { + "type": "object", + "properties": { + "key": { + "type": "string", + "title": "Cache key" + } + }, + "description": "Request to retrieve cached data by key." 
+ }, + "flyteidl2cacheserviceGetOrExtendReservationRequest": { + "type": "object", + "properties": { + "key": { + "type": "string", + "title": "The unique ID for the reservation - same as the cache key" + }, + "owner_id": { + "type": "string", + "title": "The unique ID of the owner for the reservation" + }, + "heartbeat_interval": { + "type": "string", + "title": "Requested reservation extension heartbeat interval" + } + }, + "title": "Request to get or extend a reservation for a cache key" + }, + "flyteidl2cacheserviceGetOrExtendReservationResponse": { + "type": "object", + "properties": { + "reservation": { + "$ref": "#/definitions/flyteidl2cacheserviceReservation", + "title": "The reservation that was created or extended" + } + }, + "title": "Response to a request to get or extend a reservation for a cache key" + }, + "flyteidl2cacheserviceMetadata": { + "type": "object", + "properties": { + "source_identifier": { + "$ref": "#/definitions/flyteidl2coreIdentifier", + "title": "Source task or workflow identifier" + }, + "key_map": { + "$ref": "#/definitions/cacheserviceKeyMapMetadata", + "title": "Additional metadata as key-value pairs" + }, + "created_at": { + "type": "string", + "format": "date-time", + "title": "Creation timestamp" + }, + "last_updated_at": { + "type": "string", + "format": "date-time", + "title": "Last update timestamp" + } + }, + "description": "Metadata for cached outputs, including the source identifier and timestamps." + }, + "flyteidl2cacheservicePutCacheRequest": { + "type": "object", + "properties": { + "key": { + "type": "string", + "title": "Cache key" + }, + "output": { + "$ref": "#/definitions/cacheserviceCachedOutput", + "title": "Output to cache" + }, + "overwrite": { + "$ref": "#/definitions/cacheserviceOverwriteOutput", + "title": "Overwrite flag if exists" + } + }, + "description": "Request to store/update cached data by key." 
+ }, + "flyteidl2cacheserviceReleaseReservationRequest": { + "type": "object", + "properties": { + "key": { + "type": "string", + "title": "The unique ID for the reservation - same as the cache key" + }, + "owner_id": { + "type": "string", + "title": "The unique ID of the owner for the reservation" + } + }, + "title": "Request to release the reservation for a cache key" + }, + "flyteidl2cacheserviceReleaseReservationResponse": { + "type": "object", + "description": "Response message of release reservation operation.\n\nEmpty, success indicated by no errors" + }, + "flyteidl2cacheserviceReservation": { + "type": "object", + "properties": { + "key": { + "type": "string", + "title": "The unique ID for the reservation - same as the cache key" + }, + "owner_id": { + "type": "string", + "title": "The unique ID of the owner for the reservation" + }, + "heartbeat_interval": { + "type": "string", + "title": "Requested reservation extension heartbeat interval" + }, + "expires_at": { + "type": "string", + "format": "date-time", + "title": "Expiration timestamp of this reservation" + } + }, + "description": "A reservation including owner, heartbeat interval, expiration timestamp, and various metadata." + }, + "flyteidl2coreEnumType": { + "type": "object", + "properties": { + "values": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Predefined set of enum values." + } + }, + "description": "Enables declaring enum types, with predefined string values\nFor len(values) \u003e 0, the first value in the ordered list is regarded as the default value. If you wish\nTo provide no defaults, make the first value as undefined." + }, + "flyteidl2coreIdentifier": { + "type": "object", + "properties": { + "resource_type": { + "$ref": "#/definitions/coreResourceType", + "description": "Identifies the specific type of resource that this identifier corresponds to." + }, + "project": { + "type": "string", + "description": "Name of the project the resource belongs to." 
+ }, + "domain": { + "type": "string", + "description": "Name of the domain the resource belongs to.\nA domain can be considered as a subset within a specific project." + }, + "name": { + "type": "string", + "description": "User provided value for the resource." + }, + "version": { + "type": "string", + "description": "Specific version of the resource." + }, + "org": { + "type": "string", + "description": "Optional, org key applied to the resource." + } + }, + "description": "Encapsulation of fields that uniquely identifies a Flyte resource." + }, + "googlerpcStatus": { + "type": "object", + "properties": { + "code": { + "type": "integer", + "format": "int32", + "description": "The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]." + }, + "message": { + "type": "string", + "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\n[google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client." + }, + "details": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/protobufAny" + }, + "description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use." + } + }, + "description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors)." + }, + "protobufAny": { + "type": "object", + "properties": { + "@type": { + "type": "string", + "description": "A URL/resource name that uniquely identifies the type of the serialized\nprotocol buffer message. 
This string must contain at least\none \"/\" character. The last segment of the URL's path must represent\nthe fully qualified name of the type (as in\n`path/google.protobuf.Duration`). The name should be in a canonical form\n(e.g., leading \".\" is not accepted).\n\nIn practice, teams usually precompile into the binary all types that they\nexpect it to use in the context of Any. However, for URLs which use the\nscheme `http`, `https`, or no scheme, one can optionally set up a type\nserver that maps type URLs to message definitions as follows:\n\n* If no scheme is provided, `https` is assumed.\n* An HTTP GET on the URL must yield a [google.protobuf.Type][]\n value in binary format, or produce an error.\n* Applications are allowed to cache lookup results based on the\n URL, or have them precompiled into a binary to avoid any\n lookup. Therefore, binary compatibility needs to be preserved\n on changes to types. (Use versioned type names to manage\n breaking changes.)\n\nNote: this functionality is not currently available in the official\nprotobuf release, and it is not used for type URLs beginning with\ntype.googleapis.com. As of May 2023, there are no widely used type server\nimplementations and no plans to implement one.\n\nSchemes other than `http`, `https` (or the empty scheme) might be\nused with implementation specific semantics." 
+ } + }, + "additionalProperties": {}, + "description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n // or ...\n if (any.isSameTypeAs(Foo.getDefaultInstance())) {\n foo = any.unpack(Foo.getDefaultInstance());\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := anypb.New(foo)\n if err != nil {\n ...\n }\n ...\n foo := \u0026pb.Foo{}\n if err := any.UnmarshalTo(foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. 
Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }" + }, + "protobufNullValue": { + "type": "string", + "enum": [ + "NULL_VALUE" + ], + "default": "NULL_VALUE", + "description": "`NullValue` is a singleton enumeration to represent the null value for the\n`Value` type union.\n\nThe JSON representation for `NullValue` is JSON `null`.\n\n - NULL_VALUE: Null value." + } + } +} diff --git a/gen/go/gateway/flyteidl2/common/configuration.swagger.json b/gen/go/gateway/flyteidl2/common/configuration.swagger.json new file mode 100644 index 0000000000..96e49806dc --- /dev/null +++ b/gen/go/gateway/flyteidl2/common/configuration.swagger.json @@ -0,0 +1,50 @@ +{ + "swagger": "2.0", + "info": { + "title": "flyteidl2/common/configuration.proto", + "version": "version not set" + }, + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "paths": {}, + "definitions": { + "googlerpcStatus": { + "type": "object", + "properties": { + "code": { + "type": "integer", + "format": "int32", + "description": "The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]." + }, + "message": { + "type": "string", + "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\n[google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client." 
+ }, + "details": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/protobufAny" + }, + "description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use." + } + }, + "description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors)." + }, + "protobufAny": { + "type": "object", + "properties": { + "@type": { + "type": "string", + "description": "A URL/resource name that uniquely identifies the type of the serialized\nprotocol buffer message. This string must contain at least\none \"/\" character. The last segment of the URL's path must represent\nthe fully qualified name of the type (as in\n`path/google.protobuf.Duration`). The name should be in a canonical form\n(e.g., leading \".\" is not accepted).\n\nIn practice, teams usually precompile into the binary all types that they\nexpect it to use in the context of Any. However, for URLs which use the\nscheme `http`, `https`, or no scheme, one can optionally set up a type\nserver that maps type URLs to message definitions as follows:\n\n* If no scheme is provided, `https` is assumed.\n* An HTTP GET on the URL must yield a [google.protobuf.Type][]\n value in binary format, or produce an error.\n* Applications are allowed to cache lookup results based on the\n URL, or have them precompiled into a binary to avoid any\n lookup. Therefore, binary compatibility needs to be preserved\n on changes to types. 
(Use versioned type names to manage\n breaking changes.)\n\nNote: this functionality is not currently available in the official\nprotobuf release, and it is not used for type URLs beginning with\ntype.googleapis.com. As of May 2023, there are no widely used type server\nimplementations and no plans to implement one.\n\nSchemes other than `http`, `https` (or the empty scheme) might be\nused with implementation specific semantics." + } + }, + "additionalProperties": {}, + "description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n // or ...\n if (any.isSameTypeAs(Foo.getDefaultInstance())) {\n foo = any.unpack(Foo.getDefaultInstance());\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := anypb.New(foo)\n if err != nil {\n ...\n }\n ...\n foo := \u0026pb.Foo{}\n if err := any.UnmarshalTo(foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with 
an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }" + } + } +} diff --git a/gen/go/gateway/flyteidl2/core/errors.swagger.json b/gen/go/gateway/flyteidl2/core/errors.swagger.json new file mode 100644 index 0000000000..24a8fb5678 --- /dev/null +++ b/gen/go/gateway/flyteidl2/core/errors.swagger.json @@ -0,0 +1,50 @@ +{ + "swagger": "2.0", + "info": { + "title": "flyteidl2/core/errors.proto", + "version": "version not set" + }, + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "paths": {}, + "definitions": { + "googlerpcStatus": { + "type": "object", + "properties": { + "code": { + "type": "integer", + "format": "int32", + "description": "The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]." + }, + "message": { + "type": "string", + "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\n[google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client." + }, + "details": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/protobufAny" + }, + "description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use." 
+ } + }, + "description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors)." + }, + "protobufAny": { + "type": "object", + "properties": { + "@type": { + "type": "string", + "description": "A URL/resource name that uniquely identifies the type of the serialized\nprotocol buffer message. This string must contain at least\none \"/\" character. The last segment of the URL's path must represent\nthe fully qualified name of the type (as in\n`path/google.protobuf.Duration`). The name should be in a canonical form\n(e.g., leading \".\" is not accepted).\n\nIn practice, teams usually precompile into the binary all types that they\nexpect it to use in the context of Any. However, for URLs which use the\nscheme `http`, `https`, or no scheme, one can optionally set up a type\nserver that maps type URLs to message definitions as follows:\n\n* If no scheme is provided, `https` is assumed.\n* An HTTP GET on the URL must yield a [google.protobuf.Type][]\n value in binary format, or produce an error.\n* Applications are allowed to cache lookup results based on the\n URL, or have them precompiled into a binary to avoid any\n lookup. Therefore, binary compatibility needs to be preserved\n on changes to types. (Use versioned type names to manage\n breaking changes.)\n\nNote: this functionality is not currently available in the official\nprotobuf release, and it is not used for type URLs beginning with\ntype.googleapis.com. 
As of May 2023, there are no widely used type server\nimplementations and no plans to implement one.\n\nSchemes other than `http`, `https` (or the empty scheme) might be\nused with implementation specific semantics." + } + }, + "additionalProperties": {}, + "description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n // or ...\n if (any.isSameTypeAs(Foo.getDefaultInstance())) {\n foo = any.unpack(Foo.getDefaultInstance());\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := anypb.New(foo)\n if err != nil {\n ...\n }\n ...\n foo := \u0026pb.Foo{}\n if err := any.UnmarshalTo(foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. 
Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }" + } + } +} diff --git a/gen/go/gateway/flyteidl2/datacatalog/datacatalog.swagger.json b/gen/go/gateway/flyteidl2/datacatalog/datacatalog.swagger.json new file mode 100644 index 0000000000..128e01b5e2 --- /dev/null +++ b/gen/go/gateway/flyteidl2/datacatalog/datacatalog.swagger.json @@ -0,0 +1,937 @@ +{ + "swagger": "2.0", + "info": { + "title": "flyteidl2/datacatalog/datacatalog.proto", + "version": "version not set" + }, + "tags": [ + { + "name": "DataCatalog" + } + ], + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "paths": {}, + "definitions": { + "BlobTypeBlobDimensionality": { + "type": "string", + "enum": [ + "SINGLE", + "MULTIPART" + ], + "default": "SINGLE" + }, + "PaginationOptionsSortKey": { + "type": "string", + "enum": [ + "CREATION_TIME" + ], + "default": "CREATION_TIME" + }, + "PaginationOptionsSortOrder": { + "type": "string", + "enum": [ + "DESCENDING", + "ASCENDING" + ], + "default": "DESCENDING" + }, + "SchemaColumnSchemaColumnType": { + "type": "string", + "enum": [ + "INTEGER", + "FLOAT", + "STRING", + "BOOLEAN", + "DATETIME", + "DURATION" + ], + "default": "INTEGER" + }, + "SchemaTypeSchemaColumn": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "A unique name -within the schema type- for the column" + }, + "type": { + "$ref": "#/definitions/SchemaColumnSchemaColumnType", + 
"description": "The column type. This allows a limited set of types currently." + } + } + }, + "SinglePropertyFilterComparisonOperator": { + "type": "string", + "enum": [ + "EQUALS" + ], + "default": "EQUALS", + "description": "as use-cases come up we can add more operators, ex: gte, like, not eq etc." + }, + "StructuredDatasetTypeDatasetColumn": { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "A unique name within the schema type for the column." + }, + "literal_type": { + "$ref": "#/definitions/coreLiteralType", + "description": "The column type." + } + } + }, + "coreBinary": { + "type": "object", + "properties": { + "value": { + "type": "string", + "format": "byte", + "description": "Serialized data (MessagePack) for supported types like Dataclass, Pydantic BaseModel, and untyped dict." + }, + "tag": { + "type": "string", + "description": "The serialization format identifier (e.g., MessagePack). Consumers must define unique tags and validate them before deserialization." + } + }, + "description": "A simple byte array with a tag to help different parts of the system communicate about what is in the byte array.\nIt's strongly advisable that consumers of this type define a unique tag and validate the tag before parsing the data." + }, + "coreBlob": { + "type": "object", + "properties": { + "metadata": { + "$ref": "#/definitions/coreBlobMetadata" + }, + "uri": { + "type": "string" + } + }, + "description": "Refers to an offloaded set of files. It encapsulates the type of the store and a unique uri for where the data is.\nThere are no restrictions on how the uri is formatted since it will depend on how to interact with the store." 
+ }, + "coreBlobMetadata": { + "type": "object", + "properties": { + "type": { + "$ref": "#/definitions/coreBlobType" + } + } + }, + "coreBlobType": { + "type": "object", + "properties": { + "format": { + "type": "string", + "title": "Format can be a free form string understood by SDK/UI etc like\ncsv, parquet etc" + }, + "dimensionality": { + "$ref": "#/definitions/BlobTypeBlobDimensionality" + } + }, + "title": "Defines type behavior for blob objects" + }, + "coreError": { + "type": "object", + "properties": { + "failed_node_id": { + "type": "string", + "description": "The node id that threw the error." + }, + "message": { + "type": "string", + "description": "Error message thrown." + } + }, + "description": "Represents an error thrown from a node." + }, + "coreLiteral": { + "type": "object", + "properties": { + "scalar": { + "$ref": "#/definitions/coreScalar", + "description": "A simple value." + }, + "collection": { + "$ref": "#/definitions/coreLiteralCollection", + "description": "A collection of literals to allow nesting." + }, + "map": { + "$ref": "#/definitions/coreLiteralMap", + "description": "A map of strings to literals." + }, + "offloaded_metadata": { + "$ref": "#/definitions/coreLiteralOffloadedMetadata", + "description": "Offloaded literal metadata\nWhen you deserialize the offloaded metadata, it would be of Literal and its type would be defined by LiteralType stored in offloaded_metadata." + }, + "hash": { + "type": "string", + "title": "A hash representing this literal.\nThis is used for caching purposes. For more details refer to RFC 1893\n(https://github.com/flyteorg/flyte/blob/master/rfc/system/1893-caching-of-offloaded-objects.md)" + }, + "metadata": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "description": "Additional metadata for literals." + } + }, + "description": "A simple value. This supports any level of nesting (e.g. array of array of array of Blobs) as well as simple primitives." 
+ }, + "coreLiteralCollection": { + "type": "object", + "properties": { + "literals": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/coreLiteral" + } + } + }, + "description": "A collection of literals. This is a workaround since oneofs in proto messages cannot contain a repeated field." + }, + "coreLiteralMap": { + "type": "object", + "properties": { + "literals": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/coreLiteral" + } + } + }, + "description": "A map of literals. This is a workaround since oneofs in proto messages cannot contain a repeated field." + }, + "coreLiteralOffloadedMetadata": { + "type": "object", + "properties": { + "uri": { + "type": "string", + "description": "The location of the offloaded core.Literal." + }, + "size_bytes": { + "type": "string", + "format": "uint64", + "description": "The size of the offloaded data." + }, + "inferred_type": { + "$ref": "#/definitions/coreLiteralType", + "description": "The inferred literal type of the offloaded data." + } + }, + "description": "A message that contains the metadata of the offloaded data." + }, + "coreLiteralType": { + "type": "object", + "properties": { + "simple": { + "$ref": "#/definitions/coreSimpleType", + "description": "A simple type that can be compared one-to-one with another." + }, + "schema": { + "$ref": "#/definitions/coreSchemaType", + "description": "A complex type that requires matching of inner fields." + }, + "collection_type": { + "$ref": "#/definitions/coreLiteralType", + "description": "Defines the type of the value of a collection. Only homogeneous collections are allowed." + }, + "map_value_type": { + "$ref": "#/definitions/coreLiteralType", + "description": "Defines the type of the value of a map type. The type of the key is always a string." + }, + "blob": { + "$ref": "#/definitions/coreBlobType", + "description": "A blob might have specialized implementation details depending on associated metadata." 
+ }, + "enum_type": { + "$ref": "#/definitions/flyteidl2coreEnumType", + "description": "Defines an enum with pre-defined string values." + }, + "structured_dataset_type": { + "$ref": "#/definitions/coreStructuredDatasetType", + "title": "Generalized schema support" + }, + "union_type": { + "$ref": "#/definitions/coreUnionType", + "description": "Defines an union type with pre-defined LiteralTypes." + }, + "metadata": { + "type": "object", + "description": "This field contains type metadata that is descriptive of the type, but is NOT considered in type-checking. This might be used by\nconsumers to identify special behavior or display extended information for the type." + }, + "annotation": { + "$ref": "#/definitions/coreTypeAnnotation", + "description": "This field contains arbitrary data that might have special semantic\nmeaning for the client but does not effect internal flyte behavior." + }, + "structure": { + "$ref": "#/definitions/coreTypeStructure", + "description": "Hints to improve type matching." + } + }, + "description": "Defines a strong type to allow type checking between interfaces." 
+ }, + "corePrimitive": { + "type": "object", + "properties": { + "integer": { + "type": "string", + "format": "int64" + }, + "float_value": { + "type": "number", + "format": "double" + }, + "string_value": { + "type": "string" + }, + "boolean": { + "type": "boolean" + }, + "datetime": { + "type": "string", + "format": "date-time" + }, + "duration": { + "type": "string" + } + }, + "title": "Primitive Types" + }, + "coreScalar": { + "type": "object", + "properties": { + "primitive": { + "$ref": "#/definitions/corePrimitive" + }, + "blob": { + "$ref": "#/definitions/coreBlob" + }, + "binary": { + "$ref": "#/definitions/coreBinary" + }, + "schema": { + "$ref": "#/definitions/coreSchema" + }, + "none_type": { + "$ref": "#/definitions/coreVoid" + }, + "error": { + "$ref": "#/definitions/coreError" + }, + "generic": { + "type": "object" + }, + "structured_dataset": { + "$ref": "#/definitions/coreStructuredDataset" + }, + "union": { + "$ref": "#/definitions/coreUnion" + } + } + }, + "coreSchema": { + "type": "object", + "properties": { + "uri": { + "type": "string" + }, + "type": { + "$ref": "#/definitions/coreSchemaType" + } + }, + "description": "A strongly typed schema that defines the interface of data retrieved from the underlying storage medium." + }, + "coreSchemaType": { + "type": "object", + "properties": { + "columns": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/SchemaTypeSchemaColumn" + }, + "description": "A list of ordered columns this schema comprises of." + } + }, + "description": "Defines schema columns and types to strongly type-validate schemas interoperability." + }, + "coreSimpleType": { + "type": "string", + "enum": [ + "NONE", + "INTEGER", + "FLOAT", + "STRING", + "BOOLEAN", + "DATETIME", + "DURATION", + "BINARY", + "ERROR", + "STRUCT" + ], + "default": "NONE", + "description": "Define a set of simple types." 
+ }, + "coreStructuredDataset": { + "type": "object", + "properties": { + "uri": { + "type": "string", + "title": "String location uniquely identifying where the data is.\nShould start with the storage location (e.g. s3://, gs://, bq://, etc.)" + }, + "metadata": { + "$ref": "#/definitions/coreStructuredDatasetMetadata" + } + } + }, + "coreStructuredDatasetMetadata": { + "type": "object", + "properties": { + "structured_dataset_type": { + "$ref": "#/definitions/coreStructuredDatasetType", + "description": "Bundle the type information along with the literal.\nThis is here because StructuredDatasets can often be more defined at run time than at compile time.\nThat is, at compile time you might only declare a task to return a pandas dataframe or a StructuredDataset,\nwithout any column information, but at run time, you might have that column information.\nflytekit python will copy this type information into the literal, from the type information, if not provided by\nthe various plugins (encoders).\nSince this field is run time generated, it's not used for any type checking." + } + } + }, + "coreStructuredDatasetType": { + "type": "object", + "properties": { + "columns": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/StructuredDatasetTypeDatasetColumn" + }, + "description": "A list of ordered columns this schema comprises of." + }, + "format": { + "type": "string", + "description": "This is the storage format, the format of the bits at rest\nparquet, feather, csv, etc.\nFor two types to be compatible, the format will need to be an exact match." + }, + "external_schema_type": { + "type": "string", + "description": "This is a string representing the type that the bytes in external_schema_bytes are formatted in.\nThis is an optional field that will not be used for type checking." 
+ }, + "external_schema_bytes": { + "type": "string", + "format": "byte", + "description": "The serialized bytes of a third-party schema library like Arrow.\nThis is an optional field that will not be used for type checking." + } + } + }, + "coreTypeAnnotation": { + "type": "object", + "properties": { + "annotations": { + "type": "object", + "description": "An arbitrary JSON payload to describe a type." + } + }, + "description": "TypeAnnotation encapsulates registration time information about a type. This can be used for various control-plane operations. TypeAnnotation will not be available at runtime when a task runs." + }, + "coreTypeStructure": { + "type": "object", + "properties": { + "tag": { + "type": "string", + "title": "Must exactly match for types to be castable" + }, + "dataclass_type": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/coreLiteralType" + }, + "title": "dataclass_type only exists for dataclasses.\nThis is used to resolve the type of the fields of dataclass\nThe key is the field name, and the value is the literal type of the field\ne.g. For dataclass Foo, with fields a, and a is a string\nFoo.a will be resolved as a literal type of string from dataclass_type" + } + }, + "description": "Hints to improve type matching\ne.g. allows distinguishing output from custom type transformers\neven if the underlying IDL serialization matches." + }, + "coreUnion": { + "type": "object", + "properties": { + "value": { + "$ref": "#/definitions/coreLiteral" + }, + "type": { + "$ref": "#/definitions/coreLiteralType" + } + }, + "description": "The runtime representation of a tagged union value. See `UnionType` for more details." + }, + "coreUnionType": { + "type": "object", + "properties": { + "variants": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/coreLiteralType" + }, + "description": "Predefined set of variants in union." 
+ } + }, + "description": "Defines a tagged union type, also known as a variant (and formally as the sum type).\n\nA sum type S is defined by a sequence of types (A, B, C, ...), each tagged by a string tag\nA value of type S is constructed from a value of any of the variant types. The specific choice of type is recorded by\nstoring the variant's tag with the literal value and can be examined in runtime.\n\nType S is typically written as\nS := Apple A | Banana B | Cantaloupe C | ...\n\nNotably, a nullable (optional) type is a sum type between some type X and the singleton type representing a null-value:\nOptional X := X | Null\n\nSee also: https://en.wikipedia.org/wiki/Tagged_union" + }, + "coreVoid": { + "type": "object", + "description": "Used to denote a nil/null/None assignment to a scalar value. The underlying LiteralType for Void is intentionally\nundefined since it can be assigned to a scalar of any LiteralType." + }, + "datacatalogAddTagResponse": { + "type": "object", + "description": "Response message for tagging an Artifact." 
+ }, + "datacatalogArtifact": { + "type": "object", + "properties": { + "id": { + "type": "string", + "title": "The unique ID of the artifact" + }, + "dataset": { + "$ref": "#/definitions/datacatalogDatasetID", + "title": "The Dataset that the artifact belongs to" + }, + "data": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/datacatalogArtifactData" + }, + "title": "A list of data that is associated with the artifact" + }, + "metadata": { + "$ref": "#/definitions/flyteidl2datacatalogMetadata", + "title": "Free-form metadata associated with the artifact" + }, + "partitions": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/datacatalogPartition" + } + }, + "tags": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/datacatalogTag" + } + }, + "created_at": { + "type": "string", + "format": "date-time", + "title": "creation timestamp of artifact, autogenerated by service" + } + }, + "description": "Artifact message. It is composed of several string fields." + }, + "datacatalogArtifactData": { + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "value": { + "$ref": "#/definitions/coreLiteral" + } + }, + "title": "ArtifactData that belongs to an artifact" + }, + "datacatalogArtifactPropertyFilter": { + "type": "object", + "properties": { + "artifact_id": { + "type": "string" + } + }, + "title": "Artifact properties we can filter by" + }, + "datacatalogCreateArtifactResponse": { + "type": "object", + "description": "Response message for creating an Artifact." 
+ }, + "datacatalogCreateDatasetResponse": { + "type": "object", + "title": "Response message for creating a Dataset" + }, + "datacatalogDataset": { + "type": "object", + "properties": { + "id": { + "$ref": "#/definitions/datacatalogDatasetID" + }, + "metadata": { + "$ref": "#/definitions/flyteidl2datacatalogMetadata" + }, + "partitionKeys": { + "type": "array", + "items": { + "type": "string" + } + } + }, + "description": "Dataset message. It is uniquely identified by DatasetID." + }, + "datacatalogDatasetID": { + "type": "object", + "properties": { + "project": { + "type": "string", + "title": "The name of the project" + }, + "name": { + "type": "string", + "title": "The name of the dataset" + }, + "domain": { + "type": "string", + "title": "The domain (eg. environment)" + }, + "version": { + "type": "string", + "title": "Version of the data schema" + }, + "UUID": { + "type": "string", + "title": "UUID for the dataset (if set the above fields are optional)" + }, + "org": { + "type": "string", + "description": "Optional, org key applied to the resource." + } + }, + "description": "DatasetID message that is composed of several string fields." + }, + "datacatalogDatasetPropertyFilter": { + "type": "object", + "properties": { + "project": { + "type": "string" + }, + "name": { + "type": "string" + }, + "domain": { + "type": "string" + }, + "version": { + "type": "string" + }, + "org": { + "type": "string", + "description": "Optional, org key applied to the dataset." 
+ } + }, + "title": "Dataset properties we can filter by" + }, + "datacatalogFilterExpression": { + "type": "object", + "properties": { + "filters": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/datacatalogSinglePropertyFilter" + } + } + }, + "title": "Filter expression that is composed of a combination of single filters" + }, + "datacatalogGetArtifactResponse": { + "type": "object", + "properties": { + "artifact": { + "$ref": "#/definitions/datacatalogArtifact" + } + }, + "description": "Response message for retrieving an Artifact. The result returned will include the artifact data\nand metadata associated with the artifact." + }, + "datacatalogGetDatasetResponse": { + "type": "object", + "properties": { + "dataset": { + "$ref": "#/definitions/datacatalogDataset" + } + }, + "description": "Response message for retrieving a Dataset. The response will include the metadata for the\nDataset." + }, + "datacatalogListArtifactsResponse": { + "type": "object", + "properties": { + "artifacts": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/datacatalogArtifact" + }, + "title": "The list of artifacts" + }, + "next_token": { + "type": "string", + "title": "Token to use to request the next page, pass this into the next requests PaginationOptions" + } + }, + "title": "Response to list artifacts" + }, + "datacatalogListDatasetsResponse": { + "type": "object", + "properties": { + "datasets": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/datacatalogDataset" + }, + "title": "The list of datasets" + }, + "next_token": { + "type": "string", + "title": "Token to use to request the next page, pass this into the next requests PaginationOptions" + } + }, + "title": "List the datasets response with token for next pagination" + }, + "datacatalogPaginationOptions": { + "type": "object", + "properties": { + "limit": { + "type": "integer", + "format": "int64", + "title": "the max number of 
results to return" + }, + "token": { + "type": "string", + "title": "the token to pass to fetch the next page" + }, + "sortKey": { + "$ref": "#/definitions/PaginationOptionsSortKey", + "title": "the property that we want to sort the results by" + }, + "sortOrder": { + "$ref": "#/definitions/PaginationOptionsSortOrder", + "title": "the sort order of the results" + } + }, + "title": "Pagination options for making list requests" + }, + "datacatalogPartition": { + "type": "object", + "properties": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + }, + "title": "An artifact could have multiple partitions and each partition can have an arbitrary string key/value pair" + }, + "datacatalogPartitionPropertyFilter": { + "type": "object", + "properties": { + "key_val": { + "$ref": "#/definitions/flyteidl2datacatalogKeyValuePair" + } + }, + "title": "Partition properties we can filter by" + }, + "datacatalogReservationID": { + "type": "object", + "properties": { + "dataset_id": { + "$ref": "#/definitions/datacatalogDatasetID", + "title": "The unique ID for the reserved dataset" + }, + "tag_name": { + "type": "string", + "title": "The specific artifact tag for the reservation" + } + }, + "description": "ReservationID message that is composed of several string fields." + }, + "datacatalogSinglePropertyFilter": { + "type": "object", + "properties": { + "tag_filter": { + "$ref": "#/definitions/datacatalogTagPropertyFilter" + }, + "partition_filter": { + "$ref": "#/definitions/datacatalogPartitionPropertyFilter" + }, + "artifact_filter": { + "$ref": "#/definitions/datacatalogArtifactPropertyFilter" + }, + "dataset_filter": { + "$ref": "#/definitions/datacatalogDatasetPropertyFilter" + }, + "operator": { + "$ref": "#/definitions/SinglePropertyFilterComparisonOperator", + "title": "field 10 in case we add more entities to query" + } + }, + "description": "A single property to filter on." 
+ }, + "datacatalogTag": { + "type": "object", + "properties": { + "name": { + "type": "string", + "title": "Name of tag" + }, + "artifact_id": { + "type": "string", + "title": "The tagged artifact" + }, + "dataset": { + "$ref": "#/definitions/datacatalogDatasetID", + "title": "The Dataset that this tag belongs to" + } + }, + "description": "Tag message that is unique to a Dataset. It is associated to a single artifact and\ncan be retrieved by name later." + }, + "datacatalogTagPropertyFilter": { + "type": "object", + "properties": { + "tag_name": { + "type": "string" + } + }, + "title": "Tag properties we can filter by" + }, + "datacatalogUpdateArtifactResponse": { + "type": "object", + "properties": { + "artifact_id": { + "type": "string", + "title": "The unique ID of the artifact updated" + } + }, + "description": "Response message for updating an Artifact." + }, + "flyteidl2coreEnumType": { + "type": "object", + "properties": { + "values": { + "type": "array", + "items": { + "type": "string" + }, + "description": "Predefined set of enum values." + } + }, + "description": "Enables declaring enum types, with predefined string values\nFor len(values) \u003e 0, the first value in the ordered list is regarded as the default value. If you wish\nTo provide no defaults, make the first value as undefined." 
+ }, + "flyteidl2datacatalogGetOrExtendReservationResponse": { + "type": "object", + "properties": { + "reservation": { + "$ref": "#/definitions/flyteidl2datacatalogReservation", + "title": "The reservation to be acquired or extended" + } + }, + "title": "Response including either a newly minted reservation or the existing reservation" + }, + "flyteidl2datacatalogKeyValuePair": { + "type": "object", + "properties": { + "key": { + "type": "string" + }, + "value": { + "type": "string" + } + } + }, + "flyteidl2datacatalogMetadata": { + "type": "object", + "properties": { + "key_map": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "title": "key map is a dictionary of key/val strings that represent metadata" + } + }, + "title": "Metadata representation for artifacts and datasets" + }, + "flyteidl2datacatalogReleaseReservationResponse": { + "type": "object", + "title": "Response to release reservation" + }, + "flyteidl2datacatalogReservation": { + "type": "object", + "properties": { + "reservation_id": { + "$ref": "#/definitions/datacatalogReservationID", + "title": "The unique ID for the reservation" + }, + "owner_id": { + "type": "string", + "title": "The unique ID of the owner for the reservation" + }, + "heartbeat_interval": { + "type": "string", + "title": "Recommended heartbeat interval to extend reservation" + }, + "expires_at": { + "type": "string", + "format": "date-time", + "title": "Expiration timestamp of this reservation" + }, + "metadata": { + "$ref": "#/definitions/flyteidl2datacatalogMetadata", + "title": "Free-form metadata associated with the artifact" + } + }, + "description": "A reservation including owner, heartbeat interval, expiration timestamp, and various metadata." + }, + "googlerpcStatus": { + "type": "object", + "properties": { + "code": { + "type": "integer", + "format": "int32", + "description": "The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]." 
+ }, + "message": { + "type": "string", + "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\n[google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client." + }, + "details": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/protobufAny" + }, + "description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use." + } + }, + "description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors)." + }, + "protobufAny": { + "type": "object", + "properties": { + "@type": { + "type": "string", + "description": "A URL/resource name that uniquely identifies the type of the serialized\nprotocol buffer message. This string must contain at least\none \"/\" character. The last segment of the URL's path must represent\nthe fully qualified name of the type (as in\n`path/google.protobuf.Duration`). The name should be in a canonical form\n(e.g., leading \".\" is not accepted).\n\nIn practice, teams usually precompile into the binary all types that they\nexpect it to use in the context of Any. 
However, for URLs which use the\nscheme `http`, `https`, or no scheme, one can optionally set up a type\nserver that maps type URLs to message definitions as follows:\n\n* If no scheme is provided, `https` is assumed.\n* An HTTP GET on the URL must yield a [google.protobuf.Type][]\n value in binary format, or produce an error.\n* Applications are allowed to cache lookup results based on the\n URL, or have them precompiled into a binary to avoid any\n lookup. Therefore, binary compatibility needs to be preserved\n on changes to types. (Use versioned type names to manage\n breaking changes.)\n\nNote: this functionality is not currently available in the official\nprotobuf release, and it is not used for type URLs beginning with\ntype.googleapis.com. As of May 2023, there are no widely used type server\nimplementations and no plans to implement one.\n\nSchemes other than `http`, `https` (or the empty scheme) might be\nused with implementation specific semantics." + } + }, + "additionalProperties": {}, + "description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n // or ...\n if (any.isSameTypeAs(Foo.getDefaultInstance())) {\n foo = any.unpack(Foo.getDefaultInstance());\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := 
anypb.New(foo)\n if err != nil {\n ...\n }\n ...\n foo := \u0026pb.Foo{}\n if err := any.UnmarshalTo(foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }" + }, + "protobufNullValue": { + "type": "string", + "enum": [ + "NULL_VALUE" + ], + "default": "NULL_VALUE", + "description": "`NullValue` is a singleton enumeration to represent the null value for the\n`Value` type union.\n\nThe JSON representation for `NullValue` is JSON `null`.\n\n - NULL_VALUE: Null value." 
+ } + } +} diff --git a/gen/go/gateway/flyteidl2/event/cloudevents.swagger.json b/gen/go/gateway/flyteidl2/event/cloudevents.swagger.json new file mode 100644 index 0000000000..a54edf71fa --- /dev/null +++ b/gen/go/gateway/flyteidl2/event/cloudevents.swagger.json @@ -0,0 +1,50 @@ +{ + "swagger": "2.0", + "info": { + "title": "flyteidl2/event/cloudevents.proto", + "version": "version not set" + }, + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "paths": {}, + "definitions": { + "googlerpcStatus": { + "type": "object", + "properties": { + "code": { + "type": "integer", + "format": "int32", + "description": "The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]." + }, + "message": { + "type": "string", + "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\n[google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client." + }, + "details": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/protobufAny" + }, + "description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use." + } + }, + "description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors)." + }, + "protobufAny": { + "type": "object", + "properties": { + "@type": { + "type": "string", + "description": "A URL/resource name that uniquely identifies the type of the serialized\nprotocol buffer message. This string must contain at least\none \"/\" character. 
The last segment of the URL's path must represent\nthe fully qualified name of the type (as in\n`path/google.protobuf.Duration`). The name should be in a canonical form\n(e.g., leading \".\" is not accepted).\n\nIn practice, teams usually precompile into the binary all types that they\nexpect it to use in the context of Any. However, for URLs which use the\nscheme `http`, `https`, or no scheme, one can optionally set up a type\nserver that maps type URLs to message definitions as follows:\n\n* If no scheme is provided, `https` is assumed.\n* An HTTP GET on the URL must yield a [google.protobuf.Type][]\n value in binary format, or produce an error.\n* Applications are allowed to cache lookup results based on the\n URL, or have them precompiled into a binary to avoid any\n lookup. Therefore, binary compatibility needs to be preserved\n on changes to types. (Use versioned type names to manage\n breaking changes.)\n\nNote: this functionality is not currently available in the official\nprotobuf release, and it is not used for type URLs beginning with\ntype.googleapis.com. As of May 2023, there are no widely used type server\nimplementations and no plans to implement one.\n\nSchemes other than `http`, `https` (or the empty scheme) might be\nused with implementation specific semantics." 
+ } + }, + "additionalProperties": {}, + "description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n // or ...\n if (any.isSameTypeAs(Foo.getDefaultInstance())) {\n foo = any.unpack(Foo.getDefaultInstance());\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := anypb.New(foo)\n if err != nil {\n ...\n }\n ...\n foo := \u0026pb.Foo{}\n if err := any.UnmarshalTo(foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. 
Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }" + } + } +} diff --git a/gen/go/gateway/flyteidl2/event/event.swagger.json b/gen/go/gateway/flyteidl2/event/event.swagger.json new file mode 100644 index 0000000000..b6937c1ff6 --- /dev/null +++ b/gen/go/gateway/flyteidl2/event/event.swagger.json @@ -0,0 +1,50 @@ +{ + "swagger": "2.0", + "info": { + "title": "flyteidl2/event/event.proto", + "version": "version not set" + }, + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "paths": {}, + "definitions": { + "googlerpcStatus": { + "type": "object", + "properties": { + "code": { + "type": "integer", + "format": "int32", + "description": "The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]." + }, + "message": { + "type": "string", + "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\n[google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client." + }, + "details": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/protobufAny" + }, + "description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use." 
+ } + }, + "description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors)." + }, + "protobufAny": { + "type": "object", + "properties": { + "@type": { + "type": "string", + "description": "A URL/resource name that uniquely identifies the type of the serialized\nprotocol buffer message. This string must contain at least\none \"/\" character. The last segment of the URL's path must represent\nthe fully qualified name of the type (as in\n`path/google.protobuf.Duration`). The name should be in a canonical form\n(e.g., leading \".\" is not accepted).\n\nIn practice, teams usually precompile into the binary all types that they\nexpect it to use in the context of Any. However, for URLs which use the\nscheme `http`, `https`, or no scheme, one can optionally set up a type\nserver that maps type URLs to message definitions as follows:\n\n* If no scheme is provided, `https` is assumed.\n* An HTTP GET on the URL must yield a [google.protobuf.Type][]\n value in binary format, or produce an error.\n* Applications are allowed to cache lookup results based on the\n URL, or have them precompiled into a binary to avoid any\n lookup. Therefore, binary compatibility needs to be preserved\n on changes to types. (Use versioned type names to manage\n breaking changes.)\n\nNote: this functionality is not currently available in the official\nprotobuf release, and it is not used for type URLs beginning with\ntype.googleapis.com. 
As of May 2023, there are no widely used type server\nimplementations and no plans to implement one.\n\nSchemes other than `http`, `https` (or the empty scheme) might be\nused with implementation specific semantics." + } + }, + "additionalProperties": {}, + "description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n // or ...\n if (any.isSameTypeAs(Foo.getDefaultInstance())) {\n foo = any.unpack(Foo.getDefaultInstance());\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := anypb.New(foo)\n if err != nil {\n ...\n }\n ...\n foo := \u0026pb.Foo{}\n if err := any.UnmarshalTo(foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. 
Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }" + } + } +} diff --git a/gen/go/gateway/flyteidl2/plugins/common.swagger.json b/gen/go/gateway/flyteidl2/plugins/common.swagger.json new file mode 100644 index 0000000000..6159c03342 --- /dev/null +++ b/gen/go/gateway/flyteidl2/plugins/common.swagger.json @@ -0,0 +1,50 @@ +{ + "swagger": "2.0", + "info": { + "title": "flyteidl2/plugins/common.proto", + "version": "version not set" + }, + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "paths": {}, + "definitions": { + "googlerpcStatus": { + "type": "object", + "properties": { + "code": { + "type": "integer", + "format": "int32", + "description": "The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]." + }, + "message": { + "type": "string", + "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\n[google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client." + }, + "details": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/protobufAny" + }, + "description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use." 
+ } + }, + "description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors)." + }, + "protobufAny": { + "type": "object", + "properties": { + "@type": { + "type": "string", + "description": "A URL/resource name that uniquely identifies the type of the serialized\nprotocol buffer message. This string must contain at least\none \"/\" character. The last segment of the URL's path must represent\nthe fully qualified name of the type (as in\n`path/google.protobuf.Duration`). The name should be in a canonical form\n(e.g., leading \".\" is not accepted).\n\nIn practice, teams usually precompile into the binary all types that they\nexpect it to use in the context of Any. However, for URLs which use the\nscheme `http`, `https`, or no scheme, one can optionally set up a type\nserver that maps type URLs to message definitions as follows:\n\n* If no scheme is provided, `https` is assumed.\n* An HTTP GET on the URL must yield a [google.protobuf.Type][]\n value in binary format, or produce an error.\n* Applications are allowed to cache lookup results based on the\n URL, or have them precompiled into a binary to avoid any\n lookup. Therefore, binary compatibility needs to be preserved\n on changes to types. (Use versioned type names to manage\n breaking changes.)\n\nNote: this functionality is not currently available in the official\nprotobuf release, and it is not used for type URLs beginning with\ntype.googleapis.com. 
As of May 2023, there are no widely used type server\nimplementations and no plans to implement one.\n\nSchemes other than `http`, `https` (or the empty scheme) might be\nused with implementation specific semantics." + } + }, + "additionalProperties": {}, + "description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n // or ...\n if (any.isSameTypeAs(Foo.getDefaultInstance())) {\n foo = any.unpack(Foo.getDefaultInstance());\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := anypb.New(foo)\n if err != nil {\n ...\n }\n ...\n foo := \u0026pb.Foo{}\n if err := any.UnmarshalTo(foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. 
Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }" + } + } +} diff --git a/gen/go/gateway/flyteidl2/plugins/kubeflow/common.swagger.json b/gen/go/gateway/flyteidl2/plugins/kubeflow/common.swagger.json new file mode 100644 index 0000000000..5893395bfa --- /dev/null +++ b/gen/go/gateway/flyteidl2/plugins/kubeflow/common.swagger.json @@ -0,0 +1,50 @@ +{ + "swagger": "2.0", + "info": { + "title": "flyteidl2/plugins/kubeflow/common.proto", + "version": "version not set" + }, + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "paths": {}, + "definitions": { + "googlerpcStatus": { + "type": "object", + "properties": { + "code": { + "type": "integer", + "format": "int32", + "description": "The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]." + }, + "message": { + "type": "string", + "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\n[google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client." + }, + "details": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/protobufAny" + }, + "description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use." 
+ } + }, + "description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors)." + }, + "protobufAny": { + "type": "object", + "properties": { + "@type": { + "type": "string", + "description": "A URL/resource name that uniquely identifies the type of the serialized\nprotocol buffer message. This string must contain at least\none \"/\" character. The last segment of the URL's path must represent\nthe fully qualified name of the type (as in\n`path/google.protobuf.Duration`). The name should be in a canonical form\n(e.g., leading \".\" is not accepted).\n\nIn practice, teams usually precompile into the binary all types that they\nexpect it to use in the context of Any. However, for URLs which use the\nscheme `http`, `https`, or no scheme, one can optionally set up a type\nserver that maps type URLs to message definitions as follows:\n\n* If no scheme is provided, `https` is assumed.\n* An HTTP GET on the URL must yield a [google.protobuf.Type][]\n value in binary format, or produce an error.\n* Applications are allowed to cache lookup results based on the\n URL, or have them precompiled into a binary to avoid any\n lookup. Therefore, binary compatibility needs to be preserved\n on changes to types. (Use versioned type names to manage\n breaking changes.)\n\nNote: this functionality is not currently available in the official\nprotobuf release, and it is not used for type URLs beginning with\ntype.googleapis.com. 
As of May 2023, there are no widely used type server\nimplementations and no plans to implement one.\n\nSchemes other than `http`, `https` (or the empty scheme) might be\nused with implementation specific semantics." + } + }, + "additionalProperties": {}, + "description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n // or ...\n if (any.isSameTypeAs(Foo.getDefaultInstance())) {\n foo = any.unpack(Foo.getDefaultInstance());\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := anypb.New(foo)\n if err != nil {\n ...\n }\n ...\n foo := \u0026pb.Foo{}\n if err := any.UnmarshalTo(foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. 
Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }" + } + } +} diff --git a/gen/go/gateway/flyteidl2/plugins/kubeflow/mpi.swagger.json b/gen/go/gateway/flyteidl2/plugins/kubeflow/mpi.swagger.json new file mode 100644 index 0000000000..a900eab98f --- /dev/null +++ b/gen/go/gateway/flyteidl2/plugins/kubeflow/mpi.swagger.json @@ -0,0 +1,50 @@ +{ + "swagger": "2.0", + "info": { + "title": "flyteidl2/plugins/kubeflow/mpi.proto", + "version": "version not set" + }, + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "paths": {}, + "definitions": { + "googlerpcStatus": { + "type": "object", + "properties": { + "code": { + "type": "integer", + "format": "int32", + "description": "The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]." + }, + "message": { + "type": "string", + "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\n[google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client." + }, + "details": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/protobufAny" + }, + "description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use." 
+ } + }, + "description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors)." + }, + "protobufAny": { + "type": "object", + "properties": { + "@type": { + "type": "string", + "description": "A URL/resource name that uniquely identifies the type of the serialized\nprotocol buffer message. This string must contain at least\none \"/\" character. The last segment of the URL's path must represent\nthe fully qualified name of the type (as in\n`path/google.protobuf.Duration`). The name should be in a canonical form\n(e.g., leading \".\" is not accepted).\n\nIn practice, teams usually precompile into the binary all types that they\nexpect it to use in the context of Any. However, for URLs which use the\nscheme `http`, `https`, or no scheme, one can optionally set up a type\nserver that maps type URLs to message definitions as follows:\n\n* If no scheme is provided, `https` is assumed.\n* An HTTP GET on the URL must yield a [google.protobuf.Type][]\n value in binary format, or produce an error.\n* Applications are allowed to cache lookup results based on the\n URL, or have them precompiled into a binary to avoid any\n lookup. Therefore, binary compatibility needs to be preserved\n on changes to types. (Use versioned type names to manage\n breaking changes.)\n\nNote: this functionality is not currently available in the official\nprotobuf release, and it is not used for type URLs beginning with\ntype.googleapis.com. 
As of May 2023, there are no widely used type server\nimplementations and no plans to implement one.\n\nSchemes other than `http`, `https` (or the empty scheme) might be\nused with implementation specific semantics." + } + }, + "additionalProperties": {}, + "description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n // or ...\n if (any.isSameTypeAs(Foo.getDefaultInstance())) {\n foo = any.unpack(Foo.getDefaultInstance());\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := anypb.New(foo)\n if err != nil {\n ...\n }\n ...\n foo := \u0026pb.Foo{}\n if err := any.UnmarshalTo(foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. 
Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }" + } + } +} diff --git a/gen/go/gateway/flyteidl2/plugins/kubeflow/pytorch.swagger.json b/gen/go/gateway/flyteidl2/plugins/kubeflow/pytorch.swagger.json new file mode 100644 index 0000000000..1477b15565 --- /dev/null +++ b/gen/go/gateway/flyteidl2/plugins/kubeflow/pytorch.swagger.json @@ -0,0 +1,50 @@ +{ + "swagger": "2.0", + "info": { + "title": "flyteidl2/plugins/kubeflow/pytorch.proto", + "version": "version not set" + }, + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "paths": {}, + "definitions": { + "googlerpcStatus": { + "type": "object", + "properties": { + "code": { + "type": "integer", + "format": "int32", + "description": "The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]." + }, + "message": { + "type": "string", + "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\n[google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client." + }, + "details": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/protobufAny" + }, + "description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use." 
+ } + }, + "description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors)." + }, + "protobufAny": { + "type": "object", + "properties": { + "@type": { + "type": "string", + "description": "A URL/resource name that uniquely identifies the type of the serialized\nprotocol buffer message. This string must contain at least\none \"/\" character. The last segment of the URL's path must represent\nthe fully qualified name of the type (as in\n`path/google.protobuf.Duration`). The name should be in a canonical form\n(e.g., leading \".\" is not accepted).\n\nIn practice, teams usually precompile into the binary all types that they\nexpect it to use in the context of Any. However, for URLs which use the\nscheme `http`, `https`, or no scheme, one can optionally set up a type\nserver that maps type URLs to message definitions as follows:\n\n* If no scheme is provided, `https` is assumed.\n* An HTTP GET on the URL must yield a [google.protobuf.Type][]\n value in binary format, or produce an error.\n* Applications are allowed to cache lookup results based on the\n URL, or have them precompiled into a binary to avoid any\n lookup. Therefore, binary compatibility needs to be preserved\n on changes to types. (Use versioned type names to manage\n breaking changes.)\n\nNote: this functionality is not currently available in the official\nprotobuf release, and it is not used for type URLs beginning with\ntype.googleapis.com. 
As of May 2023, there are no widely used type server\nimplementations and no plans to implement one.\n\nSchemes other than `http`, `https` (or the empty scheme) might be\nused with implementation specific semantics." + } + }, + "additionalProperties": {}, + "description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n // or ...\n if (any.isSameTypeAs(Foo.getDefaultInstance())) {\n foo = any.unpack(Foo.getDefaultInstance());\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := anypb.New(foo)\n if err != nil {\n ...\n }\n ...\n foo := \u0026pb.Foo{}\n if err := any.UnmarshalTo(foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. 
Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }" + } + } +} diff --git a/gen/go/gateway/flyteidl2/plugins/kubeflow/tensorflow.swagger.json b/gen/go/gateway/flyteidl2/plugins/kubeflow/tensorflow.swagger.json new file mode 100644 index 0000000000..60cb2fa9b8 --- /dev/null +++ b/gen/go/gateway/flyteidl2/plugins/kubeflow/tensorflow.swagger.json @@ -0,0 +1,50 @@ +{ + "swagger": "2.0", + "info": { + "title": "flyteidl2/plugins/kubeflow/tensorflow.proto", + "version": "version not set" + }, + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "paths": {}, + "definitions": { + "googlerpcStatus": { + "type": "object", + "properties": { + "code": { + "type": "integer", + "format": "int32", + "description": "The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]." + }, + "message": { + "type": "string", + "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\n[google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client." + }, + "details": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/protobufAny" + }, + "description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use." 
+ } + }, + "description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors)." + }, + "protobufAny": { + "type": "object", + "properties": { + "@type": { + "type": "string", + "description": "A URL/resource name that uniquely identifies the type of the serialized\nprotocol buffer message. This string must contain at least\none \"/\" character. The last segment of the URL's path must represent\nthe fully qualified name of the type (as in\n`path/google.protobuf.Duration`). The name should be in a canonical form\n(e.g., leading \".\" is not accepted).\n\nIn practice, teams usually precompile into the binary all types that they\nexpect it to use in the context of Any. However, for URLs which use the\nscheme `http`, `https`, or no scheme, one can optionally set up a type\nserver that maps type URLs to message definitions as follows:\n\n* If no scheme is provided, `https` is assumed.\n* An HTTP GET on the URL must yield a [google.protobuf.Type][]\n value in binary format, or produce an error.\n* Applications are allowed to cache lookup results based on the\n URL, or have them precompiled into a binary to avoid any\n lookup. Therefore, binary compatibility needs to be preserved\n on changes to types. (Use versioned type names to manage\n breaking changes.)\n\nNote: this functionality is not currently available in the official\nprotobuf release, and it is not used for type URLs beginning with\ntype.googleapis.com. 
As of May 2023, there are no widely used type server\nimplementations and no plans to implement one.\n\nSchemes other than `http`, `https` (or the empty scheme) might be\nused with implementation specific semantics." + } + }, + "additionalProperties": {}, + "description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n // or ...\n if (any.isSameTypeAs(Foo.getDefaultInstance())) {\n foo = any.unpack(Foo.getDefaultInstance());\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := anypb.New(foo)\n if err != nil {\n ...\n }\n ...\n foo := \u0026pb.Foo{}\n if err := any.UnmarshalTo(foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. 
Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }" + } + } +} diff --git a/gen/go/gateway/flyteidl2/plugins/mpi.swagger.json b/gen/go/gateway/flyteidl2/plugins/mpi.swagger.json new file mode 100644 index 0000000000..b84935b7da --- /dev/null +++ b/gen/go/gateway/flyteidl2/plugins/mpi.swagger.json @@ -0,0 +1,50 @@ +{ + "swagger": "2.0", + "info": { + "title": "flyteidl2/plugins/mpi.proto", + "version": "version not set" + }, + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "paths": {}, + "definitions": { + "googlerpcStatus": { + "type": "object", + "properties": { + "code": { + "type": "integer", + "format": "int32", + "description": "The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]." + }, + "message": { + "type": "string", + "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\n[google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client." + }, + "details": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/protobufAny" + }, + "description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use." 
+ } + }, + "description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors)." + }, + "protobufAny": { + "type": "object", + "properties": { + "@type": { + "type": "string", + "description": "A URL/resource name that uniquely identifies the type of the serialized\nprotocol buffer message. This string must contain at least\none \"/\" character. The last segment of the URL's path must represent\nthe fully qualified name of the type (as in\n`path/google.protobuf.Duration`). The name should be in a canonical form\n(e.g., leading \".\" is not accepted).\n\nIn practice, teams usually precompile into the binary all types that they\nexpect it to use in the context of Any. However, for URLs which use the\nscheme `http`, `https`, or no scheme, one can optionally set up a type\nserver that maps type URLs to message definitions as follows:\n\n* If no scheme is provided, `https` is assumed.\n* An HTTP GET on the URL must yield a [google.protobuf.Type][]\n value in binary format, or produce an error.\n* Applications are allowed to cache lookup results based on the\n URL, or have them precompiled into a binary to avoid any\n lookup. Therefore, binary compatibility needs to be preserved\n on changes to types. (Use versioned type names to manage\n breaking changes.)\n\nNote: this functionality is not currently available in the official\nprotobuf release, and it is not used for type URLs beginning with\ntype.googleapis.com. 
As of May 2023, there are no widely used type server\nimplementations and no plans to implement one.\n\nSchemes other than `http`, `https` (or the empty scheme) might be\nused with implementation specific semantics." + } + }, + "additionalProperties": {}, + "description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n // or ...\n if (any.isSameTypeAs(Foo.getDefaultInstance())) {\n foo = any.unpack(Foo.getDefaultInstance());\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := anypb.New(foo)\n if err != nil {\n ...\n }\n ...\n foo := \u0026pb.Foo{}\n if err := any.UnmarshalTo(foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. 
Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }" + } + } +} diff --git a/gen/go/gateway/flyteidl2/plugins/presto.swagger.json b/gen/go/gateway/flyteidl2/plugins/presto.swagger.json new file mode 100644 index 0000000000..f78a0bac54 --- /dev/null +++ b/gen/go/gateway/flyteidl2/plugins/presto.swagger.json @@ -0,0 +1,50 @@ +{ + "swagger": "2.0", + "info": { + "title": "flyteidl2/plugins/presto.proto", + "version": "version not set" + }, + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "paths": {}, + "definitions": { + "googlerpcStatus": { + "type": "object", + "properties": { + "code": { + "type": "integer", + "format": "int32", + "description": "The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]." + }, + "message": { + "type": "string", + "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\n[google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client." + }, + "details": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/protobufAny" + }, + "description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use." 
+ } + }, + "description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors)." + }, + "protobufAny": { + "type": "object", + "properties": { + "@type": { + "type": "string", + "description": "A URL/resource name that uniquely identifies the type of the serialized\nprotocol buffer message. This string must contain at least\none \"/\" character. The last segment of the URL's path must represent\nthe fully qualified name of the type (as in\n`path/google.protobuf.Duration`). The name should be in a canonical form\n(e.g., leading \".\" is not accepted).\n\nIn practice, teams usually precompile into the binary all types that they\nexpect it to use in the context of Any. However, for URLs which use the\nscheme `http`, `https`, or no scheme, one can optionally set up a type\nserver that maps type URLs to message definitions as follows:\n\n* If no scheme is provided, `https` is assumed.\n* An HTTP GET on the URL must yield a [google.protobuf.Type][]\n value in binary format, or produce an error.\n* Applications are allowed to cache lookup results based on the\n URL, or have them precompiled into a binary to avoid any\n lookup. Therefore, binary compatibility needs to be preserved\n on changes to types. (Use versioned type names to manage\n breaking changes.)\n\nNote: this functionality is not currently available in the official\nprotobuf release, and it is not used for type URLs beginning with\ntype.googleapis.com. 
As of May 2023, there are no widely used type server\nimplementations and no plans to implement one.\n\nSchemes other than `http`, `https` (or the empty scheme) might be\nused with implementation specific semantics." + } + }, + "additionalProperties": {}, + "description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n // or ...\n if (any.isSameTypeAs(Foo.getDefaultInstance())) {\n foo = any.unpack(Foo.getDefaultInstance());\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := anypb.New(foo)\n if err != nil {\n ...\n }\n ...\n foo := \u0026pb.Foo{}\n if err := any.UnmarshalTo(foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. 
Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }" + } + } +} diff --git a/gen/go/gateway/flyteidl2/plugins/qubole.swagger.json b/gen/go/gateway/flyteidl2/plugins/qubole.swagger.json new file mode 100644 index 0000000000..7a1eca793a --- /dev/null +++ b/gen/go/gateway/flyteidl2/plugins/qubole.swagger.json @@ -0,0 +1,50 @@ +{ + "swagger": "2.0", + "info": { + "title": "flyteidl2/plugins/qubole.proto", + "version": "version not set" + }, + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "paths": {}, + "definitions": { + "googlerpcStatus": { + "type": "object", + "properties": { + "code": { + "type": "integer", + "format": "int32", + "description": "The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]." + }, + "message": { + "type": "string", + "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\n[google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client." + }, + "details": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/protobufAny" + }, + "description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use." 
+ } + }, + "description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors)." + }, + "protobufAny": { + "type": "object", + "properties": { + "@type": { + "type": "string", + "description": "A URL/resource name that uniquely identifies the type of the serialized\nprotocol buffer message. This string must contain at least\none \"/\" character. The last segment of the URL's path must represent\nthe fully qualified name of the type (as in\n`path/google.protobuf.Duration`). The name should be in a canonical form\n(e.g., leading \".\" is not accepted).\n\nIn practice, teams usually precompile into the binary all types that they\nexpect it to use in the context of Any. However, for URLs which use the\nscheme `http`, `https`, or no scheme, one can optionally set up a type\nserver that maps type URLs to message definitions as follows:\n\n* If no scheme is provided, `https` is assumed.\n* An HTTP GET on the URL must yield a [google.protobuf.Type][]\n value in binary format, or produce an error.\n* Applications are allowed to cache lookup results based on the\n URL, or have them precompiled into a binary to avoid any\n lookup. Therefore, binary compatibility needs to be preserved\n on changes to types. (Use versioned type names to manage\n breaking changes.)\n\nNote: this functionality is not currently available in the official\nprotobuf release, and it is not used for type URLs beginning with\ntype.googleapis.com. 
As of May 2023, there are no widely used type server\nimplementations and no plans to implement one.\n\nSchemes other than `http`, `https` (or the empty scheme) might be\nused with implementation specific semantics." + } + }, + "additionalProperties": {}, + "description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n // or ...\n if (any.isSameTypeAs(Foo.getDefaultInstance())) {\n foo = any.unpack(Foo.getDefaultInstance());\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := anypb.New(foo)\n if err != nil {\n ...\n }\n ...\n foo := \u0026pb.Foo{}\n if err := any.UnmarshalTo(foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. 
Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }" + } + } +} diff --git a/gen/go/gateway/flyteidl2/plugins/tensorflow.swagger.json b/gen/go/gateway/flyteidl2/plugins/tensorflow.swagger.json new file mode 100644 index 0000000000..7bfdd65db8 --- /dev/null +++ b/gen/go/gateway/flyteidl2/plugins/tensorflow.swagger.json @@ -0,0 +1,50 @@ +{ + "swagger": "2.0", + "info": { + "title": "flyteidl2/plugins/tensorflow.proto", + "version": "version not set" + }, + "consumes": [ + "application/json" + ], + "produces": [ + "application/json" + ], + "paths": {}, + "definitions": { + "googlerpcStatus": { + "type": "object", + "properties": { + "code": { + "type": "integer", + "format": "int32", + "description": "The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]." + }, + "message": { + "type": "string", + "description": "A developer-facing error message, which should be in English. Any\nuser-facing error message should be localized and sent in the\n[google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client." + }, + "details": { + "type": "array", + "items": { + "type": "object", + "$ref": "#/definitions/protobufAny" + }, + "description": "A list of messages that carry the error details. There is a common set of\nmessage types for APIs to use." 
+ } + }, + "description": "The `Status` type defines a logical error model that is suitable for\ndifferent programming environments, including REST APIs and RPC APIs. It is\nused by [gRPC](https://github.com/grpc). Each `Status` message contains\nthree pieces of data: error code, error message, and error details.\n\nYou can find out more about this error model and how to work with it in the\n[API Design Guide](https://cloud.google.com/apis/design/errors)." + }, + "protobufAny": { + "type": "object", + "properties": { + "@type": { + "type": "string", + "description": "A URL/resource name that uniquely identifies the type of the serialized\nprotocol buffer message. This string must contain at least\none \"/\" character. The last segment of the URL's path must represent\nthe fully qualified name of the type (as in\n`path/google.protobuf.Duration`). The name should be in a canonical form\n(e.g., leading \".\" is not accepted).\n\nIn practice, teams usually precompile into the binary all types that they\nexpect it to use in the context of Any. However, for URLs which use the\nscheme `http`, `https`, or no scheme, one can optionally set up a type\nserver that maps type URLs to message definitions as follows:\n\n* If no scheme is provided, `https` is assumed.\n* An HTTP GET on the URL must yield a [google.protobuf.Type][]\n value in binary format, or produce an error.\n* Applications are allowed to cache lookup results based on the\n URL, or have them precompiled into a binary to avoid any\n lookup. Therefore, binary compatibility needs to be preserved\n on changes to types. (Use versioned type names to manage\n breaking changes.)\n\nNote: this functionality is not currently available in the official\nprotobuf release, and it is not used for type URLs beginning with\ntype.googleapis.com. 
As of May 2023, there are no widely used type server\nimplementations and no plans to implement one.\n\nSchemes other than `http`, `https` (or the empty scheme) might be\nused with implementation specific semantics." + } + }, + "additionalProperties": {}, + "description": "`Any` contains an arbitrary serialized protocol buffer message along with a\nURL that describes the type of the serialized message.\n\nProtobuf library provides support to pack/unpack Any values in the form\nof utility functions or additional generated methods of the Any type.\n\nExample 1: Pack and unpack a message in C++.\n\n Foo foo = ...;\n Any any;\n any.PackFrom(foo);\n ...\n if (any.UnpackTo(\u0026foo)) {\n ...\n }\n\nExample 2: Pack and unpack a message in Java.\n\n Foo foo = ...;\n Any any = Any.pack(foo);\n ...\n if (any.is(Foo.class)) {\n foo = any.unpack(Foo.class);\n }\n // or ...\n if (any.isSameTypeAs(Foo.getDefaultInstance())) {\n foo = any.unpack(Foo.getDefaultInstance());\n }\n\n Example 3: Pack and unpack a message in Python.\n\n foo = Foo(...)\n any = Any()\n any.Pack(foo)\n ...\n if any.Is(Foo.DESCRIPTOR):\n any.Unpack(foo)\n ...\n\n Example 4: Pack and unpack a message in Go\n\n foo := \u0026pb.Foo{...}\n any, err := anypb.New(foo)\n if err != nil {\n ...\n }\n ...\n foo := \u0026pb.Foo{}\n if err := any.UnmarshalTo(foo); err != nil {\n ...\n }\n\nThe pack methods provided by protobuf library will by default use\n'type.googleapis.com/full.type.name' as the type URL and the unpack\nmethods only use the fully qualified type name after the last '/'\nin the type URL, for example \"foo.bar.com/x/y.z\" will yield type\nname \"y.z\".\n\nJSON\n====\nThe JSON representation of an `Any` value uses the regular\nrepresentation of the deserialized, embedded message, with an\nadditional field `@type` which contains the type URL. 
Example:\n\n package google.profile;\n message Person {\n string first_name = 1;\n string last_name = 2;\n }\n\n {\n \"@type\": \"type.googleapis.com/google.profile.Person\",\n \"firstName\": \u003cstring\u003e,\n \"lastName\": \u003cstring\u003e\n }\n\nIf the embedded message type is well-known and has a custom JSON\nrepresentation, that representation will be embedded adding a field\n`value` which holds the custom JSON in addition to the `@type`\nfield. Example (for message [google.protobuf.Duration][]):\n\n {\n \"@type\": \"type.googleapis.com/google.protobuf.Duration\",\n \"value\": \"1.212s\"\n }" + } + } +} diff --git a/gen/go/gateway/flyteidl2/service/connector.swagger.json b/gen/go/gateway/flyteidl2/service/connector.swagger.json index f47ce4a9b5..7da18b8a59 100644 --- a/gen/go/gateway/flyteidl2/service/connector.swagger.json +++ b/gen/go/gateway/flyteidl2/service/connector.swagger.json @@ -744,7 +744,7 @@ "type": "array", "items": { "type": "object", - "$ref": "#/definitions/coreKeyValuePair" + "$ref": "#/definitions/flyteidl2coreKeyValuePair" }, "description": "Environment variables will be set as the container is starting up." }, @@ -752,7 +752,7 @@ "type": "array", "items": { "type": "object", - "$ref": "#/definitions/coreKeyValuePair" + "$ref": "#/definitions/flyteidl2coreKeyValuePair" }, "description": "Allows extra configs to be available for the container.\nTODO: elaborate on how configs will become available.\nDeprecated, please use TaskTemplate.config instead." }, @@ -951,20 +951,6 @@ }, "description": "Defines a pod spec and additional pod metadata that is created when a task is executed." }, - "coreKeyValuePair": { - "type": "object", - "properties": { - "key": { - "type": "string", - "description": "required." - }, - "value": { - "type": "string", - "description": "+optional." - } - }, - "description": "A generic key value pair." 
- }, "coreLabelValue": { "type": "object", "properties": { @@ -1751,6 +1737,20 @@ }, "description": "Identity encapsulates the various security identities a task can run as. It's up to the underlying plugin to pick the\nright identity for the execution environment." }, + "flyteidl2coreKeyValuePair": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "required." + }, + "value": { + "type": "string", + "description": "+optional." + } + }, + "description": "A generic key value pair." + }, "flyteidl2coreRuntimeMetadata": { "type": "object", "properties": { @@ -1929,6 +1929,62 @@ } } }, + "flyteidl2pluginsTaskExecutionMetadata": { + "type": "object", + "properties": { + "task_execution_id": { + "$ref": "#/definitions/coreTaskExecutionIdentifier" + }, + "namespace": { + "type": "string", + "title": "k8s namespace where the task is executed in" + }, + "labels": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "title": "Labels attached to the task execution" + }, + "annotations": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "title": "Annotations attached to the task execution" + }, + "k8s_service_account": { + "type": "string", + "title": "k8s service account associated with the task execution" + }, + "environment_variables": { + "type": "object", + "additionalProperties": { + "type": "string" + }, + "title": "Environment variables attached to the task execution" + }, + "max_attempts": { + "type": "integer", + "format": "int32", + "description": "Represents the maximum number of attempts allowed for a task.\nIf a task fails, it can be retried up to this maximum number of attempts." + }, + "interruptible": { + "type": "boolean", + "description": "Indicates whether the task execution can be interrupted.\nIf set to true, the task can be stopped before completion." 
+ }, + "interruptible_failure_threshold": { + "type": "integer", + "format": "int32", + "description": "Specifies the threshold for failure count at which the interruptible property\nwill take effect. If the number of consecutive task failures exceeds this threshold,\ninterruptible behavior will be activated." + }, + "identity": { + "$ref": "#/definitions/flyteidl2coreIdentity", + "title": "Identity of user running this task execution" + } + }, + "description": "Represents a subset of runtime task execution metadata that are relevant to external plugins.\n\nID of the task execution" + }, "googlerpcStatus": { "type": "object", "properties": { @@ -1986,7 +2042,7 @@ "title": "Prefix for where task output data will be written. (e.g. s3://my-bucket/randomstring)" }, "task_execution_metadata": { - "$ref": "#/definitions/pluginsTaskExecutionMetadata", + "$ref": "#/definitions/flyteidl2pluginsTaskExecutionMetadata", "description": "subset of runtime task execution metadata." }, "connection": { @@ -2103,62 +2159,6 @@ } } }, - "pluginsTaskExecutionMetadata": { - "type": "object", - "properties": { - "task_execution_id": { - "$ref": "#/definitions/coreTaskExecutionIdentifier" - }, - "namespace": { - "type": "string", - "title": "k8s namespace where the task is executed in" - }, - "labels": { - "type": "object", - "additionalProperties": { - "type": "string" - }, - "title": "Labels attached to the task execution" - }, - "annotations": { - "type": "object", - "additionalProperties": { - "type": "string" - }, - "title": "Annotations attached to the task execution" - }, - "k8s_service_account": { - "type": "string", - "title": "k8s service account associated with the task execution" - }, - "environment_variables": { - "type": "object", - "additionalProperties": { - "type": "string" - }, - "title": "Environment variables attached to the task execution" - }, - "max_attempts": { - "type": "integer", - "format": "int32", - "description": "Represents the maximum number of attempts 
allowed for a task.\nIf a task fails, it can be retried up to this maximum number of attempts." - }, - "interruptible": { - "type": "boolean", - "description": "Indicates whether the task execution can be interrupted.\nIf set to true, the task can be stopped before completion." - }, - "interruptible_failure_threshold": { - "type": "integer", - "format": "int32", - "description": "Specifies the threshold for failure count at which the interruptible property\nwill take effect. If the number of consecutive task failures exceeds this threshold,\ninterruptible behavior will be activated." - }, - "identity": { - "$ref": "#/definitions/flyteidl2coreIdentity", - "title": "Identity of user running this task execution" - } - }, - "description": "Represents a subset of runtime task execution metadata that are relevant to external plugins.\n\nID of the task execution" - }, "protobufAny": { "type": "object", "properties": { @@ -2193,7 +2193,7 @@ "type": "array", "items": { "type": "object", - "$ref": "#/definitions/coreKeyValuePair" + "$ref": "#/definitions/flyteidl2coreKeyValuePair" }, "title": "Context for the action. If an action receives context, it'll automatically pass it to any actions it spawns.\nContext will not be used for cache key computation.\nExamples for context include:\n - User-provided metadata that is not part of the action's inputs.\n - Information about the environment the action is running in (e.g. 
cluster, region, etc.)\n - Tracing information about the action" } diff --git a/gen/go/gateway/flyteidl2/task/task_service.swagger.json b/gen/go/gateway/flyteidl2/task/task_service.swagger.json index 536d1f1915..8aa56b09ca 100644 --- a/gen/go/gateway/flyteidl2/task/task_service.swagger.json +++ b/gen/go/gateway/flyteidl2/task/task_service.swagger.json @@ -745,7 +745,7 @@ "type": "array", "items": { "type": "object", - "$ref": "#/definitions/coreKeyValuePair" + "$ref": "#/definitions/flyteidl2coreKeyValuePair" }, "description": "Environment variables will be set as the container is starting up." }, @@ -753,7 +753,7 @@ "type": "array", "items": { "type": "object", - "$ref": "#/definitions/coreKeyValuePair" + "$ref": "#/definitions/flyteidl2coreKeyValuePair" }, "description": "Allows extra configs to be available for the container.\nTODO: elaborate on how configs will become available.\nDeprecated, please use TaskTemplate.config instead." }, @@ -938,20 +938,6 @@ }, "description": "Defines a pod spec and additional pod metadata that is created when a task is executed." }, - "coreKeyValuePair": { - "type": "object", - "properties": { - "key": { - "type": "string", - "description": "required." - }, - "value": { - "type": "string", - "description": "+optional." - } - }, - "description": "A generic key value pair." - }, "coreLabelValue": { "type": "object", "properties": { @@ -1755,6 +1741,20 @@ }, "description": "Identity encapsulates the various security identities a task can run as. It's up to the underlying plugin to pick the\nright identity for the execution environment." }, + "flyteidl2coreKeyValuePair": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "required." + }, + "value": { + "type": "string", + "description": "+optional." + } + }, + "description": "A generic key value pair." 
+ }, "flyteidl2coreRuntimeMetadata": { "type": "object", "properties": { @@ -2044,7 +2044,7 @@ "type": "array", "items": { "type": "object", - "$ref": "#/definitions/coreKeyValuePair" + "$ref": "#/definitions/flyteidl2coreKeyValuePair" }, "description": "Map of custom environment variables to be applied to the execution resource." } @@ -2105,7 +2105,7 @@ "type": "array", "items": { "type": "object", - "$ref": "#/definitions/coreKeyValuePair" + "$ref": "#/definitions/flyteidl2coreKeyValuePair" }, "title": "Context for the action. If an action receives context, it'll automatically pass it to any actions it spawns.\nContext will not be used for cache key computation.\nExamples for context include:\n - User-provided metadata that is not part of the action's inputs.\n - Information about the environment the action is running in (e.g. cluster, region, etc.)\n - Tracing information about the action" } diff --git a/gen/go/gateway/flyteidl2/trigger/trigger_service.swagger.json b/gen/go/gateway/flyteidl2/trigger/trigger_service.swagger.json index eeec4bdd76..c22081f854 100644 --- a/gen/go/gateway/flyteidl2/trigger/trigger_service.swagger.json +++ b/gen/go/gateway/flyteidl2/trigger/trigger_service.swagger.json @@ -541,20 +541,6 @@ }, "description": "Represents an error thrown from a node." }, - "coreKeyValuePair": { - "type": "object", - "properties": { - "key": { - "type": "string", - "description": "required." - }, - "value": { - "type": "string", - "description": "+optional." - } - }, - "description": "A generic key value pair." - }, "coreLiteral": { "type": "object", "properties": { @@ -1059,6 +1045,20 @@ }, "description": "Identity encapsulates the various security identities a task can run as. It's up to the underlying plugin to pick the\nright identity for the execution environment." }, + "flyteidl2coreKeyValuePair": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "required." 
+ }, + "value": { + "type": "string", + "description": "+optional." + } + }, + "description": "A generic key value pair." + }, "flyteidl2coreSecret": { "type": "object", "properties": { @@ -1232,7 +1232,7 @@ "type": "array", "items": { "type": "object", - "$ref": "#/definitions/coreKeyValuePair" + "$ref": "#/definitions/flyteidl2coreKeyValuePair" }, "description": "Map of custom environment variables to be applied to the execution resource." } @@ -1283,7 +1283,7 @@ "type": "array", "items": { "type": "object", - "$ref": "#/definitions/coreKeyValuePair" + "$ref": "#/definitions/flyteidl2coreKeyValuePair" }, "title": "Context for the action. If an action receives context, it'll automatically pass it to any actions it spawns.\nContext will not be used for cache key computation.\nExamples for context include:\n - User-provided metadata that is not part of the action's inputs.\n - Information about the environment the action is running in (e.g. cluster, region, etc.)\n - Tracing information about the action" } diff --git a/gen/go/gateway/flyteidl2/workflow/queue_service.swagger.json b/gen/go/gateway/flyteidl2/workflow/queue_service.swagger.json index f0e7ed7461..0049c34ce2 100644 --- a/gen/go/gateway/flyteidl2/workflow/queue_service.swagger.json +++ b/gen/go/gateway/flyteidl2/workflow/queue_service.swagger.json @@ -364,7 +364,7 @@ "type": "array", "items": { "type": "object", - "$ref": "#/definitions/coreKeyValuePair" + "$ref": "#/definitions/flyteidl2coreKeyValuePair" }, "description": "Environment variables will be set as the container is starting up." }, @@ -372,7 +372,7 @@ "type": "array", "items": { "type": "object", - "$ref": "#/definitions/coreKeyValuePair" + "$ref": "#/definitions/flyteidl2coreKeyValuePair" }, "description": "Allows extra configs to be available for the container.\nTODO: elaborate on how configs will become available.\nDeprecated, please use TaskTemplate.config instead." 
}, @@ -557,20 +557,6 @@ }, "description": "Defines a pod spec and additional pod metadata that is created when a task is executed." }, - "coreKeyValuePair": { - "type": "object", - "properties": { - "key": { - "type": "string", - "description": "required." - }, - "value": { - "type": "string", - "description": "+optional." - } - }, - "description": "A generic key value pair." - }, "coreLabelValue": { "type": "object", "properties": { @@ -1286,6 +1272,20 @@ }, "description": "Identity encapsulates the various security identities a task can run as. It's up to the underlying plugin to pick the\nright identity for the execution environment." }, + "flyteidl2coreKeyValuePair": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "required." + }, + "value": { + "type": "string", + "description": "+optional." + } + }, + "description": "A generic key value pair." + }, "flyteidl2coreRuntimeMetadata": { "type": "object", "properties": { @@ -1547,7 +1547,7 @@ "type": "array", "items": { "type": "object", - "$ref": "#/definitions/coreKeyValuePair" + "$ref": "#/definitions/flyteidl2coreKeyValuePair" }, "description": "Map of custom environment variables to be applied to the execution resource." } diff --git a/gen/go/gateway/flyteidl2/workflow/run_service.swagger.json b/gen/go/gateway/flyteidl2/workflow/run_service.swagger.json index 1a49b1d806..6600629477 100644 --- a/gen/go/gateway/flyteidl2/workflow/run_service.swagger.json +++ b/gen/go/gateway/flyteidl2/workflow/run_service.swagger.json @@ -62,15 +62,6 @@ "description": "- JSON: JSON / YAML for the metadata (which contains inlined primitive values). 
The representation is inline with the standard json specification as specified - https://www.json.org/json-en.html\n - PROTO: Proto is a serialized binary of `core.LiteralMap` defined in flyteidl/core", "title": "LiteralMapFormat decides the encoding format in which the input metadata should be made available to the containers.\nIf the user has access to the protocol buffer definitions, it is recommended to use the PROTO format.\nJSON and YAML do not need any protobuf definitions to read it\nAll remote references in core.LiteralMap are replaced with local filesystem references (the data is downloaded to local filesystem)" }, - "ErrorInfoKind": { - "type": "string", - "enum": [ - "KIND_UNSPECIFIED", - "KIND_USER", - "KIND_SYSTEM" - ], - "default": "KIND_UNSPECIFIED" - }, "FilterFunction": { "type": "string", "enum": [ @@ -828,7 +819,7 @@ "type": "array", "items": { "type": "object", - "$ref": "#/definitions/coreKeyValuePair" + "$ref": "#/definitions/flyteidl2coreKeyValuePair" }, "description": "Environment variables will be set as the container is starting up." }, @@ -836,7 +827,7 @@ "type": "array", "items": { "type": "object", - "$ref": "#/definitions/coreKeyValuePair" + "$ref": "#/definitions/flyteidl2coreKeyValuePair" }, "description": "Allows extra configs to be available for the container.\nTODO: elaborate on how configs will become available.\nDeprecated, please use TaskTemplate.config instead." }, @@ -1033,20 +1024,6 @@ }, "description": "Defines a pod spec and additional pod metadata that is created when a task is executed." }, - "coreKeyValuePair": { - "type": "object", - "properties": { - "key": { - "type": "string", - "description": "required." - }, - "value": { - "type": "string", - "description": "+optional." - } - }, - "description": "A generic key value pair." - }, "coreLabelValue": { "type": "object", "properties": { @@ -1925,6 +1902,20 @@ }, "description": "Identity encapsulates the various security identities a task can run as. 
It's up to the underlying plugin to pick the\nright identity for the execution environment." }, + "flyteidl2coreKeyValuePair": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "required." + }, + "value": { + "type": "string", + "description": "+optional." + } + }, + "description": "A generic key value pair." + }, "flyteidl2coreRuntimeMetadata": { "type": "object", "properties": { @@ -2204,7 +2195,7 @@ "type": "array", "items": { "type": "object", - "$ref": "#/definitions/coreKeyValuePair" + "$ref": "#/definitions/flyteidl2coreKeyValuePair" }, "description": "Map of custom environment variables to be applied to the execution resource." } @@ -2226,7 +2217,7 @@ "type": "array", "items": { "type": "object", - "$ref": "#/definitions/coreKeyValuePair" + "$ref": "#/definitions/flyteidl2coreKeyValuePair" }, "title": "Context for the action. If an action receives context, it'll automatically pass it to any actions it spawns.\nContext will not be used for cache key computation.\nExamples for context include:\n - User-provided metadata that is not part of the action's inputs.\n - Information about the environment the action is running in (e.g. cluster, region, etc.)\n - Tracing information about the action" } @@ -2695,12 +2686,21 @@ "description": "Error message." }, "kind": { - "$ref": "#/definitions/ErrorInfoKind", + "$ref": "#/definitions/workflowErrorInfoKind", "description": "Error kind." } }, "description": "ErrorInfo captures details of an error." 
}, + "workflowErrorInfoKind": { + "type": "string", + "enum": [ + "KIND_UNSPECIFIED", + "KIND_USER", + "KIND_SYSTEM" + ], + "default": "KIND_UNSPECIFIED" + }, "workflowGetActionDataResponse": { "type": "object", "properties": { diff --git a/gen/go/gateway/flyteidl2/workflow/translator_service.swagger.json b/gen/go/gateway/flyteidl2/workflow/translator_service.swagger.json index f4598b68c8..2ca320b98c 100644 --- a/gen/go/gateway/flyteidl2/workflow/translator_service.swagger.json +++ b/gen/go/gateway/flyteidl2/workflow/translator_service.swagger.json @@ -328,7 +328,7 @@ "type": "array", "items": { "type": "object", - "$ref": "#/definitions/coreKeyValuePair" + "$ref": "#/definitions/flyteidl2coreKeyValuePair" }, "description": "Environment variables will be set as the container is starting up." }, @@ -336,7 +336,7 @@ "type": "array", "items": { "type": "object", - "$ref": "#/definitions/coreKeyValuePair" + "$ref": "#/definitions/flyteidl2coreKeyValuePair" }, "description": "Allows extra configs to be available for the container.\nTODO: elaborate on how configs will become available.\nDeprecated, please use TaskTemplate.config instead." }, @@ -521,20 +521,6 @@ }, "description": "Defines a pod spec and additional pod metadata that is created when a task is executed." }, - "coreKeyValuePair": { - "type": "object", - "properties": { - "key": { - "type": "string", - "description": "required." - }, - "value": { - "type": "string", - "description": "+optional." - } - }, - "description": "A generic key value pair." - }, "coreLabelValue": { "type": "object", "properties": { @@ -1250,6 +1236,20 @@ }, "description": "Identity encapsulates the various security identities a task can run as. It's up to the underlying plugin to pick the\nright identity for the execution environment." }, + "flyteidl2coreKeyValuePair": { + "type": "object", + "properties": { + "key": { + "type": "string", + "description": "required." + }, + "value": { + "type": "string", + "description": "+optional." 
+ } + }, + "description": "A generic key value pair." + }, "flyteidl2coreRuntimeMetadata": { "type": "object", "properties": { diff --git a/gen/python/flyteidl2/cacheservice/__init__.py b/gen/python/flyteidl2/cacheservice/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/gen/python/flyteidl2/cacheservice/cacheservice_pb2.py b/gen/python/flyteidl2/cacheservice/cacheservice_pb2.py new file mode 100644 index 0000000000..352d73053f --- /dev/null +++ b/gen/python/flyteidl2/cacheservice/cacheservice_pb2.py @@ -0,0 +1,64 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: flyteidl2/cacheservice/cacheservice.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from flyteidl2.core import identifier_pb2 as flyteidl2_dot_core_dot_identifier__pb2 +from flyteidl2.core import literals_pb2 as flyteidl2_dot_core_dot_literals__pb2 +from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 +from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n)flyteidl2/cacheservice/cacheservice.proto\x12\x16\x66lyteidl2.cacheservice\x1a\x1f\x66lyteidl2/core/identifier.proto\x1a\x1d\x66lyteidl2/core/literals.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\x97\x01\n\x0eKeyMapMetadata\x12J\n\x06values\x18\x01 \x03(\x0b\x32\x32.flyteidl2.cacheservice.KeyMapMetadata.ValuesEntryR\x06values\x1a\x39\n\x0bValuesEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 
\x01(\tR\x05value:\x02\x38\x01\"\x93\x02\n\x08Metadata\x12G\n\x11source_identifier\x18\x01 \x01(\x0b\x32\x1a.flyteidl2.core.IdentifierR\x10sourceIdentifier\x12?\n\x07key_map\x18\x02 \x01(\x0b\x32&.flyteidl2.cacheservice.KeyMapMetadataR\x06keyMap\x12\x39\n\ncreated_at\x18\x03 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\tcreatedAt\x12\x42\n\x0flast_updated_at\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\rlastUpdatedAt\"\xbe\x01\n\x0c\x43\x61\x63hedOutput\x12\x45\n\x0foutput_literals\x18\x01 \x01(\x0b\x32\x1a.flyteidl2.core.LiteralMapH\x00R\x0eoutputLiterals\x12\x1f\n\noutput_uri\x18\x02 \x01(\tH\x00R\toutputUri\x12<\n\x08metadata\x18\x03 \x01(\x0b\x32 .flyteidl2.cacheservice.MetadataR\x08metadataB\x08\n\x06output\"#\n\x0fGetCacheRequest\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\"P\n\x10GetCacheResponse\x12<\n\x06output\x18\x01 \x01(\x0b\x32$.flyteidl2.cacheservice.CachedOutputR\x06output\"\x84\x01\n\x0fOverwriteOutput\x12\x1c\n\toverwrite\x18\x01 \x01(\x08R\toverwrite\x12\x1f\n\x0b\x64\x65lete_blob\x18\x02 \x01(\x08R\ndeleteBlob\x12\x32\n\x07max_age\x18\x03 \x01(\x0b\x32\x19.google.protobuf.DurationR\x06maxAge\"\xa8\x01\n\x0fPutCacheRequest\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12<\n\x06output\x18\x02 \x01(\x0b\x32$.flyteidl2.cacheservice.CachedOutputR\x06output\x12\x45\n\toverwrite\x18\x03 \x01(\x0b\x32\'.flyteidl2.cacheservice.OverwriteOutputR\toverwrite\"\x12\n\x10PutCacheResponse\"&\n\x12\x44\x65leteCacheRequest\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\"\x15\n\x13\x44\x65leteCacheResponse\"\xbf\x01\n\x0bReservation\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x19\n\x08owner_id\x18\x02 \x01(\tR\x07ownerId\x12H\n\x12heartbeat_interval\x18\x03 \x01(\x0b\x32\x19.google.protobuf.DurationR\x11heartbeatInterval\x12\x39\n\nexpires_at\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\texpiresAt\"\x96\x01\n\x1dGetOrExtendReservationRequest\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x19\n\x08owner_id\x18\x02 
\x01(\tR\x07ownerId\x12H\n\x12heartbeat_interval\x18\x03 \x01(\x0b\x32\x19.google.protobuf.DurationR\x11heartbeatInterval\"g\n\x1eGetOrExtendReservationResponse\x12\x45\n\x0breservation\x18\x01 \x01(\x0b\x32#.flyteidl2.cacheservice.ReservationR\x0breservation\"H\n\x19ReleaseReservationRequest\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x19\n\x08owner_id\x18\x02 \x01(\tR\x07ownerId\"\x1c\n\x1aReleaseReservationResponse2\xac\x04\n\x0c\x43\x61\x63heService\x12X\n\x03Get\x12\'.flyteidl2.cacheservice.GetCacheRequest\x1a(.flyteidl2.cacheservice.GetCacheResponse\x12X\n\x03Put\x12\'.flyteidl2.cacheservice.PutCacheRequest\x1a(.flyteidl2.cacheservice.PutCacheResponse\x12\x61\n\x06\x44\x65lete\x12*.flyteidl2.cacheservice.DeleteCacheRequest\x1a+.flyteidl2.cacheservice.DeleteCacheResponse\x12\x87\x01\n\x16GetOrExtendReservation\x12\x35.flyteidl2.cacheservice.GetOrExtendReservationRequest\x1a\x36.flyteidl2.cacheservice.GetOrExtendReservationResponse\x12{\n\x12ReleaseReservation\x12\x31.flyteidl2.cacheservice.ReleaseReservationRequest\x1a\x32.flyteidl2.cacheservice.ReleaseReservationResponseB\xe6\x01\n\x1a\x63om.flyteidl2.cacheserviceB\x11\x43\x61\x63heserviceProtoH\x02P\x01Z:github.com/flyteorg/flyte/v2/gen/go/flyteidl2/cacheservice\xa2\x02\x03\x46\x43X\xaa\x02\x16\x46lyteidl2.Cacheservice\xca\x02\x16\x46lyteidl2\\Cacheservice\xe2\x02\"Flyteidl2\\Cacheservice\\GPBMetadata\xea\x02\x17\x46lyteidl2::Cacheserviceb\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flyteidl2.cacheservice.cacheservice_pb2', _globals) +if _descriptor._USE_C_DESCRIPTORS == False: + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = 
b'\n\032com.flyteidl2.cacheserviceB\021CacheserviceProtoH\002P\001Z:github.com/flyteorg/flyte/v2/gen/go/flyteidl2/cacheservice\242\002\003FCX\252\002\026Flyteidl2.Cacheservice\312\002\026Flyteidl2\\Cacheservice\342\002\"Flyteidl2\\Cacheservice\\GPBMetadata\352\002\027Flyteidl2::Cacheservice' + _KEYMAPMETADATA_VALUESENTRY._options = None + _KEYMAPMETADATA_VALUESENTRY._serialized_options = b'8\001' + _globals['_KEYMAPMETADATA']._serialized_start=199 + _globals['_KEYMAPMETADATA']._serialized_end=350 + _globals['_KEYMAPMETADATA_VALUESENTRY']._serialized_start=293 + _globals['_KEYMAPMETADATA_VALUESENTRY']._serialized_end=350 + _globals['_METADATA']._serialized_start=353 + _globals['_METADATA']._serialized_end=628 + _globals['_CACHEDOUTPUT']._serialized_start=631 + _globals['_CACHEDOUTPUT']._serialized_end=821 + _globals['_GETCACHEREQUEST']._serialized_start=823 + _globals['_GETCACHEREQUEST']._serialized_end=858 + _globals['_GETCACHERESPONSE']._serialized_start=860 + _globals['_GETCACHERESPONSE']._serialized_end=940 + _globals['_OVERWRITEOUTPUT']._serialized_start=943 + _globals['_OVERWRITEOUTPUT']._serialized_end=1075 + _globals['_PUTCACHEREQUEST']._serialized_start=1078 + _globals['_PUTCACHEREQUEST']._serialized_end=1246 + _globals['_PUTCACHERESPONSE']._serialized_start=1248 + _globals['_PUTCACHERESPONSE']._serialized_end=1266 + _globals['_DELETECACHEREQUEST']._serialized_start=1268 + _globals['_DELETECACHEREQUEST']._serialized_end=1306 + _globals['_DELETECACHERESPONSE']._serialized_start=1308 + _globals['_DELETECACHERESPONSE']._serialized_end=1329 + _globals['_RESERVATION']._serialized_start=1332 + _globals['_RESERVATION']._serialized_end=1523 + _globals['_GETOREXTENDRESERVATIONREQUEST']._serialized_start=1526 + _globals['_GETOREXTENDRESERVATIONREQUEST']._serialized_end=1676 + _globals['_GETOREXTENDRESERVATIONRESPONSE']._serialized_start=1678 + _globals['_GETOREXTENDRESERVATIONRESPONSE']._serialized_end=1781 + 
_globals['_RELEASERESERVATIONREQUEST']._serialized_start=1783 + _globals['_RELEASERESERVATIONREQUEST']._serialized_end=1855 + _globals['_RELEASERESERVATIONRESPONSE']._serialized_start=1857 + _globals['_RELEASERESERVATIONRESPONSE']._serialized_end=1885 + _globals['_CACHESERVICE']._serialized_start=1888 + _globals['_CACHESERVICE']._serialized_end=2444 +# @@protoc_insertion_point(module_scope) diff --git a/gen/python/flyteidl2/cacheservice/cacheservice_pb2.pyi b/gen/python/flyteidl2/cacheservice/cacheservice_pb2.pyi new file mode 100644 index 0000000000..7defbd0c33 --- /dev/null +++ b/gen/python/flyteidl2/cacheservice/cacheservice_pb2.pyi @@ -0,0 +1,131 @@ +from flyteidl2.core import identifier_pb2 as _identifier_pb2 +from flyteidl2.core import literals_pb2 as _literals_pb2 +from google.protobuf import duration_pb2 as _duration_pb2 +from google.protobuf import timestamp_pb2 as _timestamp_pb2 +from google.protobuf.internal import containers as _containers +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from typing import ClassVar as _ClassVar, Mapping as _Mapping, Optional as _Optional, Union as _Union + +DESCRIPTOR: _descriptor.FileDescriptor + +class KeyMapMetadata(_message.Message): + __slots__ = ["values"] + class ValuesEntry(_message.Message): + __slots__ = ["key", "value"] + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: str + def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ... + VALUES_FIELD_NUMBER: _ClassVar[int] + values: _containers.ScalarMap[str, str] + def __init__(self, values: _Optional[_Mapping[str, str]] = ...) -> None: ... 
+ +class Metadata(_message.Message): + __slots__ = ["source_identifier", "key_map", "created_at", "last_updated_at"] + SOURCE_IDENTIFIER_FIELD_NUMBER: _ClassVar[int] + KEY_MAP_FIELD_NUMBER: _ClassVar[int] + CREATED_AT_FIELD_NUMBER: _ClassVar[int] + LAST_UPDATED_AT_FIELD_NUMBER: _ClassVar[int] + source_identifier: _identifier_pb2.Identifier + key_map: KeyMapMetadata + created_at: _timestamp_pb2.Timestamp + last_updated_at: _timestamp_pb2.Timestamp + def __init__(self, source_identifier: _Optional[_Union[_identifier_pb2.Identifier, _Mapping]] = ..., key_map: _Optional[_Union[KeyMapMetadata, _Mapping]] = ..., created_at: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., last_updated_at: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ...) -> None: ... + +class CachedOutput(_message.Message): + __slots__ = ["output_literals", "output_uri", "metadata"] + OUTPUT_LITERALS_FIELD_NUMBER: _ClassVar[int] + OUTPUT_URI_FIELD_NUMBER: _ClassVar[int] + METADATA_FIELD_NUMBER: _ClassVar[int] + output_literals: _literals_pb2.LiteralMap + output_uri: str + metadata: Metadata + def __init__(self, output_literals: _Optional[_Union[_literals_pb2.LiteralMap, _Mapping]] = ..., output_uri: _Optional[str] = ..., metadata: _Optional[_Union[Metadata, _Mapping]] = ...) -> None: ... + +class GetCacheRequest(_message.Message): + __slots__ = ["key"] + KEY_FIELD_NUMBER: _ClassVar[int] + key: str + def __init__(self, key: _Optional[str] = ...) -> None: ... + +class GetCacheResponse(_message.Message): + __slots__ = ["output"] + OUTPUT_FIELD_NUMBER: _ClassVar[int] + output: CachedOutput + def __init__(self, output: _Optional[_Union[CachedOutput, _Mapping]] = ...) -> None: ... 
+ +class OverwriteOutput(_message.Message): + __slots__ = ["overwrite", "delete_blob", "max_age"] + OVERWRITE_FIELD_NUMBER: _ClassVar[int] + DELETE_BLOB_FIELD_NUMBER: _ClassVar[int] + MAX_AGE_FIELD_NUMBER: _ClassVar[int] + overwrite: bool + delete_blob: bool + max_age: _duration_pb2.Duration + def __init__(self, overwrite: bool = ..., delete_blob: bool = ..., max_age: _Optional[_Union[_duration_pb2.Duration, _Mapping]] = ...) -> None: ... + +class PutCacheRequest(_message.Message): + __slots__ = ["key", "output", "overwrite"] + KEY_FIELD_NUMBER: _ClassVar[int] + OUTPUT_FIELD_NUMBER: _ClassVar[int] + OVERWRITE_FIELD_NUMBER: _ClassVar[int] + key: str + output: CachedOutput + overwrite: OverwriteOutput + def __init__(self, key: _Optional[str] = ..., output: _Optional[_Union[CachedOutput, _Mapping]] = ..., overwrite: _Optional[_Union[OverwriteOutput, _Mapping]] = ...) -> None: ... + +class PutCacheResponse(_message.Message): + __slots__ = [] + def __init__(self) -> None: ... + +class DeleteCacheRequest(_message.Message): + __slots__ = ["key"] + KEY_FIELD_NUMBER: _ClassVar[int] + key: str + def __init__(self, key: _Optional[str] = ...) -> None: ... + +class DeleteCacheResponse(_message.Message): + __slots__ = [] + def __init__(self) -> None: ... + +class Reservation(_message.Message): + __slots__ = ["key", "owner_id", "heartbeat_interval", "expires_at"] + KEY_FIELD_NUMBER: _ClassVar[int] + OWNER_ID_FIELD_NUMBER: _ClassVar[int] + HEARTBEAT_INTERVAL_FIELD_NUMBER: _ClassVar[int] + EXPIRES_AT_FIELD_NUMBER: _ClassVar[int] + key: str + owner_id: str + heartbeat_interval: _duration_pb2.Duration + expires_at: _timestamp_pb2.Timestamp + def __init__(self, key: _Optional[str] = ..., owner_id: _Optional[str] = ..., heartbeat_interval: _Optional[_Union[_duration_pb2.Duration, _Mapping]] = ..., expires_at: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ...) -> None: ... 
+ +class GetOrExtendReservationRequest(_message.Message): + __slots__ = ["key", "owner_id", "heartbeat_interval"] + KEY_FIELD_NUMBER: _ClassVar[int] + OWNER_ID_FIELD_NUMBER: _ClassVar[int] + HEARTBEAT_INTERVAL_FIELD_NUMBER: _ClassVar[int] + key: str + owner_id: str + heartbeat_interval: _duration_pb2.Duration + def __init__(self, key: _Optional[str] = ..., owner_id: _Optional[str] = ..., heartbeat_interval: _Optional[_Union[_duration_pb2.Duration, _Mapping]] = ...) -> None: ... + +class GetOrExtendReservationResponse(_message.Message): + __slots__ = ["reservation"] + RESERVATION_FIELD_NUMBER: _ClassVar[int] + reservation: Reservation + def __init__(self, reservation: _Optional[_Union[Reservation, _Mapping]] = ...) -> None: ... + +class ReleaseReservationRequest(_message.Message): + __slots__ = ["key", "owner_id"] + KEY_FIELD_NUMBER: _ClassVar[int] + OWNER_ID_FIELD_NUMBER: _ClassVar[int] + key: str + owner_id: str + def __init__(self, key: _Optional[str] = ..., owner_id: _Optional[str] = ...) -> None: ... + +class ReleaseReservationResponse(_message.Message): + __slots__ = [] + def __init__(self) -> None: ... diff --git a/gen/python/flyteidl2/cacheservice/cacheservice_pb2_grpc.py b/gen/python/flyteidl2/cacheservice/cacheservice_pb2_grpc.py new file mode 100644 index 0000000000..bd3fb51607 --- /dev/null +++ b/gen/python/flyteidl2/cacheservice/cacheservice_pb2_grpc.py @@ -0,0 +1,209 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + +from flyteidl2.cacheservice import cacheservice_pb2 as flyteidl2_dot_cacheservice_dot_cacheservice__pb2 + + +class CacheServiceStub(object): + """ + CacheService defines operations for cache management including retrieval, storage, and deletion of cached task/workflow outputs. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.Get = channel.unary_unary( + '/flyteidl2.cacheservice.CacheService/Get', + request_serializer=flyteidl2_dot_cacheservice_dot_cacheservice__pb2.GetCacheRequest.SerializeToString, + response_deserializer=flyteidl2_dot_cacheservice_dot_cacheservice__pb2.GetCacheResponse.FromString, + ) + self.Put = channel.unary_unary( + '/flyteidl2.cacheservice.CacheService/Put', + request_serializer=flyteidl2_dot_cacheservice_dot_cacheservice__pb2.PutCacheRequest.SerializeToString, + response_deserializer=flyteidl2_dot_cacheservice_dot_cacheservice__pb2.PutCacheResponse.FromString, + ) + self.Delete = channel.unary_unary( + '/flyteidl2.cacheservice.CacheService/Delete', + request_serializer=flyteidl2_dot_cacheservice_dot_cacheservice__pb2.DeleteCacheRequest.SerializeToString, + response_deserializer=flyteidl2_dot_cacheservice_dot_cacheservice__pb2.DeleteCacheResponse.FromString, + ) + self.GetOrExtendReservation = channel.unary_unary( + '/flyteidl2.cacheservice.CacheService/GetOrExtendReservation', + request_serializer=flyteidl2_dot_cacheservice_dot_cacheservice__pb2.GetOrExtendReservationRequest.SerializeToString, + response_deserializer=flyteidl2_dot_cacheservice_dot_cacheservice__pb2.GetOrExtendReservationResponse.FromString, + ) + self.ReleaseReservation = channel.unary_unary( + '/flyteidl2.cacheservice.CacheService/ReleaseReservation', + request_serializer=flyteidl2_dot_cacheservice_dot_cacheservice__pb2.ReleaseReservationRequest.SerializeToString, + response_deserializer=flyteidl2_dot_cacheservice_dot_cacheservice__pb2.ReleaseReservationResponse.FromString, + ) + + +class CacheServiceServicer(object): + """ + CacheService defines operations for cache management including retrieval, storage, and deletion of cached task/workflow outputs. + """ + + def Get(self, request, context): + """Retrieves cached data by key. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Put(self, request, context): + """Stores or updates cached data by key. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Delete(self, request, context): + """Deletes cached data by key. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetOrExtendReservation(self, request, context): + """Get or extend a reservation for a cache key + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ReleaseReservation(self, request, context): + """Release the reservation for a cache key + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_CacheServiceServicer_to_server(servicer, server): + rpc_method_handlers = { + 'Get': grpc.unary_unary_rpc_method_handler( + servicer.Get, + request_deserializer=flyteidl2_dot_cacheservice_dot_cacheservice__pb2.GetCacheRequest.FromString, + response_serializer=flyteidl2_dot_cacheservice_dot_cacheservice__pb2.GetCacheResponse.SerializeToString, + ), + 'Put': grpc.unary_unary_rpc_method_handler( + servicer.Put, + request_deserializer=flyteidl2_dot_cacheservice_dot_cacheservice__pb2.PutCacheRequest.FromString, + response_serializer=flyteidl2_dot_cacheservice_dot_cacheservice__pb2.PutCacheResponse.SerializeToString, + ), + 'Delete': grpc.unary_unary_rpc_method_handler( + servicer.Delete, + request_deserializer=flyteidl2_dot_cacheservice_dot_cacheservice__pb2.DeleteCacheRequest.FromString, + 
response_serializer=flyteidl2_dot_cacheservice_dot_cacheservice__pb2.DeleteCacheResponse.SerializeToString, + ), + 'GetOrExtendReservation': grpc.unary_unary_rpc_method_handler( + servicer.GetOrExtendReservation, + request_deserializer=flyteidl2_dot_cacheservice_dot_cacheservice__pb2.GetOrExtendReservationRequest.FromString, + response_serializer=flyteidl2_dot_cacheservice_dot_cacheservice__pb2.GetOrExtendReservationResponse.SerializeToString, + ), + 'ReleaseReservation': grpc.unary_unary_rpc_method_handler( + servicer.ReleaseReservation, + request_deserializer=flyteidl2_dot_cacheservice_dot_cacheservice__pb2.ReleaseReservationRequest.FromString, + response_serializer=flyteidl2_dot_cacheservice_dot_cacheservice__pb2.ReleaseReservationResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'flyteidl2.cacheservice.CacheService', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + + # This class is part of an EXPERIMENTAL API. +class CacheService(object): + """ + CacheService defines operations for cache management including retrieval, storage, and deletion of cached task/workflow outputs. 
+ """ + + @staticmethod + def Get(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flyteidl2.cacheservice.CacheService/Get', + flyteidl2_dot_cacheservice_dot_cacheservice__pb2.GetCacheRequest.SerializeToString, + flyteidl2_dot_cacheservice_dot_cacheservice__pb2.GetCacheResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def Put(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flyteidl2.cacheservice.CacheService/Put', + flyteidl2_dot_cacheservice_dot_cacheservice__pb2.PutCacheRequest.SerializeToString, + flyteidl2_dot_cacheservice_dot_cacheservice__pb2.PutCacheResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def Delete(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flyteidl2.cacheservice.CacheService/Delete', + flyteidl2_dot_cacheservice_dot_cacheservice__pb2.DeleteCacheRequest.SerializeToString, + flyteidl2_dot_cacheservice_dot_cacheservice__pb2.DeleteCacheResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def GetOrExtendReservation(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + 
metadata=None): + return grpc.experimental.unary_unary(request, target, '/flyteidl2.cacheservice.CacheService/GetOrExtendReservation', + flyteidl2_dot_cacheservice_dot_cacheservice__pb2.GetOrExtendReservationRequest.SerializeToString, + flyteidl2_dot_cacheservice_dot_cacheservice__pb2.GetOrExtendReservationResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def ReleaseReservation(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flyteidl2.cacheservice.CacheService/ReleaseReservation', + flyteidl2_dot_cacheservice_dot_cacheservice__pb2.ReleaseReservationRequest.SerializeToString, + flyteidl2_dot_cacheservice_dot_cacheservice__pb2.ReleaseReservationResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/gen/python/flyteidl2/cacheservice/v2/__init__.py b/gen/python/flyteidl2/cacheservice/v2/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/gen/python/flyteidl2/cacheservice/v2/cacheservice_pb2.py b/gen/python/flyteidl2/cacheservice/v2/cacheservice_pb2.py new file mode 100644 index 0000000000..a6a602a362 --- /dev/null +++ b/gen/python/flyteidl2/cacheservice/v2/cacheservice_pb2.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: flyteidl2/cacheservice/v2/cacheservice.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from buf.validate import validate_pb2 as buf_dot_validate_dot_validate__pb2 +from flyteidl2.cacheservice import cacheservice_pb2 as flyteidl2_dot_cacheservice_dot_cacheservice__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n,flyteidl2/cacheservice/v2/cacheservice.proto\x12\x19\x66lyteidl2.cacheservice.v2\x1a\x1b\x62uf/validate/validate.proto\x1a)flyteidl2/cacheservice/cacheservice.proto\"k\n\nIdentifier\x12\x19\n\x03org\x18\x01 \x01(\tB\x07\xbaH\x04r\x02\x10\x01R\x03org\x12!\n\x07project\x18\x02 \x01(\tB\x07\xbaH\x04r\x02\x10\x01R\x07project\x12\x1f\n\x06\x64omain\x18\x03 \x01(\tB\x07\xbaH\x04r\x02\x10\x01R\x06\x64omain\"\xac\x01\n\x0fGetCacheRequest\x12J\n\x0c\x62\x61se_request\x18\x01 \x01(\x0b\x32\'.flyteidl2.cacheservice.GetCacheRequestR\x0b\x62\x61seRequest\x12M\n\nidentifier\x18\x02 \x01(\x0b\x32%.flyteidl2.cacheservice.v2.IdentifierB\x06\xbaH\x03\xc8\x01\x01R\nidentifier\"\xac\x01\n\x0fPutCacheRequest\x12J\n\x0c\x62\x61se_request\x18\x01 \x01(\x0b\x32\'.flyteidl2.cacheservice.PutCacheRequestR\x0b\x62\x61seRequest\x12M\n\nidentifier\x18\x02 \x01(\x0b\x32%.flyteidl2.cacheservice.v2.IdentifierB\x06\xbaH\x03\xc8\x01\x01R\nidentifier\"\xb2\x01\n\x12\x44\x65leteCacheRequest\x12M\n\x0c\x62\x61se_request\x18\x01 \x01(\x0b\x32*.flyteidl2.cacheservice.DeleteCacheRequestR\x0b\x62\x61seRequest\x12M\n\nidentifier\x18\x02 \x01(\x0b\x32%.flyteidl2.cacheservice.v2.IdentifierB\x06\xbaH\x03\xc8\x01\x01R\nidentifier\"\xc8\x01\n\x1dGetOrExtendReservationRequest\x12X\n\x0c\x62\x61se_request\x18\x01 
\x01(\x0b\x32\x35.flyteidl2.cacheservice.GetOrExtendReservationRequestR\x0b\x62\x61seRequest\x12M\n\nidentifier\x18\x02 \x01(\x0b\x32%.flyteidl2.cacheservice.v2.IdentifierB\x06\xbaH\x03\xc8\x01\x01R\nidentifier\"\xc0\x01\n\x19ReleaseReservationRequest\x12T\n\x0c\x62\x61se_request\x18\x01 \x01(\x0b\x32\x31.flyteidl2.cacheservice.ReleaseReservationRequestR\x0b\x62\x61seRequest\x12M\n\nidentifier\x18\x02 \x01(\x0b\x32%.flyteidl2.cacheservice.v2.IdentifierB\x06\xbaH\x03\xc8\x01\x01R\nidentifier2\xbb\x04\n\x0c\x43\x61\x63heService\x12[\n\x03Get\x12*.flyteidl2.cacheservice.v2.GetCacheRequest\x1a(.flyteidl2.cacheservice.GetCacheResponse\x12[\n\x03Put\x12*.flyteidl2.cacheservice.v2.PutCacheRequest\x1a(.flyteidl2.cacheservice.PutCacheResponse\x12\x64\n\x06\x44\x65lete\x12-.flyteidl2.cacheservice.v2.DeleteCacheRequest\x1a+.flyteidl2.cacheservice.DeleteCacheResponse\x12\x8a\x01\n\x16GetOrExtendReservation\x12\x38.flyteidl2.cacheservice.v2.GetOrExtendReservationRequest\x1a\x36.flyteidl2.cacheservice.GetOrExtendReservationResponse\x12~\n\x12ReleaseReservation\x12\x34.flyteidl2.cacheservice.v2.ReleaseReservationRequest\x1a\x32.flyteidl2.cacheservice.ReleaseReservationResponseB\xf9\x01\n\x1d\x63om.flyteidl2.cacheservice.v2B\x11\x43\x61\x63heserviceProtoH\x02P\x01Z=github.com/flyteorg/flyte/v2/gen/go/flyteidl2/cacheservice/v2\xa2\x02\x03\x46\x43X\xaa\x02\x19\x46lyteidl2.Cacheservice.V2\xca\x02\x19\x46lyteidl2\\Cacheservice\\V2\xe2\x02%Flyteidl2\\Cacheservice\\V2\\GPBMetadata\xea\x02\x1b\x46lyteidl2::Cacheservice::V2b\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flyteidl2.cacheservice.v2.cacheservice_pb2', _globals) +if _descriptor._USE_C_DESCRIPTORS == False: + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = 
b'\n\035com.flyteidl2.cacheservice.v2B\021CacheserviceProtoH\002P\001Z=github.com/flyteorg/flyte/v2/gen/go/flyteidl2/cacheservice/v2\242\002\003FCX\252\002\031Flyteidl2.Cacheservice.V2\312\002\031Flyteidl2\\Cacheservice\\V2\342\002%Flyteidl2\\Cacheservice\\V2\\GPBMetadata\352\002\033Flyteidl2::Cacheservice::V2' + _IDENTIFIER.fields_by_name['org']._options = None + _IDENTIFIER.fields_by_name['org']._serialized_options = b'\272H\004r\002\020\001' + _IDENTIFIER.fields_by_name['project']._options = None + _IDENTIFIER.fields_by_name['project']._serialized_options = b'\272H\004r\002\020\001' + _IDENTIFIER.fields_by_name['domain']._options = None + _IDENTIFIER.fields_by_name['domain']._serialized_options = b'\272H\004r\002\020\001' + _GETCACHEREQUEST.fields_by_name['identifier']._options = None + _GETCACHEREQUEST.fields_by_name['identifier']._serialized_options = b'\272H\003\310\001\001' + _PUTCACHEREQUEST.fields_by_name['identifier']._options = None + _PUTCACHEREQUEST.fields_by_name['identifier']._serialized_options = b'\272H\003\310\001\001' + _DELETECACHEREQUEST.fields_by_name['identifier']._options = None + _DELETECACHEREQUEST.fields_by_name['identifier']._serialized_options = b'\272H\003\310\001\001' + _GETOREXTENDRESERVATIONREQUEST.fields_by_name['identifier']._options = None + _GETOREXTENDRESERVATIONREQUEST.fields_by_name['identifier']._serialized_options = b'\272H\003\310\001\001' + _RELEASERESERVATIONREQUEST.fields_by_name['identifier']._options = None + _RELEASERESERVATIONREQUEST.fields_by_name['identifier']._serialized_options = b'\272H\003\310\001\001' + _globals['_IDENTIFIER']._serialized_start=147 + _globals['_IDENTIFIER']._serialized_end=254 + _globals['_GETCACHEREQUEST']._serialized_start=257 + _globals['_GETCACHEREQUEST']._serialized_end=429 + _globals['_PUTCACHEREQUEST']._serialized_start=432 + _globals['_PUTCACHEREQUEST']._serialized_end=604 + _globals['_DELETECACHEREQUEST']._serialized_start=607 + _globals['_DELETECACHEREQUEST']._serialized_end=785 + 
_globals['_GETOREXTENDRESERVATIONREQUEST']._serialized_start=788 + _globals['_GETOREXTENDRESERVATIONREQUEST']._serialized_end=988 + _globals['_RELEASERESERVATIONREQUEST']._serialized_start=991 + _globals['_RELEASERESERVATIONREQUEST']._serialized_end=1183 + _globals['_CACHESERVICE']._serialized_start=1186 + _globals['_CACHESERVICE']._serialized_end=1757 +# @@protoc_insertion_point(module_scope) diff --git a/gen/python/flyteidl2/cacheservice/v2/cacheservice_pb2.pyi b/gen/python/flyteidl2/cacheservice/v2/cacheservice_pb2.pyi new file mode 100644 index 0000000000..7468c086ea --- /dev/null +++ b/gen/python/flyteidl2/cacheservice/v2/cacheservice_pb2.pyi @@ -0,0 +1,57 @@ +from buf.validate import validate_pb2 as _validate_pb2 +from flyteidl2.cacheservice import cacheservice_pb2 as _cacheservice_pb2 +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from typing import ClassVar as _ClassVar, Mapping as _Mapping, Optional as _Optional, Union as _Union + +DESCRIPTOR: _descriptor.FileDescriptor + +class Identifier(_message.Message): + __slots__ = ["org", "project", "domain"] + ORG_FIELD_NUMBER: _ClassVar[int] + PROJECT_FIELD_NUMBER: _ClassVar[int] + DOMAIN_FIELD_NUMBER: _ClassVar[int] + org: str + project: str + domain: str + def __init__(self, org: _Optional[str] = ..., project: _Optional[str] = ..., domain: _Optional[str] = ...) -> None: ... + +class GetCacheRequest(_message.Message): + __slots__ = ["base_request", "identifier"] + BASE_REQUEST_FIELD_NUMBER: _ClassVar[int] + IDENTIFIER_FIELD_NUMBER: _ClassVar[int] + base_request: _cacheservice_pb2.GetCacheRequest + identifier: Identifier + def __init__(self, base_request: _Optional[_Union[_cacheservice_pb2.GetCacheRequest, _Mapping]] = ..., identifier: _Optional[_Union[Identifier, _Mapping]] = ...) -> None: ... 
+ +class PutCacheRequest(_message.Message): + __slots__ = ["base_request", "identifier"] + BASE_REQUEST_FIELD_NUMBER: _ClassVar[int] + IDENTIFIER_FIELD_NUMBER: _ClassVar[int] + base_request: _cacheservice_pb2.PutCacheRequest + identifier: Identifier + def __init__(self, base_request: _Optional[_Union[_cacheservice_pb2.PutCacheRequest, _Mapping]] = ..., identifier: _Optional[_Union[Identifier, _Mapping]] = ...) -> None: ... + +class DeleteCacheRequest(_message.Message): + __slots__ = ["base_request", "identifier"] + BASE_REQUEST_FIELD_NUMBER: _ClassVar[int] + IDENTIFIER_FIELD_NUMBER: _ClassVar[int] + base_request: _cacheservice_pb2.DeleteCacheRequest + identifier: Identifier + def __init__(self, base_request: _Optional[_Union[_cacheservice_pb2.DeleteCacheRequest, _Mapping]] = ..., identifier: _Optional[_Union[Identifier, _Mapping]] = ...) -> None: ... + +class GetOrExtendReservationRequest(_message.Message): + __slots__ = ["base_request", "identifier"] + BASE_REQUEST_FIELD_NUMBER: _ClassVar[int] + IDENTIFIER_FIELD_NUMBER: _ClassVar[int] + base_request: _cacheservice_pb2.GetOrExtendReservationRequest + identifier: Identifier + def __init__(self, base_request: _Optional[_Union[_cacheservice_pb2.GetOrExtendReservationRequest, _Mapping]] = ..., identifier: _Optional[_Union[Identifier, _Mapping]] = ...) -> None: ... + +class ReleaseReservationRequest(_message.Message): + __slots__ = ["base_request", "identifier"] + BASE_REQUEST_FIELD_NUMBER: _ClassVar[int] + IDENTIFIER_FIELD_NUMBER: _ClassVar[int] + base_request: _cacheservice_pb2.ReleaseReservationRequest + identifier: Identifier + def __init__(self, base_request: _Optional[_Union[_cacheservice_pb2.ReleaseReservationRequest, _Mapping]] = ..., identifier: _Optional[_Union[Identifier, _Mapping]] = ...) -> None: ... 
diff --git a/gen/python/flyteidl2/cacheservice/v2/cacheservice_pb2_grpc.py b/gen/python/flyteidl2/cacheservice/v2/cacheservice_pb2_grpc.py new file mode 100644 index 0000000000..1c01a4cb6e --- /dev/null +++ b/gen/python/flyteidl2/cacheservice/v2/cacheservice_pb2_grpc.py @@ -0,0 +1,210 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + +from flyteidl2.cacheservice import cacheservice_pb2 as flyteidl2_dot_cacheservice_dot_cacheservice__pb2 +from flyteidl2.cacheservice.v2 import cacheservice_pb2 as flyteidl2_dot_cacheservice_dot_v2_dot_cacheservice__pb2 + + +class CacheServiceStub(object): + """ + CacheService defines operations for cache management including retrieval, storage, and deletion of cached task/workflow outputs. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.Get = channel.unary_unary( + '/flyteidl2.cacheservice.v2.CacheService/Get', + request_serializer=flyteidl2_dot_cacheservice_dot_v2_dot_cacheservice__pb2.GetCacheRequest.SerializeToString, + response_deserializer=flyteidl2_dot_cacheservice_dot_cacheservice__pb2.GetCacheResponse.FromString, + ) + self.Put = channel.unary_unary( + '/flyteidl2.cacheservice.v2.CacheService/Put', + request_serializer=flyteidl2_dot_cacheservice_dot_v2_dot_cacheservice__pb2.PutCacheRequest.SerializeToString, + response_deserializer=flyteidl2_dot_cacheservice_dot_cacheservice__pb2.PutCacheResponse.FromString, + ) + self.Delete = channel.unary_unary( + '/flyteidl2.cacheservice.v2.CacheService/Delete', + request_serializer=flyteidl2_dot_cacheservice_dot_v2_dot_cacheservice__pb2.DeleteCacheRequest.SerializeToString, + response_deserializer=flyteidl2_dot_cacheservice_dot_cacheservice__pb2.DeleteCacheResponse.FromString, + ) + self.GetOrExtendReservation = channel.unary_unary( + '/flyteidl2.cacheservice.v2.CacheService/GetOrExtendReservation', + 
request_serializer=flyteidl2_dot_cacheservice_dot_v2_dot_cacheservice__pb2.GetOrExtendReservationRequest.SerializeToString, + response_deserializer=flyteidl2_dot_cacheservice_dot_cacheservice__pb2.GetOrExtendReservationResponse.FromString, + ) + self.ReleaseReservation = channel.unary_unary( + '/flyteidl2.cacheservice.v2.CacheService/ReleaseReservation', + request_serializer=flyteidl2_dot_cacheservice_dot_v2_dot_cacheservice__pb2.ReleaseReservationRequest.SerializeToString, + response_deserializer=flyteidl2_dot_cacheservice_dot_cacheservice__pb2.ReleaseReservationResponse.FromString, + ) + + +class CacheServiceServicer(object): + """ + CacheService defines operations for cache management including retrieval, storage, and deletion of cached task/workflow outputs. + """ + + def Get(self, request, context): + """Retrieves cached data by key. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Put(self, request, context): + """Stores or updates cached data by key. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Delete(self, request, context): + """Deletes cached data by key. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetOrExtendReservation(self, request, context): + """Get or extend a reservation for a cache key + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ReleaseReservation(self, request, context): + """Release the reservation for a cache key + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_CacheServiceServicer_to_server(servicer, server): + rpc_method_handlers = { + 'Get': grpc.unary_unary_rpc_method_handler( + servicer.Get, + request_deserializer=flyteidl2_dot_cacheservice_dot_v2_dot_cacheservice__pb2.GetCacheRequest.FromString, + response_serializer=flyteidl2_dot_cacheservice_dot_cacheservice__pb2.GetCacheResponse.SerializeToString, + ), + 'Put': grpc.unary_unary_rpc_method_handler( + servicer.Put, + request_deserializer=flyteidl2_dot_cacheservice_dot_v2_dot_cacheservice__pb2.PutCacheRequest.FromString, + response_serializer=flyteidl2_dot_cacheservice_dot_cacheservice__pb2.PutCacheResponse.SerializeToString, + ), + 'Delete': grpc.unary_unary_rpc_method_handler( + servicer.Delete, + request_deserializer=flyteidl2_dot_cacheservice_dot_v2_dot_cacheservice__pb2.DeleteCacheRequest.FromString, + response_serializer=flyteidl2_dot_cacheservice_dot_cacheservice__pb2.DeleteCacheResponse.SerializeToString, + ), + 'GetOrExtendReservation': grpc.unary_unary_rpc_method_handler( + servicer.GetOrExtendReservation, + request_deserializer=flyteidl2_dot_cacheservice_dot_v2_dot_cacheservice__pb2.GetOrExtendReservationRequest.FromString, + response_serializer=flyteidl2_dot_cacheservice_dot_cacheservice__pb2.GetOrExtendReservationResponse.SerializeToString, + ), + 'ReleaseReservation': 
grpc.unary_unary_rpc_method_handler( + servicer.ReleaseReservation, + request_deserializer=flyteidl2_dot_cacheservice_dot_v2_dot_cacheservice__pb2.ReleaseReservationRequest.FromString, + response_serializer=flyteidl2_dot_cacheservice_dot_cacheservice__pb2.ReleaseReservationResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'flyteidl2.cacheservice.v2.CacheService', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + + # This class is part of an EXPERIMENTAL API. +class CacheService(object): + """ + CacheService defines operations for cache management including retrieval, storage, and deletion of cached task/workflow outputs. + """ + + @staticmethod + def Get(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flyteidl2.cacheservice.v2.CacheService/Get', + flyteidl2_dot_cacheservice_dot_v2_dot_cacheservice__pb2.GetCacheRequest.SerializeToString, + flyteidl2_dot_cacheservice_dot_cacheservice__pb2.GetCacheResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def Put(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flyteidl2.cacheservice.v2.CacheService/Put', + flyteidl2_dot_cacheservice_dot_v2_dot_cacheservice__pb2.PutCacheRequest.SerializeToString, + flyteidl2_dot_cacheservice_dot_cacheservice__pb2.PutCacheResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def Delete(request, + target, + options=(), + channel_credentials=None, + 
call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flyteidl2.cacheservice.v2.CacheService/Delete', + flyteidl2_dot_cacheservice_dot_v2_dot_cacheservice__pb2.DeleteCacheRequest.SerializeToString, + flyteidl2_dot_cacheservice_dot_cacheservice__pb2.DeleteCacheResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def GetOrExtendReservation(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flyteidl2.cacheservice.v2.CacheService/GetOrExtendReservation', + flyteidl2_dot_cacheservice_dot_v2_dot_cacheservice__pb2.GetOrExtendReservationRequest.SerializeToString, + flyteidl2_dot_cacheservice_dot_cacheservice__pb2.GetOrExtendReservationResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def ReleaseReservation(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flyteidl2.cacheservice.v2.CacheService/ReleaseReservation', + flyteidl2_dot_cacheservice_dot_v2_dot_cacheservice__pb2.ReleaseReservationRequest.SerializeToString, + flyteidl2_dot_cacheservice_dot_cacheservice__pb2.ReleaseReservationResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/gen/python/flyteidl2/common/configuration_pb2.py b/gen/python/flyteidl2/common/configuration_pb2.py new file mode 100644 index 0000000000..7a9ba96a51 --- /dev/null +++ 
b/gen/python/flyteidl2/common/configuration_pb2.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: flyteidl2/common/configuration.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n$flyteidl2/common/configuration.proto\x12\x10\x66lyteidl2.common*l\n\x10\x41ttributesSource\x12\x16\n\x12SOURCE_UNSPECIFIED\x10\x00\x12\n\n\x06GLOBAL\x10\x01\x12\n\n\x06\x44OMAIN\x10\x02\x12\x0b\n\x07PROJECT\x10\x03\x12\x12\n\x0ePROJECT_DOMAIN\x10\x04\x12\x07\n\x03ORG\x10\x05\x42\xc3\x01\n\x14\x63om.flyteidl2.commonB\x12\x43onfigurationProtoH\x02P\x01Z4github.com/flyteorg/flyte/v2/gen/go/flyteidl2/common\xa2\x02\x03\x46\x43X\xaa\x02\x10\x46lyteidl2.Common\xca\x02\x10\x46lyteidl2\\Common\xe2\x02\x1c\x46lyteidl2\\Common\\GPBMetadata\xea\x02\x11\x46lyteidl2::Commonb\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flyteidl2.common.configuration_pb2', _globals) +if _descriptor._USE_C_DESCRIPTORS == False: + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\024com.flyteidl2.commonB\022ConfigurationProtoH\002P\001Z4github.com/flyteorg/flyte/v2/gen/go/flyteidl2/common\242\002\003FCX\252\002\020Flyteidl2.Common\312\002\020Flyteidl2\\Common\342\002\034Flyteidl2\\Common\\GPBMetadata\352\002\021Flyteidl2::Common' + _globals['_ATTRIBUTESSOURCE']._serialized_start=58 + _globals['_ATTRIBUTESSOURCE']._serialized_end=166 +# @@protoc_insertion_point(module_scope) diff --git a/gen/python/flyteidl2/common/configuration_pb2.pyi 
b/gen/python/flyteidl2/common/configuration_pb2.pyi new file mode 100644 index 0000000000..72b51de8f4 --- /dev/null +++ b/gen/python/flyteidl2/common/configuration_pb2.pyi @@ -0,0 +1,20 @@ +from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper +from google.protobuf import descriptor as _descriptor +from typing import ClassVar as _ClassVar + +DESCRIPTOR: _descriptor.FileDescriptor + +class AttributesSource(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): + __slots__ = [] + SOURCE_UNSPECIFIED: _ClassVar[AttributesSource] + GLOBAL: _ClassVar[AttributesSource] + DOMAIN: _ClassVar[AttributesSource] + PROJECT: _ClassVar[AttributesSource] + PROJECT_DOMAIN: _ClassVar[AttributesSource] + ORG: _ClassVar[AttributesSource] +SOURCE_UNSPECIFIED: AttributesSource +GLOBAL: AttributesSource +DOMAIN: AttributesSource +PROJECT: AttributesSource +PROJECT_DOMAIN: AttributesSource +ORG: AttributesSource diff --git a/gen/python/flyteidl2/common/configuration_pb2_grpc.py b/gen/python/flyteidl2/common/configuration_pb2_grpc.py new file mode 100644 index 0000000000..2daafffebf --- /dev/null +++ b/gen/python/flyteidl2/common/configuration_pb2_grpc.py @@ -0,0 +1,4 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + diff --git a/gen/python/flyteidl2/core/errors_pb2.py b/gen/python/flyteidl2/core/errors_pb2.py new file mode 100644 index 0000000000..b6d8b5e769 --- /dev/null +++ b/gen/python/flyteidl2/core/errors_pb2.py @@ -0,0 +1,31 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: flyteidl2/core/errors.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from flyteidl2.core import execution_pb2 as flyteidl2_dot_core_dot_execution__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1b\x66lyteidl2/core/errors.proto\x12\x0e\x66lyteidl2.core\x1a\x1e\x66lyteidl2/core/execution.proto\"\xe7\x01\n\x0e\x43ontainerError\x12\x12\n\x04\x63ode\x18\x01 \x01(\tR\x04\x63ode\x12\x18\n\x07message\x18\x02 \x01(\tR\x07message\x12\x37\n\x04kind\x18\x03 \x01(\x0e\x32#.flyteidl2.core.ContainerError.KindR\x04kind\x12@\n\x06origin\x18\x04 \x01(\x0e\x32(.flyteidl2.core.ExecutionError.ErrorKindR\x06origin\",\n\x04Kind\x12\x13\n\x0fNON_RECOVERABLE\x10\x00\x12\x0f\n\x0bRECOVERABLE\x10\x01\"E\n\rErrorDocument\x12\x34\n\x05\x65rror\x18\x01 \x01(\x0b\x32\x1e.flyteidl2.core.ContainerErrorR\x05\x65rrorB\xb0\x01\n\x12\x63om.flyteidl2.coreB\x0b\x45rrorsProtoH\x02P\x01Z2github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core\xa2\x02\x03\x46\x43X\xaa\x02\x0e\x46lyteidl2.Core\xca\x02\x0e\x46lyteidl2\\Core\xe2\x02\x1a\x46lyteidl2\\Core\\GPBMetadata\xea\x02\x0f\x46lyteidl2::Coreb\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flyteidl2.core.errors_pb2', _globals) +if _descriptor._USE_C_DESCRIPTORS == False: + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\022com.flyteidl2.coreB\013ErrorsProtoH\002P\001Z2github.com/flyteorg/flyte/v2/gen/go/flyteidl2/core\242\002\003FCX\252\002\016Flyteidl2.Core\312\002\016Flyteidl2\\Core\342\002\032Flyteidl2\\Core\\GPBMetadata\352\002\017Flyteidl2::Core' + 
_globals['_CONTAINERERROR']._serialized_start=80 + _globals['_CONTAINERERROR']._serialized_end=311 + _globals['_CONTAINERERROR_KIND']._serialized_start=267 + _globals['_CONTAINERERROR_KIND']._serialized_end=311 + _globals['_ERRORDOCUMENT']._serialized_start=313 + _globals['_ERRORDOCUMENT']._serialized_end=382 +# @@protoc_insertion_point(module_scope) diff --git a/gen/python/flyteidl2/core/errors_pb2.pyi b/gen/python/flyteidl2/core/errors_pb2.pyi new file mode 100644 index 0000000000..b990af6eb9 --- /dev/null +++ b/gen/python/flyteidl2/core/errors_pb2.pyi @@ -0,0 +1,31 @@ +from flyteidl2.core import execution_pb2 as _execution_pb2 +from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from typing import ClassVar as _ClassVar, Mapping as _Mapping, Optional as _Optional, Union as _Union + +DESCRIPTOR: _descriptor.FileDescriptor + +class ContainerError(_message.Message): + __slots__ = ["code", "message", "kind", "origin"] + class Kind(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): + __slots__ = [] + NON_RECOVERABLE: _ClassVar[ContainerError.Kind] + RECOVERABLE: _ClassVar[ContainerError.Kind] + NON_RECOVERABLE: ContainerError.Kind + RECOVERABLE: ContainerError.Kind + CODE_FIELD_NUMBER: _ClassVar[int] + MESSAGE_FIELD_NUMBER: _ClassVar[int] + KIND_FIELD_NUMBER: _ClassVar[int] + ORIGIN_FIELD_NUMBER: _ClassVar[int] + code: str + message: str + kind: ContainerError.Kind + origin: _execution_pb2.ExecutionError.ErrorKind + def __init__(self, code: _Optional[str] = ..., message: _Optional[str] = ..., kind: _Optional[_Union[ContainerError.Kind, str]] = ..., origin: _Optional[_Union[_execution_pb2.ExecutionError.ErrorKind, str]] = ...) -> None: ... 
+ +class ErrorDocument(_message.Message): + __slots__ = ["error"] + ERROR_FIELD_NUMBER: _ClassVar[int] + error: ContainerError + def __init__(self, error: _Optional[_Union[ContainerError, _Mapping]] = ...) -> None: ... diff --git a/gen/python/flyteidl2/core/errors_pb2_grpc.py b/gen/python/flyteidl2/core/errors_pb2_grpc.py new file mode 100644 index 0000000000..2daafffebf --- /dev/null +++ b/gen/python/flyteidl2/core/errors_pb2_grpc.py @@ -0,0 +1,4 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + diff --git a/gen/python/flyteidl2/datacatalog/__init__.py b/gen/python/flyteidl2/datacatalog/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/gen/python/flyteidl2/datacatalog/datacatalog_pb2.py b/gen/python/flyteidl2/datacatalog/datacatalog_pb2.py new file mode 100644 index 0000000000..a731239b22 --- /dev/null +++ b/gen/python/flyteidl2/datacatalog/datacatalog_pb2.py @@ -0,0 +1,113 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: flyteidl2/datacatalog/datacatalog.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from flyteidl2.core import literals_pb2 as flyteidl2_dot_core_dot_literals__pb2 +from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 +from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\'flyteidl2/datacatalog/datacatalog.proto\x12\x15\x66lyteidl2.datacatalog\x1a\x1d\x66lyteidl2/core/literals.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"P\n\x14\x43reateDatasetRequest\x12\x38\n\x07\x64\x61taset\x18\x01 \x01(\x0b\x32\x1e.flyteidl2.datacatalog.DatasetR\x07\x64\x61taset\"\x17\n\x15\x43reateDatasetResponse\"O\n\x11GetDatasetRequest\x12:\n\x07\x64\x61taset\x18\x01 \x01(\x0b\x32 .flyteidl2.datacatalog.DatasetIDR\x07\x64\x61taset\"N\n\x12GetDatasetResponse\x12\x38\n\x07\x64\x61taset\x18\x01 \x01(\x0b\x32\x1e.flyteidl2.datacatalog.DatasetR\x07\x64\x61taset\"\xa0\x01\n\x12GetArtifactRequest\x12:\n\x07\x64\x61taset\x18\x01 \x01(\x0b\x32 .flyteidl2.datacatalog.DatasetIDR\x07\x64\x61taset\x12!\n\x0b\x61rtifact_id\x18\x02 \x01(\tH\x00R\nartifactId\x12\x1b\n\x08tag_name\x18\x03 \x01(\tH\x00R\x07tagNameB\x0e\n\x0cquery_handle\"R\n\x13GetArtifactResponse\x12;\n\x08\x61rtifact\x18\x01 \x01(\x0b\x32\x1f.flyteidl2.datacatalog.ArtifactR\x08\x61rtifact\"T\n\x15\x43reateArtifactRequest\x12;\n\x08\x61rtifact\x18\x01 \x01(\x0b\x32\x1f.flyteidl2.datacatalog.ArtifactR\x08\x61rtifact\"\x18\n\x16\x43reateArtifactResponse\"=\n\rAddTagRequest\x12,\n\x03tag\x18\x01 
\x01(\x0b\x32\x1a.flyteidl2.datacatalog.TagR\x03tag\"\x10\n\x0e\x41\x64\x64TagResponse\"\xdd\x01\n\x14ListArtifactsRequest\x12:\n\x07\x64\x61taset\x18\x01 \x01(\x0b\x32 .flyteidl2.datacatalog.DatasetIDR\x07\x64\x61taset\x12?\n\x06\x66ilter\x18\x02 \x01(\x0b\x32\'.flyteidl2.datacatalog.FilterExpressionR\x06\x66ilter\x12H\n\npagination\x18\x03 \x01(\x0b\x32(.flyteidl2.datacatalog.PaginationOptionsR\npagination\"u\n\x15ListArtifactsResponse\x12=\n\tartifacts\x18\x01 \x03(\x0b\x32\x1f.flyteidl2.datacatalog.ArtifactR\tartifacts\x12\x1d\n\nnext_token\x18\x02 \x01(\tR\tnextToken\"\xa0\x01\n\x13ListDatasetsRequest\x12?\n\x06\x66ilter\x18\x01 \x01(\x0b\x32\'.flyteidl2.datacatalog.FilterExpressionR\x06\x66ilter\x12H\n\npagination\x18\x02 \x01(\x0b\x32(.flyteidl2.datacatalog.PaginationOptionsR\npagination\"q\n\x14ListDatasetsResponse\x12:\n\x08\x64\x61tasets\x18\x01 \x03(\x0b\x32\x1e.flyteidl2.datacatalog.DatasetR\x08\x64\x61tasets\x12\x1d\n\nnext_token\x18\x02 \x01(\tR\tnextToken\"\x99\x02\n\x15UpdateArtifactRequest\x12:\n\x07\x64\x61taset\x18\x01 \x01(\x0b\x32 .flyteidl2.datacatalog.DatasetIDR\x07\x64\x61taset\x12!\n\x0b\x61rtifact_id\x18\x02 \x01(\tH\x00R\nartifactId\x12\x1b\n\x08tag_name\x18\x03 \x01(\tH\x00R\x07tagName\x12\x37\n\x04\x64\x61ta\x18\x04 \x03(\x0b\x32#.flyteidl2.datacatalog.ArtifactDataR\x04\x64\x61ta\x12;\n\x08metadata\x18\x05 \x01(\x0b\x32\x1f.flyteidl2.datacatalog.MetadataR\x08metadataB\x0e\n\x0cquery_handle\"9\n\x16UpdateArtifactResponse\x12\x1f\n\x0b\x61rtifact_id\x18\x01 \x01(\tR\nartifactId\"k\n\rReservationID\x12?\n\ndataset_id\x18\x01 \x01(\x0b\x32 .flyteidl2.datacatalog.DatasetIDR\tdatasetId\x12\x19\n\x08tag_name\x18\x02 \x01(\tR\x07tagName\"\xd1\x01\n\x1dGetOrExtendReservationRequest\x12K\n\x0ereservation_id\x18\x01 \x01(\x0b\x32$.flyteidl2.datacatalog.ReservationIDR\rreservationId\x12\x19\n\x08owner_id\x18\x02 \x01(\tR\x07ownerId\x12H\n\x12heartbeat_interval\x18\x03 
\x01(\x0b\x32\x19.google.protobuf.DurationR\x11heartbeatInterval\"\xb7\x02\n\x0bReservation\x12K\n\x0ereservation_id\x18\x01 \x01(\x0b\x32$.flyteidl2.datacatalog.ReservationIDR\rreservationId\x12\x19\n\x08owner_id\x18\x02 \x01(\tR\x07ownerId\x12H\n\x12heartbeat_interval\x18\x03 \x01(\x0b\x32\x19.google.protobuf.DurationR\x11heartbeatInterval\x12\x39\n\nexpires_at\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\texpiresAt\x12;\n\x08metadata\x18\x06 \x01(\x0b\x32\x1f.flyteidl2.datacatalog.MetadataR\x08metadata\"f\n\x1eGetOrExtendReservationResponse\x12\x44\n\x0breservation\x18\x01 \x01(\x0b\x32\".flyteidl2.datacatalog.ReservationR\x0breservation\"\x83\x01\n\x19ReleaseReservationRequest\x12K\n\x0ereservation_id\x18\x01 \x01(\x0b\x32$.flyteidl2.datacatalog.ReservationIDR\rreservationId\x12\x19\n\x08owner_id\x18\x02 \x01(\tR\x07ownerId\"\x1c\n\x1aReleaseReservationResponse\"\x9e\x01\n\x07\x44\x61taset\x12\x30\n\x02id\x18\x01 \x01(\x0b\x32 .flyteidl2.datacatalog.DatasetIDR\x02id\x12;\n\x08metadata\x18\x02 \x01(\x0b\x32\x1f.flyteidl2.datacatalog.MetadataR\x08metadata\x12$\n\rpartitionKeys\x18\x03 \x03(\tR\rpartitionKeys\"3\n\tPartition\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value\"\x91\x01\n\tDatasetID\x12\x18\n\x07project\x18\x01 \x01(\tR\x07project\x12\x12\n\x04name\x18\x02 \x01(\tR\x04name\x12\x16\n\x06\x64omain\x18\x03 \x01(\tR\x06\x64omain\x12\x18\n\x07version\x18\x04 \x01(\tR\x07version\x12\x12\n\x04UUID\x18\x05 \x01(\tR\x04UUID\x12\x10\n\x03org\x18\x06 \x01(\tR\x03org\"\xf9\x02\n\x08\x41rtifact\x12\x0e\n\x02id\x18\x01 \x01(\tR\x02id\x12:\n\x07\x64\x61taset\x18\x02 \x01(\x0b\x32 .flyteidl2.datacatalog.DatasetIDR\x07\x64\x61taset\x12\x37\n\x04\x64\x61ta\x18\x03 \x03(\x0b\x32#.flyteidl2.datacatalog.ArtifactDataR\x04\x64\x61ta\x12;\n\x08metadata\x18\x04 \x01(\x0b\x32\x1f.flyteidl2.datacatalog.MetadataR\x08metadata\x12@\n\npartitions\x18\x05 \x03(\x0b\x32 .flyteidl2.datacatalog.PartitionR\npartitions\x12.\n\x04tags\x18\x06 
\x03(\x0b\x32\x1a.flyteidl2.datacatalog.TagR\x04tags\x12\x39\n\ncreated_at\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\tcreatedAt\"Q\n\x0c\x41rtifactData\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12-\n\x05value\x18\x02 \x01(\x0b\x32\x17.flyteidl2.core.LiteralR\x05value\"v\n\x03Tag\x12\x12\n\x04name\x18\x01 \x01(\tR\x04name\x12\x1f\n\x0b\x61rtifact_id\x18\x02 \x01(\tR\nartifactId\x12:\n\x07\x64\x61taset\x18\x03 \x01(\x0b\x32 .flyteidl2.datacatalog.DatasetIDR\x07\x64\x61taset\"\x8b\x01\n\x08Metadata\x12\x44\n\x07key_map\x18\x01 \x03(\x0b\x32+.flyteidl2.datacatalog.Metadata.KeyMapEntryR\x06keyMap\x1a\x39\n\x0bKeyMapEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01\"Y\n\x10\x46ilterExpression\x12\x45\n\x07\x66ilters\x18\x01 \x03(\x0b\x32+.flyteidl2.datacatalog.SinglePropertyFilterR\x07\x66ilters\"\x80\x04\n\x14SinglePropertyFilter\x12I\n\ntag_filter\x18\x01 \x01(\x0b\x32(.flyteidl2.datacatalog.TagPropertyFilterH\x00R\ttagFilter\x12[\n\x10partition_filter\x18\x02 \x01(\x0b\x32..flyteidl2.datacatalog.PartitionPropertyFilterH\x00R\x0fpartitionFilter\x12X\n\x0f\x61rtifact_filter\x18\x03 \x01(\x0b\x32-.flyteidl2.datacatalog.ArtifactPropertyFilterH\x00R\x0e\x61rtifactFilter\x12U\n\x0e\x64\x61taset_filter\x18\x04 \x01(\x0b\x32,.flyteidl2.datacatalog.DatasetPropertyFilterH\x00R\rdatasetFilter\x12Z\n\x08operator\x18\n \x01(\x0e\x32>.flyteidl2.datacatalog.SinglePropertyFilter.ComparisonOperatorR\x08operator\" \n\x12\x43omparisonOperator\x12\n\n\x06\x45QUALS\x10\x00\x42\x11\n\x0fproperty_filter\"G\n\x16\x41rtifactPropertyFilter\x12!\n\x0b\x61rtifact_id\x18\x01 \x01(\tH\x00R\nartifactIdB\n\n\x08property\"<\n\x11TagPropertyFilter\x12\x1b\n\x08tag_name\x18\x01 \x01(\tH\x00R\x07tagNameB\n\n\x08property\"e\n\x17PartitionPropertyFilter\x12>\n\x07key_val\x18\x01 \x01(\x0b\x32#.flyteidl2.datacatalog.KeyValuePairH\x00R\x06keyValB\n\n\x08property\"6\n\x0cKeyValuePair\x12\x10\n\x03key\x18\x01 
\x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value\"\x9f\x01\n\x15\x44\x61tasetPropertyFilter\x12\x1a\n\x07project\x18\x01 \x01(\tH\x00R\x07project\x12\x14\n\x04name\x18\x02 \x01(\tH\x00R\x04name\x12\x18\n\x06\x64omain\x18\x03 \x01(\tH\x00R\x06\x64omain\x12\x1a\n\x07version\x18\x04 \x01(\tH\x00R\x07version\x12\x12\n\x03org\x18\x05 \x01(\tH\x00R\x03orgB\n\n\x08property\"\xa7\x02\n\x11PaginationOptions\x12\x14\n\x05limit\x18\x01 \x01(\rR\x05limit\x12\x14\n\x05token\x18\x02 \x01(\tR\x05token\x12J\n\x07sortKey\x18\x03 \x01(\x0e\x32\x30.flyteidl2.datacatalog.PaginationOptions.SortKeyR\x07sortKey\x12P\n\tsortOrder\x18\x04 \x01(\x0e\x32\x32.flyteidl2.datacatalog.PaginationOptions.SortOrderR\tsortOrder\"*\n\tSortOrder\x12\x0e\n\nDESCENDING\x10\x00\x12\r\n\tASCENDING\x10\x01\"\x1c\n\x07SortKey\x12\x11\n\rCREATION_TIME\x10\x00\x32\xcf\x08\n\x0b\x44\x61taCatalog\x12j\n\rCreateDataset\x12+.flyteidl2.datacatalog.CreateDatasetRequest\x1a,.flyteidl2.datacatalog.CreateDatasetResponse\x12\x61\n\nGetDataset\x12(.flyteidl2.datacatalog.GetDatasetRequest\x1a).flyteidl2.datacatalog.GetDatasetResponse\x12m\n\x0e\x43reateArtifact\x12,.flyteidl2.datacatalog.CreateArtifactRequest\x1a-.flyteidl2.datacatalog.CreateArtifactResponse\x12\x64\n\x0bGetArtifact\x12).flyteidl2.datacatalog.GetArtifactRequest\x1a*.flyteidl2.datacatalog.GetArtifactResponse\x12U\n\x06\x41\x64\x64Tag\x12$.flyteidl2.datacatalog.AddTagRequest\x1a%.flyteidl2.datacatalog.AddTagResponse\x12j\n\rListArtifacts\x12+.flyteidl2.datacatalog.ListArtifactsRequest\x1a,.flyteidl2.datacatalog.ListArtifactsResponse\x12g\n\x0cListDatasets\x12*.flyteidl2.datacatalog.ListDatasetsRequest\x1a+.flyteidl2.datacatalog.ListDatasetsResponse\x12m\n\x0eUpdateArtifact\x12,.flyteidl2.datacatalog.UpdateArtifactRequest\x1a-.flyteidl2.datacatalog.UpdateArtifactResponse\x12\x85\x01\n\x16GetOrExtendReservation\x12\x34.flyteidl2.datacatalog.GetOrExtendReservationRequest\x1a\x35.flyteidl2.datacatalog.GetOrExtendReservationResponse\x12y\n\x12ReleaseR
eservation\x12\x30.flyteidl2.datacatalog.ReleaseReservationRequest\x1a\x31.flyteidl2.datacatalog.ReleaseReservationResponseB\xdf\x01\n\x19\x63om.flyteidl2.datacatalogB\x10\x44\x61tacatalogProtoH\x02P\x01Z9github.com/flyteorg/flyte/v2/gen/go/flyteidl2/datacatalog\xa2\x02\x03\x46\x44X\xaa\x02\x15\x46lyteidl2.Datacatalog\xca\x02\x15\x46lyteidl2\\Datacatalog\xe2\x02!Flyteidl2\\Datacatalog\\GPBMetadata\xea\x02\x16\x46lyteidl2::Datacatalogb\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flyteidl2.datacatalog.datacatalog_pb2', _globals) +if _descriptor._USE_C_DESCRIPTORS == False: + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\031com.flyteidl2.datacatalogB\020DatacatalogProtoH\002P\001Z9github.com/flyteorg/flyte/v2/gen/go/flyteidl2/datacatalog\242\002\003FDX\252\002\025Flyteidl2.Datacatalog\312\002\025Flyteidl2\\Datacatalog\342\002!Flyteidl2\\Datacatalog\\GPBMetadata\352\002\026Flyteidl2::Datacatalog' + _METADATA_KEYMAPENTRY._options = None + _METADATA_KEYMAPENTRY._serialized_options = b'8\001' + _globals['_CREATEDATASETREQUEST']._serialized_start=162 + _globals['_CREATEDATASETREQUEST']._serialized_end=242 + _globals['_CREATEDATASETRESPONSE']._serialized_start=244 + _globals['_CREATEDATASETRESPONSE']._serialized_end=267 + _globals['_GETDATASETREQUEST']._serialized_start=269 + _globals['_GETDATASETREQUEST']._serialized_end=348 + _globals['_GETDATASETRESPONSE']._serialized_start=350 + _globals['_GETDATASETRESPONSE']._serialized_end=428 + _globals['_GETARTIFACTREQUEST']._serialized_start=431 + _globals['_GETARTIFACTREQUEST']._serialized_end=591 + _globals['_GETARTIFACTRESPONSE']._serialized_start=593 + _globals['_GETARTIFACTRESPONSE']._serialized_end=675 + _globals['_CREATEARTIFACTREQUEST']._serialized_start=677 + _globals['_CREATEARTIFACTREQUEST']._serialized_end=761 + _globals['_CREATEARTIFACTRESPONSE']._serialized_start=763 + 
_globals['_CREATEARTIFACTRESPONSE']._serialized_end=787 + _globals['_ADDTAGREQUEST']._serialized_start=789 + _globals['_ADDTAGREQUEST']._serialized_end=850 + _globals['_ADDTAGRESPONSE']._serialized_start=852 + _globals['_ADDTAGRESPONSE']._serialized_end=868 + _globals['_LISTARTIFACTSREQUEST']._serialized_start=871 + _globals['_LISTARTIFACTSREQUEST']._serialized_end=1092 + _globals['_LISTARTIFACTSRESPONSE']._serialized_start=1094 + _globals['_LISTARTIFACTSRESPONSE']._serialized_end=1211 + _globals['_LISTDATASETSREQUEST']._serialized_start=1214 + _globals['_LISTDATASETSREQUEST']._serialized_end=1374 + _globals['_LISTDATASETSRESPONSE']._serialized_start=1376 + _globals['_LISTDATASETSRESPONSE']._serialized_end=1489 + _globals['_UPDATEARTIFACTREQUEST']._serialized_start=1492 + _globals['_UPDATEARTIFACTREQUEST']._serialized_end=1773 + _globals['_UPDATEARTIFACTRESPONSE']._serialized_start=1775 + _globals['_UPDATEARTIFACTRESPONSE']._serialized_end=1832 + _globals['_RESERVATIONID']._serialized_start=1834 + _globals['_RESERVATIONID']._serialized_end=1941 + _globals['_GETOREXTENDRESERVATIONREQUEST']._serialized_start=1944 + _globals['_GETOREXTENDRESERVATIONREQUEST']._serialized_end=2153 + _globals['_RESERVATION']._serialized_start=2156 + _globals['_RESERVATION']._serialized_end=2467 + _globals['_GETOREXTENDRESERVATIONRESPONSE']._serialized_start=2469 + _globals['_GETOREXTENDRESERVATIONRESPONSE']._serialized_end=2571 + _globals['_RELEASERESERVATIONREQUEST']._serialized_start=2574 + _globals['_RELEASERESERVATIONREQUEST']._serialized_end=2705 + _globals['_RELEASERESERVATIONRESPONSE']._serialized_start=2707 + _globals['_RELEASERESERVATIONRESPONSE']._serialized_end=2735 + _globals['_DATASET']._serialized_start=2738 + _globals['_DATASET']._serialized_end=2896 + _globals['_PARTITION']._serialized_start=2898 + _globals['_PARTITION']._serialized_end=2949 + _globals['_DATASETID']._serialized_start=2952 + _globals['_DATASETID']._serialized_end=3097 + 
_globals['_ARTIFACT']._serialized_start=3100 + _globals['_ARTIFACT']._serialized_end=3477 + _globals['_ARTIFACTDATA']._serialized_start=3479 + _globals['_ARTIFACTDATA']._serialized_end=3560 + _globals['_TAG']._serialized_start=3562 + _globals['_TAG']._serialized_end=3680 + _globals['_METADATA']._serialized_start=3683 + _globals['_METADATA']._serialized_end=3822 + _globals['_METADATA_KEYMAPENTRY']._serialized_start=3765 + _globals['_METADATA_KEYMAPENTRY']._serialized_end=3822 + _globals['_FILTEREXPRESSION']._serialized_start=3824 + _globals['_FILTEREXPRESSION']._serialized_end=3913 + _globals['_SINGLEPROPERTYFILTER']._serialized_start=3916 + _globals['_SINGLEPROPERTYFILTER']._serialized_end=4428 + _globals['_SINGLEPROPERTYFILTER_COMPARISONOPERATOR']._serialized_start=4377 + _globals['_SINGLEPROPERTYFILTER_COMPARISONOPERATOR']._serialized_end=4409 + _globals['_ARTIFACTPROPERTYFILTER']._serialized_start=4430 + _globals['_ARTIFACTPROPERTYFILTER']._serialized_end=4501 + _globals['_TAGPROPERTYFILTER']._serialized_start=4503 + _globals['_TAGPROPERTYFILTER']._serialized_end=4563 + _globals['_PARTITIONPROPERTYFILTER']._serialized_start=4565 + _globals['_PARTITIONPROPERTYFILTER']._serialized_end=4666 + _globals['_KEYVALUEPAIR']._serialized_start=4668 + _globals['_KEYVALUEPAIR']._serialized_end=4722 + _globals['_DATASETPROPERTYFILTER']._serialized_start=4725 + _globals['_DATASETPROPERTYFILTER']._serialized_end=4884 + _globals['_PAGINATIONOPTIONS']._serialized_start=4887 + _globals['_PAGINATIONOPTIONS']._serialized_end=5182 + _globals['_PAGINATIONOPTIONS_SORTORDER']._serialized_start=5110 + _globals['_PAGINATIONOPTIONS_SORTORDER']._serialized_end=5152 + _globals['_PAGINATIONOPTIONS_SORTKEY']._serialized_start=5154 + _globals['_PAGINATIONOPTIONS_SORTKEY']._serialized_end=5182 + _globals['_DATACATALOG']._serialized_start=5185 + _globals['_DATACATALOG']._serialized_end=6288 +# @@protoc_insertion_point(module_scope) diff --git a/gen/python/flyteidl2/datacatalog/datacatalog_pb2.pyi 
b/gen/python/flyteidl2/datacatalog/datacatalog_pb2.pyi new file mode 100644 index 0000000000..0d5fa03c9c --- /dev/null +++ b/gen/python/flyteidl2/datacatalog/datacatalog_pb2.pyi @@ -0,0 +1,341 @@ +from flyteidl2.core import literals_pb2 as _literals_pb2 +from google.protobuf import duration_pb2 as _duration_pb2 +from google.protobuf import timestamp_pb2 as _timestamp_pb2 +from google.protobuf.internal import containers as _containers +from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from typing import ClassVar as _ClassVar, Iterable as _Iterable, Mapping as _Mapping, Optional as _Optional, Union as _Union + +DESCRIPTOR: _descriptor.FileDescriptor + +class CreateDatasetRequest(_message.Message): + __slots__ = ["dataset"] + DATASET_FIELD_NUMBER: _ClassVar[int] + dataset: Dataset + def __init__(self, dataset: _Optional[_Union[Dataset, _Mapping]] = ...) -> None: ... + +class CreateDatasetResponse(_message.Message): + __slots__ = [] + def __init__(self) -> None: ... + +class GetDatasetRequest(_message.Message): + __slots__ = ["dataset"] + DATASET_FIELD_NUMBER: _ClassVar[int] + dataset: DatasetID + def __init__(self, dataset: _Optional[_Union[DatasetID, _Mapping]] = ...) -> None: ... + +class GetDatasetResponse(_message.Message): + __slots__ = ["dataset"] + DATASET_FIELD_NUMBER: _ClassVar[int] + dataset: Dataset + def __init__(self, dataset: _Optional[_Union[Dataset, _Mapping]] = ...) -> None: ... + +class GetArtifactRequest(_message.Message): + __slots__ = ["dataset", "artifact_id", "tag_name"] + DATASET_FIELD_NUMBER: _ClassVar[int] + ARTIFACT_ID_FIELD_NUMBER: _ClassVar[int] + TAG_NAME_FIELD_NUMBER: _ClassVar[int] + dataset: DatasetID + artifact_id: str + tag_name: str + def __init__(self, dataset: _Optional[_Union[DatasetID, _Mapping]] = ..., artifact_id: _Optional[str] = ..., tag_name: _Optional[str] = ...) -> None: ... 
+ +class GetArtifactResponse(_message.Message): + __slots__ = ["artifact"] + ARTIFACT_FIELD_NUMBER: _ClassVar[int] + artifact: Artifact + def __init__(self, artifact: _Optional[_Union[Artifact, _Mapping]] = ...) -> None: ... + +class CreateArtifactRequest(_message.Message): + __slots__ = ["artifact"] + ARTIFACT_FIELD_NUMBER: _ClassVar[int] + artifact: Artifact + def __init__(self, artifact: _Optional[_Union[Artifact, _Mapping]] = ...) -> None: ... + +class CreateArtifactResponse(_message.Message): + __slots__ = [] + def __init__(self) -> None: ... + +class AddTagRequest(_message.Message): + __slots__ = ["tag"] + TAG_FIELD_NUMBER: _ClassVar[int] + tag: Tag + def __init__(self, tag: _Optional[_Union[Tag, _Mapping]] = ...) -> None: ... + +class AddTagResponse(_message.Message): + __slots__ = [] + def __init__(self) -> None: ... + +class ListArtifactsRequest(_message.Message): + __slots__ = ["dataset", "filter", "pagination"] + DATASET_FIELD_NUMBER: _ClassVar[int] + FILTER_FIELD_NUMBER: _ClassVar[int] + PAGINATION_FIELD_NUMBER: _ClassVar[int] + dataset: DatasetID + filter: FilterExpression + pagination: PaginationOptions + def __init__(self, dataset: _Optional[_Union[DatasetID, _Mapping]] = ..., filter: _Optional[_Union[FilterExpression, _Mapping]] = ..., pagination: _Optional[_Union[PaginationOptions, _Mapping]] = ...) -> None: ... + +class ListArtifactsResponse(_message.Message): + __slots__ = ["artifacts", "next_token"] + ARTIFACTS_FIELD_NUMBER: _ClassVar[int] + NEXT_TOKEN_FIELD_NUMBER: _ClassVar[int] + artifacts: _containers.RepeatedCompositeFieldContainer[Artifact] + next_token: str + def __init__(self, artifacts: _Optional[_Iterable[_Union[Artifact, _Mapping]]] = ..., next_token: _Optional[str] = ...) -> None: ... 
+ +class ListDatasetsRequest(_message.Message): + __slots__ = ["filter", "pagination"] + FILTER_FIELD_NUMBER: _ClassVar[int] + PAGINATION_FIELD_NUMBER: _ClassVar[int] + filter: FilterExpression + pagination: PaginationOptions + def __init__(self, filter: _Optional[_Union[FilterExpression, _Mapping]] = ..., pagination: _Optional[_Union[PaginationOptions, _Mapping]] = ...) -> None: ... + +class ListDatasetsResponse(_message.Message): + __slots__ = ["datasets", "next_token"] + DATASETS_FIELD_NUMBER: _ClassVar[int] + NEXT_TOKEN_FIELD_NUMBER: _ClassVar[int] + datasets: _containers.RepeatedCompositeFieldContainer[Dataset] + next_token: str + def __init__(self, datasets: _Optional[_Iterable[_Union[Dataset, _Mapping]]] = ..., next_token: _Optional[str] = ...) -> None: ... + +class UpdateArtifactRequest(_message.Message): + __slots__ = ["dataset", "artifact_id", "tag_name", "data", "metadata"] + DATASET_FIELD_NUMBER: _ClassVar[int] + ARTIFACT_ID_FIELD_NUMBER: _ClassVar[int] + TAG_NAME_FIELD_NUMBER: _ClassVar[int] + DATA_FIELD_NUMBER: _ClassVar[int] + METADATA_FIELD_NUMBER: _ClassVar[int] + dataset: DatasetID + artifact_id: str + tag_name: str + data: _containers.RepeatedCompositeFieldContainer[ArtifactData] + metadata: Metadata + def __init__(self, dataset: _Optional[_Union[DatasetID, _Mapping]] = ..., artifact_id: _Optional[str] = ..., tag_name: _Optional[str] = ..., data: _Optional[_Iterable[_Union[ArtifactData, _Mapping]]] = ..., metadata: _Optional[_Union[Metadata, _Mapping]] = ...) -> None: ... + +class UpdateArtifactResponse(_message.Message): + __slots__ = ["artifact_id"] + ARTIFACT_ID_FIELD_NUMBER: _ClassVar[int] + artifact_id: str + def __init__(self, artifact_id: _Optional[str] = ...) -> None: ... 
+ +class ReservationID(_message.Message): + __slots__ = ["dataset_id", "tag_name"] + DATASET_ID_FIELD_NUMBER: _ClassVar[int] + TAG_NAME_FIELD_NUMBER: _ClassVar[int] + dataset_id: DatasetID + tag_name: str + def __init__(self, dataset_id: _Optional[_Union[DatasetID, _Mapping]] = ..., tag_name: _Optional[str] = ...) -> None: ... + +class GetOrExtendReservationRequest(_message.Message): + __slots__ = ["reservation_id", "owner_id", "heartbeat_interval"] + RESERVATION_ID_FIELD_NUMBER: _ClassVar[int] + OWNER_ID_FIELD_NUMBER: _ClassVar[int] + HEARTBEAT_INTERVAL_FIELD_NUMBER: _ClassVar[int] + reservation_id: ReservationID + owner_id: str + heartbeat_interval: _duration_pb2.Duration + def __init__(self, reservation_id: _Optional[_Union[ReservationID, _Mapping]] = ..., owner_id: _Optional[str] = ..., heartbeat_interval: _Optional[_Union[_duration_pb2.Duration, _Mapping]] = ...) -> None: ... + +class Reservation(_message.Message): + __slots__ = ["reservation_id", "owner_id", "heartbeat_interval", "expires_at", "metadata"] + RESERVATION_ID_FIELD_NUMBER: _ClassVar[int] + OWNER_ID_FIELD_NUMBER: _ClassVar[int] + HEARTBEAT_INTERVAL_FIELD_NUMBER: _ClassVar[int] + EXPIRES_AT_FIELD_NUMBER: _ClassVar[int] + METADATA_FIELD_NUMBER: _ClassVar[int] + reservation_id: ReservationID + owner_id: str + heartbeat_interval: _duration_pb2.Duration + expires_at: _timestamp_pb2.Timestamp + metadata: Metadata + def __init__(self, reservation_id: _Optional[_Union[ReservationID, _Mapping]] = ..., owner_id: _Optional[str] = ..., heartbeat_interval: _Optional[_Union[_duration_pb2.Duration, _Mapping]] = ..., expires_at: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., metadata: _Optional[_Union[Metadata, _Mapping]] = ...) -> None: ... + +class GetOrExtendReservationResponse(_message.Message): + __slots__ = ["reservation"] + RESERVATION_FIELD_NUMBER: _ClassVar[int] + reservation: Reservation + def __init__(self, reservation: _Optional[_Union[Reservation, _Mapping]] = ...) -> None: ... 
+ +class ReleaseReservationRequest(_message.Message): + __slots__ = ["reservation_id", "owner_id"] + RESERVATION_ID_FIELD_NUMBER: _ClassVar[int] + OWNER_ID_FIELD_NUMBER: _ClassVar[int] + reservation_id: ReservationID + owner_id: str + def __init__(self, reservation_id: _Optional[_Union[ReservationID, _Mapping]] = ..., owner_id: _Optional[str] = ...) -> None: ... + +class ReleaseReservationResponse(_message.Message): + __slots__ = [] + def __init__(self) -> None: ... + +class Dataset(_message.Message): + __slots__ = ["id", "metadata", "partitionKeys"] + ID_FIELD_NUMBER: _ClassVar[int] + METADATA_FIELD_NUMBER: _ClassVar[int] + PARTITIONKEYS_FIELD_NUMBER: _ClassVar[int] + id: DatasetID + metadata: Metadata + partitionKeys: _containers.RepeatedScalarFieldContainer[str] + def __init__(self, id: _Optional[_Union[DatasetID, _Mapping]] = ..., metadata: _Optional[_Union[Metadata, _Mapping]] = ..., partitionKeys: _Optional[_Iterable[str]] = ...) -> None: ... + +class Partition(_message.Message): + __slots__ = ["key", "value"] + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: str + def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ... + +class DatasetID(_message.Message): + __slots__ = ["project", "name", "domain", "version", "UUID", "org"] + PROJECT_FIELD_NUMBER: _ClassVar[int] + NAME_FIELD_NUMBER: _ClassVar[int] + DOMAIN_FIELD_NUMBER: _ClassVar[int] + VERSION_FIELD_NUMBER: _ClassVar[int] + UUID_FIELD_NUMBER: _ClassVar[int] + ORG_FIELD_NUMBER: _ClassVar[int] + project: str + name: str + domain: str + version: str + UUID: str + org: str + def __init__(self, project: _Optional[str] = ..., name: _Optional[str] = ..., domain: _Optional[str] = ..., version: _Optional[str] = ..., UUID: _Optional[str] = ..., org: _Optional[str] = ...) -> None: ... 
+ +class Artifact(_message.Message): + __slots__ = ["id", "dataset", "data", "metadata", "partitions", "tags", "created_at"] + ID_FIELD_NUMBER: _ClassVar[int] + DATASET_FIELD_NUMBER: _ClassVar[int] + DATA_FIELD_NUMBER: _ClassVar[int] + METADATA_FIELD_NUMBER: _ClassVar[int] + PARTITIONS_FIELD_NUMBER: _ClassVar[int] + TAGS_FIELD_NUMBER: _ClassVar[int] + CREATED_AT_FIELD_NUMBER: _ClassVar[int] + id: str + dataset: DatasetID + data: _containers.RepeatedCompositeFieldContainer[ArtifactData] + metadata: Metadata + partitions: _containers.RepeatedCompositeFieldContainer[Partition] + tags: _containers.RepeatedCompositeFieldContainer[Tag] + created_at: _timestamp_pb2.Timestamp + def __init__(self, id: _Optional[str] = ..., dataset: _Optional[_Union[DatasetID, _Mapping]] = ..., data: _Optional[_Iterable[_Union[ArtifactData, _Mapping]]] = ..., metadata: _Optional[_Union[Metadata, _Mapping]] = ..., partitions: _Optional[_Iterable[_Union[Partition, _Mapping]]] = ..., tags: _Optional[_Iterable[_Union[Tag, _Mapping]]] = ..., created_at: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ...) -> None: ... + +class ArtifactData(_message.Message): + __slots__ = ["name", "value"] + NAME_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + name: str + value: _literals_pb2.Literal + def __init__(self, name: _Optional[str] = ..., value: _Optional[_Union[_literals_pb2.Literal, _Mapping]] = ...) -> None: ... + +class Tag(_message.Message): + __slots__ = ["name", "artifact_id", "dataset"] + NAME_FIELD_NUMBER: _ClassVar[int] + ARTIFACT_ID_FIELD_NUMBER: _ClassVar[int] + DATASET_FIELD_NUMBER: _ClassVar[int] + name: str + artifact_id: str + dataset: DatasetID + def __init__(self, name: _Optional[str] = ..., artifact_id: _Optional[str] = ..., dataset: _Optional[_Union[DatasetID, _Mapping]] = ...) -> None: ... 
+ +class Metadata(_message.Message): + __slots__ = ["key_map"] + class KeyMapEntry(_message.Message): + __slots__ = ["key", "value"] + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: str + def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ... + KEY_MAP_FIELD_NUMBER: _ClassVar[int] + key_map: _containers.ScalarMap[str, str] + def __init__(self, key_map: _Optional[_Mapping[str, str]] = ...) -> None: ... + +class FilterExpression(_message.Message): + __slots__ = ["filters"] + FILTERS_FIELD_NUMBER: _ClassVar[int] + filters: _containers.RepeatedCompositeFieldContainer[SinglePropertyFilter] + def __init__(self, filters: _Optional[_Iterable[_Union[SinglePropertyFilter, _Mapping]]] = ...) -> None: ... + +class SinglePropertyFilter(_message.Message): + __slots__ = ["tag_filter", "partition_filter", "artifact_filter", "dataset_filter", "operator"] + class ComparisonOperator(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): + __slots__ = [] + EQUALS: _ClassVar[SinglePropertyFilter.ComparisonOperator] + EQUALS: SinglePropertyFilter.ComparisonOperator + TAG_FILTER_FIELD_NUMBER: _ClassVar[int] + PARTITION_FILTER_FIELD_NUMBER: _ClassVar[int] + ARTIFACT_FILTER_FIELD_NUMBER: _ClassVar[int] + DATASET_FILTER_FIELD_NUMBER: _ClassVar[int] + OPERATOR_FIELD_NUMBER: _ClassVar[int] + tag_filter: TagPropertyFilter + partition_filter: PartitionPropertyFilter + artifact_filter: ArtifactPropertyFilter + dataset_filter: DatasetPropertyFilter + operator: SinglePropertyFilter.ComparisonOperator + def __init__(self, tag_filter: _Optional[_Union[TagPropertyFilter, _Mapping]] = ..., partition_filter: _Optional[_Union[PartitionPropertyFilter, _Mapping]] = ..., artifact_filter: _Optional[_Union[ArtifactPropertyFilter, _Mapping]] = ..., dataset_filter: _Optional[_Union[DatasetPropertyFilter, _Mapping]] = ..., operator: _Optional[_Union[SinglePropertyFilter.ComparisonOperator, str]] = ...) -> None: ... 
+ +class ArtifactPropertyFilter(_message.Message): + __slots__ = ["artifact_id"] + ARTIFACT_ID_FIELD_NUMBER: _ClassVar[int] + artifact_id: str + def __init__(self, artifact_id: _Optional[str] = ...) -> None: ... + +class TagPropertyFilter(_message.Message): + __slots__ = ["tag_name"] + TAG_NAME_FIELD_NUMBER: _ClassVar[int] + tag_name: str + def __init__(self, tag_name: _Optional[str] = ...) -> None: ... + +class PartitionPropertyFilter(_message.Message): + __slots__ = ["key_val"] + KEY_VAL_FIELD_NUMBER: _ClassVar[int] + key_val: KeyValuePair + def __init__(self, key_val: _Optional[_Union[KeyValuePair, _Mapping]] = ...) -> None: ... + +class KeyValuePair(_message.Message): + __slots__ = ["key", "value"] + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: str + def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ... + +class DatasetPropertyFilter(_message.Message): + __slots__ = ["project", "name", "domain", "version", "org"] + PROJECT_FIELD_NUMBER: _ClassVar[int] + NAME_FIELD_NUMBER: _ClassVar[int] + DOMAIN_FIELD_NUMBER: _ClassVar[int] + VERSION_FIELD_NUMBER: _ClassVar[int] + ORG_FIELD_NUMBER: _ClassVar[int] + project: str + name: str + domain: str + version: str + org: str + def __init__(self, project: _Optional[str] = ..., name: _Optional[str] = ..., domain: _Optional[str] = ..., version: _Optional[str] = ..., org: _Optional[str] = ...) -> None: ... 
+ +class PaginationOptions(_message.Message): + __slots__ = ["limit", "token", "sortKey", "sortOrder"] + class SortOrder(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): + __slots__ = [] + DESCENDING: _ClassVar[PaginationOptions.SortOrder] + ASCENDING: _ClassVar[PaginationOptions.SortOrder] + DESCENDING: PaginationOptions.SortOrder + ASCENDING: PaginationOptions.SortOrder + class SortKey(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): + __slots__ = [] + CREATION_TIME: _ClassVar[PaginationOptions.SortKey] + CREATION_TIME: PaginationOptions.SortKey + LIMIT_FIELD_NUMBER: _ClassVar[int] + TOKEN_FIELD_NUMBER: _ClassVar[int] + SORTKEY_FIELD_NUMBER: _ClassVar[int] + SORTORDER_FIELD_NUMBER: _ClassVar[int] + limit: int + token: str + sortKey: PaginationOptions.SortKey + sortOrder: PaginationOptions.SortOrder + def __init__(self, limit: _Optional[int] = ..., token: _Optional[str] = ..., sortKey: _Optional[_Union[PaginationOptions.SortKey, str]] = ..., sortOrder: _Optional[_Union[PaginationOptions.SortOrder, str]] = ...) -> None: ... diff --git a/gen/python/flyteidl2/datacatalog/datacatalog_pb2_grpc.py b/gen/python/flyteidl2/datacatalog/datacatalog_pb2_grpc.py new file mode 100644 index 0000000000..1ac1d30b7d --- /dev/null +++ b/gen/python/flyteidl2/datacatalog/datacatalog_pb2_grpc.py @@ -0,0 +1,404 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + +from flyteidl2.datacatalog import datacatalog_pb2 as flyteidl2_dot_datacatalog_dot_datacatalog__pb2 + + +class DataCatalogStub(object): + """TODO @pvditt clean this up + + + Data Catalog service definition + Data Catalog is a service for indexing parameterized, strongly-typed data artifacts across revisions. + Artifacts are associated with a Dataset, and can be tagged for retrieval. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.CreateDataset = channel.unary_unary( + '/flyteidl2.datacatalog.DataCatalog/CreateDataset', + request_serializer=flyteidl2_dot_datacatalog_dot_datacatalog__pb2.CreateDatasetRequest.SerializeToString, + response_deserializer=flyteidl2_dot_datacatalog_dot_datacatalog__pb2.CreateDatasetResponse.FromString, + ) + self.GetDataset = channel.unary_unary( + '/flyteidl2.datacatalog.DataCatalog/GetDataset', + request_serializer=flyteidl2_dot_datacatalog_dot_datacatalog__pb2.GetDatasetRequest.SerializeToString, + response_deserializer=flyteidl2_dot_datacatalog_dot_datacatalog__pb2.GetDatasetResponse.FromString, + ) + self.CreateArtifact = channel.unary_unary( + '/flyteidl2.datacatalog.DataCatalog/CreateArtifact', + request_serializer=flyteidl2_dot_datacatalog_dot_datacatalog__pb2.CreateArtifactRequest.SerializeToString, + response_deserializer=flyteidl2_dot_datacatalog_dot_datacatalog__pb2.CreateArtifactResponse.FromString, + ) + self.GetArtifact = channel.unary_unary( + '/flyteidl2.datacatalog.DataCatalog/GetArtifact', + request_serializer=flyteidl2_dot_datacatalog_dot_datacatalog__pb2.GetArtifactRequest.SerializeToString, + response_deserializer=flyteidl2_dot_datacatalog_dot_datacatalog__pb2.GetArtifactResponse.FromString, + ) + self.AddTag = channel.unary_unary( + '/flyteidl2.datacatalog.DataCatalog/AddTag', + request_serializer=flyteidl2_dot_datacatalog_dot_datacatalog__pb2.AddTagRequest.SerializeToString, + response_deserializer=flyteidl2_dot_datacatalog_dot_datacatalog__pb2.AddTagResponse.FromString, + ) + self.ListArtifacts = channel.unary_unary( + '/flyteidl2.datacatalog.DataCatalog/ListArtifacts', + request_serializer=flyteidl2_dot_datacatalog_dot_datacatalog__pb2.ListArtifactsRequest.SerializeToString, + response_deserializer=flyteidl2_dot_datacatalog_dot_datacatalog__pb2.ListArtifactsResponse.FromString, + ) + self.ListDatasets = channel.unary_unary( + '/flyteidl2.datacatalog.DataCatalog/ListDatasets', + 
request_serializer=flyteidl2_dot_datacatalog_dot_datacatalog__pb2.ListDatasetsRequest.SerializeToString, + response_deserializer=flyteidl2_dot_datacatalog_dot_datacatalog__pb2.ListDatasetsResponse.FromString, + ) + self.UpdateArtifact = channel.unary_unary( + '/flyteidl2.datacatalog.DataCatalog/UpdateArtifact', + request_serializer=flyteidl2_dot_datacatalog_dot_datacatalog__pb2.UpdateArtifactRequest.SerializeToString, + response_deserializer=flyteidl2_dot_datacatalog_dot_datacatalog__pb2.UpdateArtifactResponse.FromString, + ) + self.GetOrExtendReservation = channel.unary_unary( + '/flyteidl2.datacatalog.DataCatalog/GetOrExtendReservation', + request_serializer=flyteidl2_dot_datacatalog_dot_datacatalog__pb2.GetOrExtendReservationRequest.SerializeToString, + response_deserializer=flyteidl2_dot_datacatalog_dot_datacatalog__pb2.GetOrExtendReservationResponse.FromString, + ) + self.ReleaseReservation = channel.unary_unary( + '/flyteidl2.datacatalog.DataCatalog/ReleaseReservation', + request_serializer=flyteidl2_dot_datacatalog_dot_datacatalog__pb2.ReleaseReservationRequest.SerializeToString, + response_deserializer=flyteidl2_dot_datacatalog_dot_datacatalog__pb2.ReleaseReservationResponse.FromString, + ) + + +class DataCatalogServicer(object): + """TODO @pvditt clean this up + + + Data Catalog service definition + Data Catalog is a service for indexing parameterized, strongly-typed data artifacts across revisions. + Artifacts are associated with a Dataset, and can be tagged for retrieval. + """ + + def CreateDataset(self, request, context): + """Create a new Dataset. Datasets are unique based on the DatasetID. Datasets are logical groupings of artifacts. + Each dataset can have one or more artifacts + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetDataset(self, request, context): + """Get a Dataset by the DatasetID. 
This returns the Dataset with the associated metadata. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def CreateArtifact(self, request, context): + """Create an artifact and the artifact data associated with it. An artifact can be a hive partition or arbitrary + files or data values + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetArtifact(self, request, context): + """Retrieve an artifact by an identifying handle. This returns an artifact along with the artifact data. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def AddTag(self, request, context): + """Associate a tag with an artifact. Tags are unique within a Dataset. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ListArtifacts(self, request, context): + """Return a paginated list of artifacts + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ListDatasets(self, request, context): + """Return a paginated list of datasets + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def UpdateArtifact(self, request, context): + """Updates an existing artifact, overwriting the stored artifact data in the underlying blob storage. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetOrExtendReservation(self, request, context): + """Attempts to get or extend a reservation for the corresponding artifact. If one already exists + (ie. another entity owns the reservation) then that reservation is retrieved. + Once you acquire a reservation, you need to periodically extend the reservation with an + identical call. If the reservation is not extended before the defined expiration, it may be + acquired by another task. + Note: We may have multiple concurrent tasks with the same signature and the same input that + try to populate the same artifact at the same time. Thus with reservation, only one task can + run at a time, until the reservation expires. + Note: If task A does not extend the reservation in time and the reservation expires, another + task B may take over the reservation, resulting in two tasks A and B running in parallel. So + a third task C may get the Artifact from A or B, whichever writes last. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ReleaseReservation(self, request, context): + """Release the reservation when the task holding the spot fails so that the other tasks + can grab the spot. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_DataCatalogServicer_to_server(servicer, server): + rpc_method_handlers = { + 'CreateDataset': grpc.unary_unary_rpc_method_handler( + servicer.CreateDataset, + request_deserializer=flyteidl2_dot_datacatalog_dot_datacatalog__pb2.CreateDatasetRequest.FromString, + response_serializer=flyteidl2_dot_datacatalog_dot_datacatalog__pb2.CreateDatasetResponse.SerializeToString, + ), + 'GetDataset': grpc.unary_unary_rpc_method_handler( + servicer.GetDataset, + request_deserializer=flyteidl2_dot_datacatalog_dot_datacatalog__pb2.GetDatasetRequest.FromString, + response_serializer=flyteidl2_dot_datacatalog_dot_datacatalog__pb2.GetDatasetResponse.SerializeToString, + ), + 'CreateArtifact': grpc.unary_unary_rpc_method_handler( + servicer.CreateArtifact, + request_deserializer=flyteidl2_dot_datacatalog_dot_datacatalog__pb2.CreateArtifactRequest.FromString, + response_serializer=flyteidl2_dot_datacatalog_dot_datacatalog__pb2.CreateArtifactResponse.SerializeToString, + ), + 'GetArtifact': grpc.unary_unary_rpc_method_handler( + servicer.GetArtifact, + request_deserializer=flyteidl2_dot_datacatalog_dot_datacatalog__pb2.GetArtifactRequest.FromString, + response_serializer=flyteidl2_dot_datacatalog_dot_datacatalog__pb2.GetArtifactResponse.SerializeToString, + ), + 'AddTag': grpc.unary_unary_rpc_method_handler( + servicer.AddTag, + request_deserializer=flyteidl2_dot_datacatalog_dot_datacatalog__pb2.AddTagRequest.FromString, + response_serializer=flyteidl2_dot_datacatalog_dot_datacatalog__pb2.AddTagResponse.SerializeToString, + ), + 'ListArtifacts': grpc.unary_unary_rpc_method_handler( + servicer.ListArtifacts, + request_deserializer=flyteidl2_dot_datacatalog_dot_datacatalog__pb2.ListArtifactsRequest.FromString, + response_serializer=flyteidl2_dot_datacatalog_dot_datacatalog__pb2.ListArtifactsResponse.SerializeToString, 
+ ), + 'ListDatasets': grpc.unary_unary_rpc_method_handler( + servicer.ListDatasets, + request_deserializer=flyteidl2_dot_datacatalog_dot_datacatalog__pb2.ListDatasetsRequest.FromString, + response_serializer=flyteidl2_dot_datacatalog_dot_datacatalog__pb2.ListDatasetsResponse.SerializeToString, + ), + 'UpdateArtifact': grpc.unary_unary_rpc_method_handler( + servicer.UpdateArtifact, + request_deserializer=flyteidl2_dot_datacatalog_dot_datacatalog__pb2.UpdateArtifactRequest.FromString, + response_serializer=flyteidl2_dot_datacatalog_dot_datacatalog__pb2.UpdateArtifactResponse.SerializeToString, + ), + 'GetOrExtendReservation': grpc.unary_unary_rpc_method_handler( + servicer.GetOrExtendReservation, + request_deserializer=flyteidl2_dot_datacatalog_dot_datacatalog__pb2.GetOrExtendReservationRequest.FromString, + response_serializer=flyteidl2_dot_datacatalog_dot_datacatalog__pb2.GetOrExtendReservationResponse.SerializeToString, + ), + 'ReleaseReservation': grpc.unary_unary_rpc_method_handler( + servicer.ReleaseReservation, + request_deserializer=flyteidl2_dot_datacatalog_dot_datacatalog__pb2.ReleaseReservationRequest.FromString, + response_serializer=flyteidl2_dot_datacatalog_dot_datacatalog__pb2.ReleaseReservationResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'flyteidl2.datacatalog.DataCatalog', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + + # This class is part of an EXPERIMENTAL API. +class DataCatalog(object): + """TODO @pvditt clean this up + + + Data Catalog service definition + Data Catalog is a service for indexing parameterized, strongly-typed data artifacts across revisions. + Artifacts are associated with a Dataset, and can be tagged for retrieval. 
+ """ + + @staticmethod + def CreateDataset(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flyteidl2.datacatalog.DataCatalog/CreateDataset', + flyteidl2_dot_datacatalog_dot_datacatalog__pb2.CreateDatasetRequest.SerializeToString, + flyteidl2_dot_datacatalog_dot_datacatalog__pb2.CreateDatasetResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def GetDataset(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flyteidl2.datacatalog.DataCatalog/GetDataset', + flyteidl2_dot_datacatalog_dot_datacatalog__pb2.GetDatasetRequest.SerializeToString, + flyteidl2_dot_datacatalog_dot_datacatalog__pb2.GetDatasetResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def CreateArtifact(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flyteidl2.datacatalog.DataCatalog/CreateArtifact', + flyteidl2_dot_datacatalog_dot_datacatalog__pb2.CreateArtifactRequest.SerializeToString, + flyteidl2_dot_datacatalog_dot_datacatalog__pb2.CreateArtifactResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def GetArtifact(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + 
wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flyteidl2.datacatalog.DataCatalog/GetArtifact', + flyteidl2_dot_datacatalog_dot_datacatalog__pb2.GetArtifactRequest.SerializeToString, + flyteidl2_dot_datacatalog_dot_datacatalog__pb2.GetArtifactResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def AddTag(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flyteidl2.datacatalog.DataCatalog/AddTag', + flyteidl2_dot_datacatalog_dot_datacatalog__pb2.AddTagRequest.SerializeToString, + flyteidl2_dot_datacatalog_dot_datacatalog__pb2.AddTagResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def ListArtifacts(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flyteidl2.datacatalog.DataCatalog/ListArtifacts', + flyteidl2_dot_datacatalog_dot_datacatalog__pb2.ListArtifactsRequest.SerializeToString, + flyteidl2_dot_datacatalog_dot_datacatalog__pb2.ListArtifactsResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def ListDatasets(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flyteidl2.datacatalog.DataCatalog/ListDatasets', + 
flyteidl2_dot_datacatalog_dot_datacatalog__pb2.ListDatasetsRequest.SerializeToString, + flyteidl2_dot_datacatalog_dot_datacatalog__pb2.ListDatasetsResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def UpdateArtifact(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flyteidl2.datacatalog.DataCatalog/UpdateArtifact', + flyteidl2_dot_datacatalog_dot_datacatalog__pb2.UpdateArtifactRequest.SerializeToString, + flyteidl2_dot_datacatalog_dot_datacatalog__pb2.UpdateArtifactResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def GetOrExtendReservation(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flyteidl2.datacatalog.DataCatalog/GetOrExtendReservation', + flyteidl2_dot_datacatalog_dot_datacatalog__pb2.GetOrExtendReservationRequest.SerializeToString, + flyteidl2_dot_datacatalog_dot_datacatalog__pb2.GetOrExtendReservationResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def ReleaseReservation(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/flyteidl2.datacatalog.DataCatalog/ReleaseReservation', + flyteidl2_dot_datacatalog_dot_datacatalog__pb2.ReleaseReservationRequest.SerializeToString, + 
flyteidl2_dot_datacatalog_dot_datacatalog__pb2.ReleaseReservationResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/gen/python/flyteidl2/event/__init__.py b/gen/python/flyteidl2/event/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/gen/python/flyteidl2/event/cloudevents_pb2.py b/gen/python/flyteidl2/event/cloudevents_pb2.py new file mode 100644 index 0000000000..a6dfd4a58c --- /dev/null +++ b/gen/python/flyteidl2/event/cloudevents_pb2.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: flyteidl2/event/cloudevents.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from flyteidl2.core import artifact_id_pb2 as flyteidl2_dot_core_dot_artifact__id__pb2 +from flyteidl2.core import identifier_pb2 as flyteidl2_dot_core_dot_identifier__pb2 +from flyteidl2.core import interface_pb2 as flyteidl2_dot_core_dot_interface__pb2 +from flyteidl2.event import event_pb2 as flyteidl2_dot_event_dot_event__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n!flyteidl2/event/cloudevents.proto\x12\x0f\x66lyteidl2.event\x1a flyteidl2/core/artifact_id.proto\x1a\x1f\x66lyteidl2/core/identifier.proto\x1a\x1e\x66lyteidl2/core/interface.proto\x1a\x1b\x66lyteidl2/event/event.proto\"\xb8\x04\n\x1b\x43loudEventWorkflowExecution\x12\x44\n\traw_event\x18\x01 \x01(\x0b\x32\'.flyteidl2.event.WorkflowExecutionEventR\x08rawEvent\x12I\n\x10output_interface\x18\x02 \x01(\x0b\x32\x1e.flyteidl2.core.TypedInterfaceR\x0foutputInterface\x12=\n\x0c\x61rtifact_ids\x18\x03 
\x03(\x0b\x32\x1a.flyteidl2.core.ArtifactIDR\x0b\x61rtifactIds\x12\\\n\x13reference_execution\x18\x04 \x01(\x0b\x32+.flyteidl2.core.WorkflowExecutionIdentifierR\x12referenceExecution\x12\x1c\n\tprincipal\x18\x05 \x01(\tR\tprincipal\x12@\n\x0elaunch_plan_id\x18\x06 \x01(\x0b\x32\x1a.flyteidl2.core.IdentifierR\x0claunchPlanId\x12P\n\x06labels\x18\x07 \x03(\x0b\x32\x38.flyteidl2.event.CloudEventWorkflowExecution.LabelsEntryR\x06labels\x1a\x39\n\x0bLabelsEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01\"\x99\x04\n\x17\x43loudEventNodeExecution\x12@\n\traw_event\x18\x01 \x01(\x0b\x32#.flyteidl2.event.NodeExecutionEventR\x08rawEvent\x12I\n\x0ctask_exec_id\x18\x02 \x01(\x0b\x32\'.flyteidl2.core.TaskExecutionIdentifierR\ntaskExecId\x12I\n\x10output_interface\x18\x03 \x01(\x0b\x32\x1e.flyteidl2.core.TypedInterfaceR\x0foutputInterface\x12=\n\x0c\x61rtifact_ids\x18\x04 \x03(\x0b\x32\x1a.flyteidl2.core.ArtifactIDR\x0b\x61rtifactIds\x12\x1c\n\tprincipal\x18\x05 \x01(\tR\tprincipal\x12@\n\x0elaunch_plan_id\x18\x06 \x01(\x0b\x32\x1a.flyteidl2.core.IdentifierR\x0claunchPlanId\x12L\n\x06labels\x18\x07 \x03(\x0b\x32\x34.flyteidl2.event.CloudEventNodeExecution.LabelsEntryR\x06labels\x1a\x39\n\x0bLabelsEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01\"\xe4\x01\n\x17\x43loudEventTaskExecution\x12@\n\traw_event\x18\x01 \x01(\x0b\x32#.flyteidl2.event.TaskExecutionEventR\x08rawEvent\x12L\n\x06labels\x18\x02 \x03(\x0b\x32\x34.flyteidl2.event.CloudEventTaskExecution.LabelsEntryR\x06labels\x1a\x39\n\x0bLabelsEntry\x12\x10\n\x03key\x18\x01 \x01(\tR\x03key\x12\x14\n\x05value\x18\x02 \x01(\tR\x05value:\x02\x38\x01\"\xf3\x02\n\x18\x43loudEventExecutionStart\x12N\n\x0c\x65xecution_id\x18\x01 \x01(\x0b\x32+.flyteidl2.core.WorkflowExecutionIdentifierR\x0b\x65xecutionId\x12@\n\x0elaunch_plan_id\x18\x02 
\x01(\x0b\x32\x1a.flyteidl2.core.IdentifierR\x0claunchPlanId\x12;\n\x0bworkflow_id\x18\x03 \x01(\x0b\x32\x1a.flyteidl2.core.IdentifierR\nworkflowId\x12=\n\x0c\x61rtifact_ids\x18\x04 \x03(\x0b\x32\x1a.flyteidl2.core.ArtifactIDR\x0b\x61rtifactIds\x12+\n\x11\x61rtifact_trackers\x18\x05 \x03(\tR\x10\x61rtifactTrackers\x12\x1c\n\tprincipal\x18\x06 \x01(\tR\tprincipalB\xbb\x01\n\x13\x63om.flyteidl2.eventB\x10\x43loudeventsProtoH\x02P\x01Z3github.com/flyteorg/flyte/v2/gen/go/flyteidl2/event\xa2\x02\x03\x46\x45X\xaa\x02\x0f\x46lyteidl2.Event\xca\x02\x0f\x46lyteidl2\\Event\xe2\x02\x1b\x46lyteidl2\\Event\\GPBMetadata\xea\x02\x10\x46lyteidl2::Eventb\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flyteidl2.event.cloudevents_pb2', _globals) +if _descriptor._USE_C_DESCRIPTORS == False: + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\023com.flyteidl2.eventB\020CloudeventsProtoH\002P\001Z3github.com/flyteorg/flyte/v2/gen/go/flyteidl2/event\242\002\003FEX\252\002\017Flyteidl2.Event\312\002\017Flyteidl2\\Event\342\002\033Flyteidl2\\Event\\GPBMetadata\352\002\020Flyteidl2::Event' + _CLOUDEVENTWORKFLOWEXECUTION_LABELSENTRY._options = None + _CLOUDEVENTWORKFLOWEXECUTION_LABELSENTRY._serialized_options = b'8\001' + _CLOUDEVENTNODEEXECUTION_LABELSENTRY._options = None + _CLOUDEVENTNODEEXECUTION_LABELSENTRY._serialized_options = b'8\001' + _CLOUDEVENTTASKEXECUTION_LABELSENTRY._options = None + _CLOUDEVENTTASKEXECUTION_LABELSENTRY._serialized_options = b'8\001' + _globals['_CLOUDEVENTWORKFLOWEXECUTION']._serialized_start=183 + _globals['_CLOUDEVENTWORKFLOWEXECUTION']._serialized_end=751 + _globals['_CLOUDEVENTWORKFLOWEXECUTION_LABELSENTRY']._serialized_start=694 + _globals['_CLOUDEVENTWORKFLOWEXECUTION_LABELSENTRY']._serialized_end=751 + _globals['_CLOUDEVENTNODEEXECUTION']._serialized_start=754 + _globals['_CLOUDEVENTNODEEXECUTION']._serialized_end=1291 + 
_globals['_CLOUDEVENTNODEEXECUTION_LABELSENTRY']._serialized_start=694 + _globals['_CLOUDEVENTNODEEXECUTION_LABELSENTRY']._serialized_end=751 + _globals['_CLOUDEVENTTASKEXECUTION']._serialized_start=1294 + _globals['_CLOUDEVENTTASKEXECUTION']._serialized_end=1522 + _globals['_CLOUDEVENTTASKEXECUTION_LABELSENTRY']._serialized_start=694 + _globals['_CLOUDEVENTTASKEXECUTION_LABELSENTRY']._serialized_end=751 + _globals['_CLOUDEVENTEXECUTIONSTART']._serialized_start=1525 + _globals['_CLOUDEVENTEXECUTIONSTART']._serialized_end=1896 +# @@protoc_insertion_point(module_scope) diff --git a/gen/python/flyteidl2/event/cloudevents_pb2.pyi b/gen/python/flyteidl2/event/cloudevents_pb2.pyi new file mode 100644 index 0000000000..a5dabf91a3 --- /dev/null +++ b/gen/python/flyteidl2/event/cloudevents_pb2.pyi @@ -0,0 +1,91 @@ +from flyteidl2.core import artifact_id_pb2 as _artifact_id_pb2 +from flyteidl2.core import identifier_pb2 as _identifier_pb2 +from flyteidl2.core import interface_pb2 as _interface_pb2 +from flyteidl2.event import event_pb2 as _event_pb2 +from google.protobuf.internal import containers as _containers +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from typing import ClassVar as _ClassVar, Iterable as _Iterable, Mapping as _Mapping, Optional as _Optional, Union as _Union + +DESCRIPTOR: _descriptor.FileDescriptor + +class CloudEventWorkflowExecution(_message.Message): + __slots__ = ["raw_event", "output_interface", "artifact_ids", "reference_execution", "principal", "launch_plan_id", "labels"] + class LabelsEntry(_message.Message): + __slots__ = ["key", "value"] + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: str + def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ... 
+ RAW_EVENT_FIELD_NUMBER: _ClassVar[int] + OUTPUT_INTERFACE_FIELD_NUMBER: _ClassVar[int] + ARTIFACT_IDS_FIELD_NUMBER: _ClassVar[int] + REFERENCE_EXECUTION_FIELD_NUMBER: _ClassVar[int] + PRINCIPAL_FIELD_NUMBER: _ClassVar[int] + LAUNCH_PLAN_ID_FIELD_NUMBER: _ClassVar[int] + LABELS_FIELD_NUMBER: _ClassVar[int] + raw_event: _event_pb2.WorkflowExecutionEvent + output_interface: _interface_pb2.TypedInterface + artifact_ids: _containers.RepeatedCompositeFieldContainer[_artifact_id_pb2.ArtifactID] + reference_execution: _identifier_pb2.WorkflowExecutionIdentifier + principal: str + launch_plan_id: _identifier_pb2.Identifier + labels: _containers.ScalarMap[str, str] + def __init__(self, raw_event: _Optional[_Union[_event_pb2.WorkflowExecutionEvent, _Mapping]] = ..., output_interface: _Optional[_Union[_interface_pb2.TypedInterface, _Mapping]] = ..., artifact_ids: _Optional[_Iterable[_Union[_artifact_id_pb2.ArtifactID, _Mapping]]] = ..., reference_execution: _Optional[_Union[_identifier_pb2.WorkflowExecutionIdentifier, _Mapping]] = ..., principal: _Optional[str] = ..., launch_plan_id: _Optional[_Union[_identifier_pb2.Identifier, _Mapping]] = ..., labels: _Optional[_Mapping[str, str]] = ...) -> None: ... + +class CloudEventNodeExecution(_message.Message): + __slots__ = ["raw_event", "task_exec_id", "output_interface", "artifact_ids", "principal", "launch_plan_id", "labels"] + class LabelsEntry(_message.Message): + __slots__ = ["key", "value"] + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: str + def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ... 
+ RAW_EVENT_FIELD_NUMBER: _ClassVar[int] + TASK_EXEC_ID_FIELD_NUMBER: _ClassVar[int] + OUTPUT_INTERFACE_FIELD_NUMBER: _ClassVar[int] + ARTIFACT_IDS_FIELD_NUMBER: _ClassVar[int] + PRINCIPAL_FIELD_NUMBER: _ClassVar[int] + LAUNCH_PLAN_ID_FIELD_NUMBER: _ClassVar[int] + LABELS_FIELD_NUMBER: _ClassVar[int] + raw_event: _event_pb2.NodeExecutionEvent + task_exec_id: _identifier_pb2.TaskExecutionIdentifier + output_interface: _interface_pb2.TypedInterface + artifact_ids: _containers.RepeatedCompositeFieldContainer[_artifact_id_pb2.ArtifactID] + principal: str + launch_plan_id: _identifier_pb2.Identifier + labels: _containers.ScalarMap[str, str] + def __init__(self, raw_event: _Optional[_Union[_event_pb2.NodeExecutionEvent, _Mapping]] = ..., task_exec_id: _Optional[_Union[_identifier_pb2.TaskExecutionIdentifier, _Mapping]] = ..., output_interface: _Optional[_Union[_interface_pb2.TypedInterface, _Mapping]] = ..., artifact_ids: _Optional[_Iterable[_Union[_artifact_id_pb2.ArtifactID, _Mapping]]] = ..., principal: _Optional[str] = ..., launch_plan_id: _Optional[_Union[_identifier_pb2.Identifier, _Mapping]] = ..., labels: _Optional[_Mapping[str, str]] = ...) -> None: ... + +class CloudEventTaskExecution(_message.Message): + __slots__ = ["raw_event", "labels"] + class LabelsEntry(_message.Message): + __slots__ = ["key", "value"] + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: str + def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ... + RAW_EVENT_FIELD_NUMBER: _ClassVar[int] + LABELS_FIELD_NUMBER: _ClassVar[int] + raw_event: _event_pb2.TaskExecutionEvent + labels: _containers.ScalarMap[str, str] + def __init__(self, raw_event: _Optional[_Union[_event_pb2.TaskExecutionEvent, _Mapping]] = ..., labels: _Optional[_Mapping[str, str]] = ...) -> None: ... 
+ +class CloudEventExecutionStart(_message.Message): + __slots__ = ["execution_id", "launch_plan_id", "workflow_id", "artifact_ids", "artifact_trackers", "principal"] + EXECUTION_ID_FIELD_NUMBER: _ClassVar[int] + LAUNCH_PLAN_ID_FIELD_NUMBER: _ClassVar[int] + WORKFLOW_ID_FIELD_NUMBER: _ClassVar[int] + ARTIFACT_IDS_FIELD_NUMBER: _ClassVar[int] + ARTIFACT_TRACKERS_FIELD_NUMBER: _ClassVar[int] + PRINCIPAL_FIELD_NUMBER: _ClassVar[int] + execution_id: _identifier_pb2.WorkflowExecutionIdentifier + launch_plan_id: _identifier_pb2.Identifier + workflow_id: _identifier_pb2.Identifier + artifact_ids: _containers.RepeatedCompositeFieldContainer[_artifact_id_pb2.ArtifactID] + artifact_trackers: _containers.RepeatedScalarFieldContainer[str] + principal: str + def __init__(self, execution_id: _Optional[_Union[_identifier_pb2.WorkflowExecutionIdentifier, _Mapping]] = ..., launch_plan_id: _Optional[_Union[_identifier_pb2.Identifier, _Mapping]] = ..., workflow_id: _Optional[_Union[_identifier_pb2.Identifier, _Mapping]] = ..., artifact_ids: _Optional[_Iterable[_Union[_artifact_id_pb2.ArtifactID, _Mapping]]] = ..., artifact_trackers: _Optional[_Iterable[str]] = ..., principal: _Optional[str] = ...) -> None: ... diff --git a/gen/python/flyteidl2/event/cloudevents_pb2_grpc.py b/gen/python/flyteidl2/event/cloudevents_pb2_grpc.py new file mode 100644 index 0000000000..2daafffebf --- /dev/null +++ b/gen/python/flyteidl2/event/cloudevents_pb2_grpc.py @@ -0,0 +1,4 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + diff --git a/gen/python/flyteidl2/event/event_pb2.py b/gen/python/flyteidl2/event/event_pb2.py new file mode 100644 index 0000000000..9b5a4f0edb --- /dev/null +++ b/gen/python/flyteidl2/event/event_pb2.py @@ -0,0 +1,56 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: flyteidl2/event/event.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from flyteidl2.core import catalog_pb2 as flyteidl2_dot_core_dot_catalog__pb2 +from flyteidl2.core import execution_pb2 as flyteidl2_dot_core_dot_execution__pb2 +from flyteidl2.core import identifier_pb2 as flyteidl2_dot_core_dot_identifier__pb2 +from flyteidl2.core import literals_pb2 as flyteidl2_dot_core_dot_literals__pb2 +from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2 +from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1b\x66lyteidl2/event/event.proto\x12\x0f\x66lyteidl2.event\x1a\x1c\x66lyteidl2/core/catalog.proto\x1a\x1e\x66lyteidl2/core/execution.proto\x1a\x1f\x66lyteidl2/core/identifier.proto\x1a\x1d\x66lyteidl2/core/literals.proto\x1a\x1cgoogle/protobuf/struct.proto\x1a\x1fgoogle/protobuf/timestamp.proto\"\xae\x03\n\x16WorkflowExecutionEvent\x12N\n\x0c\x65xecution_id\x18\x01 \x01(\x0b\x32+.flyteidl2.core.WorkflowExecutionIdentifierR\x0b\x65xecutionId\x12\x1f\n\x0bproducer_id\x18\x02 \x01(\tR\nproducerId\x12=\n\x05phase\x18\x03 \x01(\x0e\x32\'.flyteidl2.core.WorkflowExecution.PhaseR\x05phase\x12;\n\x0boccurred_at\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\noccurredAt\x12\x1f\n\noutput_uri\x18\x05 \x01(\tH\x00R\toutputUri\x12\x36\n\x05\x65rror\x18\x06 \x01(\x0b\x32\x1e.flyteidl2.core.ExecutionErrorH\x00R\x05\x65rror\x12=\n\x0boutput_data\x18\x07 \x01(\x0b\x32\x1a.flyteidl2.core.LiteralMapH\x00R\noutputDataB\x0f\n\routput_result\"\xbe\n\n\x12NodeExecutionEvent\x12\x37\n\x02id\x18\x01 
\x01(\x0b\x32\'.flyteidl2.core.NodeExecutionIdentifierR\x02id\x12\x1f\n\x0bproducer_id\x18\x02 \x01(\tR\nproducerId\x12\x39\n\x05phase\x18\x03 \x01(\x0e\x32#.flyteidl2.core.NodeExecution.PhaseR\x05phase\x12;\n\x0boccurred_at\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\noccurredAt\x12\x1d\n\tinput_uri\x18\x05 \x01(\tH\x00R\x08inputUri\x12;\n\ninput_data\x18\x14 \x01(\x0b\x32\x1a.flyteidl2.core.LiteralMapH\x00R\tinputData\x12\x1f\n\noutput_uri\x18\x06 \x01(\tH\x01R\toutputUri\x12\x36\n\x05\x65rror\x18\x07 \x01(\x0b\x32\x1e.flyteidl2.core.ExecutionErrorH\x01R\x05\x65rror\x12=\n\x0boutput_data\x18\x0f \x01(\x0b\x32\x1a.flyteidl2.core.LiteralMapH\x01R\noutputData\x12]\n\x16workflow_node_metadata\x18\x08 \x01(\x0b\x32%.flyteidl2.event.WorkflowNodeMetadataH\x02R\x14workflowNodeMetadata\x12Q\n\x12task_node_metadata\x18\x0e \x01(\x0b\x32!.flyteidl2.event.TaskNodeMetadataH\x02R\x10taskNodeMetadata\x12^\n\x14parent_task_metadata\x18\t \x01(\x0b\x32,.flyteidl2.event.ParentTaskExecutionMetadataR\x12parentTaskMetadata\x12^\n\x14parent_node_metadata\x18\n \x01(\x0b\x32,.flyteidl2.event.ParentNodeExecutionMetadataR\x12parentNodeMetadata\x12\x1f\n\x0bretry_group\x18\x0b \x01(\tR\nretryGroup\x12 \n\x0cspec_node_id\x18\x0c \x01(\tR\nspecNodeId\x12\x1b\n\tnode_name\x18\r \x01(\tR\x08nodeName\x12#\n\revent_version\x18\x10 \x01(\x05R\x0c\x65ventVersion\x12\x1b\n\tis_parent\x18\x11 \x01(\x08R\x08isParent\x12\x1d\n\nis_dynamic\x18\x12 \x01(\x08R\tisDynamic\x12\x19\n\x08\x64\x65\x63k_uri\x18\x13 \x01(\tR\x07\x64\x65\x63kUri\x12;\n\x0breported_at\x18\x15 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\nreportedAt\x12\x19\n\x08is_array\x18\x16 \x01(\x08R\x07isArray\x12?\n\rtarget_entity\x18\x17 \x01(\x0b\x32\x1a.flyteidl2.core.IdentifierR\x0ctargetEntity\x12-\n\x13is_in_dynamic_chain\x18\x18 \x01(\x08R\x10isInDynamicChain\x12\x19\n\x08is_eager\x18\x19 
\x01(\x08R\x07isEagerB\r\n\x0binput_valueB\x0f\n\routput_resultB\x11\n\x0ftarget_metadata\"f\n\x14WorkflowNodeMetadata\x12N\n\x0c\x65xecution_id\x18\x01 \x01(\x0b\x32+.flyteidl2.core.WorkflowExecutionIdentifierR\x0b\x65xecutionId\"\x9c\x02\n\x10TaskNodeMetadata\x12\x45\n\x0c\x63\x61\x63he_status\x18\x01 \x01(\x0e\x32\".flyteidl2.core.CatalogCacheStatusR\x0b\x63\x61\x63heStatus\x12@\n\x0b\x63\x61talog_key\x18\x02 \x01(\x0b\x32\x1f.flyteidl2.core.CatalogMetadataR\ncatalogKey\x12X\n\x12reservation_status\x18\x03 \x01(\x0e\x32).flyteidl2.core.CatalogReservation.StatusR\x11reservationStatus\x12%\n\x0e\x63heckpoint_uri\x18\x04 \x01(\tR\rcheckpointUri\"V\n\x1bParentTaskExecutionMetadata\x12\x37\n\x02id\x18\x01 \x01(\x0b\x32\'.flyteidl2.core.TaskExecutionIdentifierR\x02id\"6\n\x1bParentNodeExecutionMetadata\x12\x17\n\x07node_id\x18\x01 \x01(\tR\x06nodeId\"b\n\x0b\x45ventReason\x12\x16\n\x06reason\x18\x01 \x01(\tR\x06reason\x12;\n\x0boccurred_at\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\noccurredAt\"\xdd\x08\n\x12TaskExecutionEvent\x12\x33\n\x07task_id\x18\x01 \x01(\x0b\x32\x1a.flyteidl2.core.IdentifierR\x06taskId\x12`\n\x18parent_node_execution_id\x18\x02 \x01(\x0b\x32\'.flyteidl2.core.NodeExecutionIdentifierR\x15parentNodeExecutionId\x12#\n\rretry_attempt\x18\x03 \x01(\rR\x0cretryAttempt\x12\x39\n\x05phase\x18\x04 \x01(\x0e\x32#.flyteidl2.core.TaskExecution.PhaseR\x05phase\x12\x1f\n\x0bproducer_id\x18\x05 \x01(\tR\nproducerId\x12+\n\x04logs\x18\x06 \x03(\x0b\x32\x17.flyteidl2.core.TaskLogR\x04logs\x12;\n\x0boccurred_at\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\noccurredAt\x12\x1d\n\tinput_uri\x18\x08 \x01(\tH\x00R\x08inputUri\x12;\n\ninput_data\x18\x13 \x01(\x0b\x32\x1a.flyteidl2.core.LiteralMapH\x00R\tinputData\x12\x1f\n\noutput_uri\x18\t \x01(\tH\x01R\toutputUri\x12\x36\n\x05\x65rror\x18\n \x01(\x0b\x32\x1e.flyteidl2.core.ExecutionErrorH\x01R\x05\x65rror\x12=\n\x0boutput_data\x18\x11 
\x01(\x0b\x32\x1a.flyteidl2.core.LiteralMapH\x01R\noutputData\x12\x38\n\x0b\x63ustom_info\x18\x0b \x01(\x0b\x32\x17.google.protobuf.StructR\ncustomInfo\x12#\n\rphase_version\x18\x0c \x01(\rR\x0cphaseVersion\x12\x1a\n\x06reason\x18\r \x01(\tB\x02\x18\x01R\x06reason\x12\x36\n\x07reasons\x18\x15 \x03(\x0b\x32\x1c.flyteidl2.event.EventReasonR\x07reasons\x12\x1b\n\ttask_type\x18\x0e \x01(\tR\x08taskType\x12\x42\n\x08metadata\x18\x10 \x01(\x0b\x32&.flyteidl2.event.TaskExecutionMetadataR\x08metadata\x12#\n\revent_version\x18\x12 \x01(\x05R\x0c\x65ventVersion\x12;\n\x0breported_at\x18\x14 \x01(\x0b\x32\x1a.google.protobuf.TimestampR\nreportedAt\x12;\n\x0blog_context\x18\x16 \x01(\x0b\x32\x1a.flyteidl2.core.LogContextR\nlogContextB\r\n\x0binput_valueB\x0f\n\routput_result\"\x8a\x04\n\x14\x45xternalResourceInfo\x12\x1f\n\x0b\x65xternal_id\x18\x01 \x01(\tR\nexternalId\x12\x14\n\x05index\x18\x02 \x01(\rR\x05index\x12#\n\rretry_attempt\x18\x03 \x01(\rR\x0cretryAttempt\x12\x39\n\x05phase\x18\x04 \x01(\x0e\x32#.flyteidl2.core.TaskExecution.PhaseR\x05phase\x12\x45\n\x0c\x63\x61\x63he_status\x18\x05 \x01(\x0e\x32\".flyteidl2.core.CatalogCacheStatusR\x0b\x63\x61\x63heStatus\x12+\n\x04logs\x18\x06 \x03(\x0b\x32\x17.flyteidl2.core.TaskLogR\x04logs\x12]\n\x16workflow_node_metadata\x18\x07 \x01(\x0b\x32%.flyteidl2.event.WorkflowNodeMetadataH\x00R\x14workflowNodeMetadata\x12\x38\n\x0b\x63ustom_info\x18\x08 \x01(\x0b\x32\x17.google.protobuf.StructR\ncustomInfo\x12;\n\x0blog_context\x18\t \x01(\x0b\x32\x1a.flyteidl2.core.LogContextR\nlogContextB\x11\n\x0ftarget_metadata\"[\n\x10ResourcePoolInfo\x12)\n\x10\x61llocation_token\x18\x01 \x01(\tR\x0f\x61llocationToken\x12\x1c\n\tnamespace\x18\x02 \x01(\tR\tnamespace\"\xa0\x03\n\x15TaskExecutionMetadata\x12%\n\x0egenerated_name\x18\x01 \x01(\tR\rgeneratedName\x12T\n\x12\x65xternal_resources\x18\x02 \x03(\x0b\x32%.flyteidl2.event.ExternalResourceInfoR\x11\x65xternalResources\x12O\n\x12resource_pool_info\x18\x03 
\x03(\x0b\x32!.flyteidl2.event.ResourcePoolInfoR\x10resourcePoolInfo\x12+\n\x11plugin_identifier\x18\x04 \x01(\tR\x10pluginIdentifier\x12[\n\x0einstance_class\x18\x10 \x01(\x0e\x32\x34.flyteidl2.event.TaskExecutionMetadata.InstanceClassR\rinstanceClass\"/\n\rInstanceClass\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\x11\n\rINTERRUPTIBLE\x10\x01\x42\xb5\x01\n\x13\x63om.flyteidl2.eventB\nEventProtoH\x02P\x01Z3github.com/flyteorg/flyte/v2/gen/go/flyteidl2/event\xa2\x02\x03\x46\x45X\xaa\x02\x0f\x46lyteidl2.Event\xca\x02\x0f\x46lyteidl2\\Event\xe2\x02\x1b\x46lyteidl2\\Event\\GPBMetadata\xea\x02\x10\x46lyteidl2::Eventb\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flyteidl2.event.event_pb2', _globals) +if _descriptor._USE_C_DESCRIPTORS == False: + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\023com.flyteidl2.eventB\nEventProtoH\002P\001Z3github.com/flyteorg/flyte/v2/gen/go/flyteidl2/event\242\002\003FEX\252\002\017Flyteidl2.Event\312\002\017Flyteidl2\\Event\342\002\033Flyteidl2\\Event\\GPBMetadata\352\002\020Flyteidl2::Event' + _TASKEXECUTIONEVENT.fields_by_name['reason']._options = None + _TASKEXECUTIONEVENT.fields_by_name['reason']._serialized_options = b'\030\001' + _globals['_WORKFLOWEXECUTIONEVENT']._serialized_start=238 + _globals['_WORKFLOWEXECUTIONEVENT']._serialized_end=668 + _globals['_NODEEXECUTIONEVENT']._serialized_start=671 + _globals['_NODEEXECUTIONEVENT']._serialized_end=2013 + _globals['_WORKFLOWNODEMETADATA']._serialized_start=2015 + _globals['_WORKFLOWNODEMETADATA']._serialized_end=2117 + _globals['_TASKNODEMETADATA']._serialized_start=2120 + _globals['_TASKNODEMETADATA']._serialized_end=2404 + _globals['_PARENTTASKEXECUTIONMETADATA']._serialized_start=2406 + _globals['_PARENTTASKEXECUTIONMETADATA']._serialized_end=2492 + _globals['_PARENTNODEEXECUTIONMETADATA']._serialized_start=2494 + 
_globals['_PARENTNODEEXECUTIONMETADATA']._serialized_end=2548 + _globals['_EVENTREASON']._serialized_start=2550 + _globals['_EVENTREASON']._serialized_end=2648 + _globals['_TASKEXECUTIONEVENT']._serialized_start=2651 + _globals['_TASKEXECUTIONEVENT']._serialized_end=3768 + _globals['_EXTERNALRESOURCEINFO']._serialized_start=3771 + _globals['_EXTERNALRESOURCEINFO']._serialized_end=4293 + _globals['_RESOURCEPOOLINFO']._serialized_start=4295 + _globals['_RESOURCEPOOLINFO']._serialized_end=4386 + _globals['_TASKEXECUTIONMETADATA']._serialized_start=4389 + _globals['_TASKEXECUTIONMETADATA']._serialized_end=4805 + _globals['_TASKEXECUTIONMETADATA_INSTANCECLASS']._serialized_start=4758 + _globals['_TASKEXECUTIONMETADATA_INSTANCECLASS']._serialized_end=4805 +# @@protoc_insertion_point(module_scope) diff --git a/gen/python/flyteidl2/event/event_pb2.pyi b/gen/python/flyteidl2/event/event_pb2.pyi new file mode 100644 index 0000000000..03d6a6579f --- /dev/null +++ b/gen/python/flyteidl2/event/event_pb2.pyi @@ -0,0 +1,219 @@ +from flyteidl2.core import catalog_pb2 as _catalog_pb2 +from flyteidl2.core import execution_pb2 as _execution_pb2 +from flyteidl2.core import identifier_pb2 as _identifier_pb2 +from flyteidl2.core import literals_pb2 as _literals_pb2 +from google.protobuf import struct_pb2 as _struct_pb2 +from google.protobuf import timestamp_pb2 as _timestamp_pb2 +from google.protobuf.internal import containers as _containers +from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from typing import ClassVar as _ClassVar, Iterable as _Iterable, Mapping as _Mapping, Optional as _Optional, Union as _Union + +DESCRIPTOR: _descriptor.FileDescriptor + +class WorkflowExecutionEvent(_message.Message): + __slots__ = ["execution_id", "producer_id", "phase", "occurred_at", "output_uri", "error", "output_data"] + EXECUTION_ID_FIELD_NUMBER: _ClassVar[int] + 
PRODUCER_ID_FIELD_NUMBER: _ClassVar[int] + PHASE_FIELD_NUMBER: _ClassVar[int] + OCCURRED_AT_FIELD_NUMBER: _ClassVar[int] + OUTPUT_URI_FIELD_NUMBER: _ClassVar[int] + ERROR_FIELD_NUMBER: _ClassVar[int] + OUTPUT_DATA_FIELD_NUMBER: _ClassVar[int] + execution_id: _identifier_pb2.WorkflowExecutionIdentifier + producer_id: str + phase: _execution_pb2.WorkflowExecution.Phase + occurred_at: _timestamp_pb2.Timestamp + output_uri: str + error: _execution_pb2.ExecutionError + output_data: _literals_pb2.LiteralMap + def __init__(self, execution_id: _Optional[_Union[_identifier_pb2.WorkflowExecutionIdentifier, _Mapping]] = ..., producer_id: _Optional[str] = ..., phase: _Optional[_Union[_execution_pb2.WorkflowExecution.Phase, str]] = ..., occurred_at: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., output_uri: _Optional[str] = ..., error: _Optional[_Union[_execution_pb2.ExecutionError, _Mapping]] = ..., output_data: _Optional[_Union[_literals_pb2.LiteralMap, _Mapping]] = ...) -> None: ... 
+ +class NodeExecutionEvent(_message.Message): + __slots__ = ["id", "producer_id", "phase", "occurred_at", "input_uri", "input_data", "output_uri", "error", "output_data", "workflow_node_metadata", "task_node_metadata", "parent_task_metadata", "parent_node_metadata", "retry_group", "spec_node_id", "node_name", "event_version", "is_parent", "is_dynamic", "deck_uri", "reported_at", "is_array", "target_entity", "is_in_dynamic_chain", "is_eager"] + ID_FIELD_NUMBER: _ClassVar[int] + PRODUCER_ID_FIELD_NUMBER: _ClassVar[int] + PHASE_FIELD_NUMBER: _ClassVar[int] + OCCURRED_AT_FIELD_NUMBER: _ClassVar[int] + INPUT_URI_FIELD_NUMBER: _ClassVar[int] + INPUT_DATA_FIELD_NUMBER: _ClassVar[int] + OUTPUT_URI_FIELD_NUMBER: _ClassVar[int] + ERROR_FIELD_NUMBER: _ClassVar[int] + OUTPUT_DATA_FIELD_NUMBER: _ClassVar[int] + WORKFLOW_NODE_METADATA_FIELD_NUMBER: _ClassVar[int] + TASK_NODE_METADATA_FIELD_NUMBER: _ClassVar[int] + PARENT_TASK_METADATA_FIELD_NUMBER: _ClassVar[int] + PARENT_NODE_METADATA_FIELD_NUMBER: _ClassVar[int] + RETRY_GROUP_FIELD_NUMBER: _ClassVar[int] + SPEC_NODE_ID_FIELD_NUMBER: _ClassVar[int] + NODE_NAME_FIELD_NUMBER: _ClassVar[int] + EVENT_VERSION_FIELD_NUMBER: _ClassVar[int] + IS_PARENT_FIELD_NUMBER: _ClassVar[int] + IS_DYNAMIC_FIELD_NUMBER: _ClassVar[int] + DECK_URI_FIELD_NUMBER: _ClassVar[int] + REPORTED_AT_FIELD_NUMBER: _ClassVar[int] + IS_ARRAY_FIELD_NUMBER: _ClassVar[int] + TARGET_ENTITY_FIELD_NUMBER: _ClassVar[int] + IS_IN_DYNAMIC_CHAIN_FIELD_NUMBER: _ClassVar[int] + IS_EAGER_FIELD_NUMBER: _ClassVar[int] + id: _identifier_pb2.NodeExecutionIdentifier + producer_id: str + phase: _execution_pb2.NodeExecution.Phase + occurred_at: _timestamp_pb2.Timestamp + input_uri: str + input_data: _literals_pb2.LiteralMap + output_uri: str + error: _execution_pb2.ExecutionError + output_data: _literals_pb2.LiteralMap + workflow_node_metadata: WorkflowNodeMetadata + task_node_metadata: TaskNodeMetadata + parent_task_metadata: ParentTaskExecutionMetadata + parent_node_metadata: 
ParentNodeExecutionMetadata + retry_group: str + spec_node_id: str + node_name: str + event_version: int + is_parent: bool + is_dynamic: bool + deck_uri: str + reported_at: _timestamp_pb2.Timestamp + is_array: bool + target_entity: _identifier_pb2.Identifier + is_in_dynamic_chain: bool + is_eager: bool + def __init__(self, id: _Optional[_Union[_identifier_pb2.NodeExecutionIdentifier, _Mapping]] = ..., producer_id: _Optional[str] = ..., phase: _Optional[_Union[_execution_pb2.NodeExecution.Phase, str]] = ..., occurred_at: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., input_uri: _Optional[str] = ..., input_data: _Optional[_Union[_literals_pb2.LiteralMap, _Mapping]] = ..., output_uri: _Optional[str] = ..., error: _Optional[_Union[_execution_pb2.ExecutionError, _Mapping]] = ..., output_data: _Optional[_Union[_literals_pb2.LiteralMap, _Mapping]] = ..., workflow_node_metadata: _Optional[_Union[WorkflowNodeMetadata, _Mapping]] = ..., task_node_metadata: _Optional[_Union[TaskNodeMetadata, _Mapping]] = ..., parent_task_metadata: _Optional[_Union[ParentTaskExecutionMetadata, _Mapping]] = ..., parent_node_metadata: _Optional[_Union[ParentNodeExecutionMetadata, _Mapping]] = ..., retry_group: _Optional[str] = ..., spec_node_id: _Optional[str] = ..., node_name: _Optional[str] = ..., event_version: _Optional[int] = ..., is_parent: bool = ..., is_dynamic: bool = ..., deck_uri: _Optional[str] = ..., reported_at: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., is_array: bool = ..., target_entity: _Optional[_Union[_identifier_pb2.Identifier, _Mapping]] = ..., is_in_dynamic_chain: bool = ..., is_eager: bool = ...) -> None: ... + +class WorkflowNodeMetadata(_message.Message): + __slots__ = ["execution_id"] + EXECUTION_ID_FIELD_NUMBER: _ClassVar[int] + execution_id: _identifier_pb2.WorkflowExecutionIdentifier + def __init__(self, execution_id: _Optional[_Union[_identifier_pb2.WorkflowExecutionIdentifier, _Mapping]] = ...) -> None: ... 
+ +class TaskNodeMetadata(_message.Message): + __slots__ = ["cache_status", "catalog_key", "reservation_status", "checkpoint_uri"] + CACHE_STATUS_FIELD_NUMBER: _ClassVar[int] + CATALOG_KEY_FIELD_NUMBER: _ClassVar[int] + RESERVATION_STATUS_FIELD_NUMBER: _ClassVar[int] + CHECKPOINT_URI_FIELD_NUMBER: _ClassVar[int] + cache_status: _catalog_pb2.CatalogCacheStatus + catalog_key: _catalog_pb2.CatalogMetadata + reservation_status: _catalog_pb2.CatalogReservation.Status + checkpoint_uri: str + def __init__(self, cache_status: _Optional[_Union[_catalog_pb2.CatalogCacheStatus, str]] = ..., catalog_key: _Optional[_Union[_catalog_pb2.CatalogMetadata, _Mapping]] = ..., reservation_status: _Optional[_Union[_catalog_pb2.CatalogReservation.Status, str]] = ..., checkpoint_uri: _Optional[str] = ...) -> None: ... + +class ParentTaskExecutionMetadata(_message.Message): + __slots__ = ["id"] + ID_FIELD_NUMBER: _ClassVar[int] + id: _identifier_pb2.TaskExecutionIdentifier + def __init__(self, id: _Optional[_Union[_identifier_pb2.TaskExecutionIdentifier, _Mapping]] = ...) -> None: ... + +class ParentNodeExecutionMetadata(_message.Message): + __slots__ = ["node_id"] + NODE_ID_FIELD_NUMBER: _ClassVar[int] + node_id: str + def __init__(self, node_id: _Optional[str] = ...) -> None: ... + +class EventReason(_message.Message): + __slots__ = ["reason", "occurred_at"] + REASON_FIELD_NUMBER: _ClassVar[int] + OCCURRED_AT_FIELD_NUMBER: _ClassVar[int] + reason: str + occurred_at: _timestamp_pb2.Timestamp + def __init__(self, reason: _Optional[str] = ..., occurred_at: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ...) -> None: ... 
+ +class TaskExecutionEvent(_message.Message): + __slots__ = ["task_id", "parent_node_execution_id", "retry_attempt", "phase", "producer_id", "logs", "occurred_at", "input_uri", "input_data", "output_uri", "error", "output_data", "custom_info", "phase_version", "reason", "reasons", "task_type", "metadata", "event_version", "reported_at", "log_context"] + TASK_ID_FIELD_NUMBER: _ClassVar[int] + PARENT_NODE_EXECUTION_ID_FIELD_NUMBER: _ClassVar[int] + RETRY_ATTEMPT_FIELD_NUMBER: _ClassVar[int] + PHASE_FIELD_NUMBER: _ClassVar[int] + PRODUCER_ID_FIELD_NUMBER: _ClassVar[int] + LOGS_FIELD_NUMBER: _ClassVar[int] + OCCURRED_AT_FIELD_NUMBER: _ClassVar[int] + INPUT_URI_FIELD_NUMBER: _ClassVar[int] + INPUT_DATA_FIELD_NUMBER: _ClassVar[int] + OUTPUT_URI_FIELD_NUMBER: _ClassVar[int] + ERROR_FIELD_NUMBER: _ClassVar[int] + OUTPUT_DATA_FIELD_NUMBER: _ClassVar[int] + CUSTOM_INFO_FIELD_NUMBER: _ClassVar[int] + PHASE_VERSION_FIELD_NUMBER: _ClassVar[int] + REASON_FIELD_NUMBER: _ClassVar[int] + REASONS_FIELD_NUMBER: _ClassVar[int] + TASK_TYPE_FIELD_NUMBER: _ClassVar[int] + METADATA_FIELD_NUMBER: _ClassVar[int] + EVENT_VERSION_FIELD_NUMBER: _ClassVar[int] + REPORTED_AT_FIELD_NUMBER: _ClassVar[int] + LOG_CONTEXT_FIELD_NUMBER: _ClassVar[int] + task_id: _identifier_pb2.Identifier + parent_node_execution_id: _identifier_pb2.NodeExecutionIdentifier + retry_attempt: int + phase: _execution_pb2.TaskExecution.Phase + producer_id: str + logs: _containers.RepeatedCompositeFieldContainer[_execution_pb2.TaskLog] + occurred_at: _timestamp_pb2.Timestamp + input_uri: str + input_data: _literals_pb2.LiteralMap + output_uri: str + error: _execution_pb2.ExecutionError + output_data: _literals_pb2.LiteralMap + custom_info: _struct_pb2.Struct + phase_version: int + reason: str + reasons: _containers.RepeatedCompositeFieldContainer[EventReason] + task_type: str + metadata: TaskExecutionMetadata + event_version: int + reported_at: _timestamp_pb2.Timestamp + log_context: _execution_pb2.LogContext + def 
__init__(self, task_id: _Optional[_Union[_identifier_pb2.Identifier, _Mapping]] = ..., parent_node_execution_id: _Optional[_Union[_identifier_pb2.NodeExecutionIdentifier, _Mapping]] = ..., retry_attempt: _Optional[int] = ..., phase: _Optional[_Union[_execution_pb2.TaskExecution.Phase, str]] = ..., producer_id: _Optional[str] = ..., logs: _Optional[_Iterable[_Union[_execution_pb2.TaskLog, _Mapping]]] = ..., occurred_at: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., input_uri: _Optional[str] = ..., input_data: _Optional[_Union[_literals_pb2.LiteralMap, _Mapping]] = ..., output_uri: _Optional[str] = ..., error: _Optional[_Union[_execution_pb2.ExecutionError, _Mapping]] = ..., output_data: _Optional[_Union[_literals_pb2.LiteralMap, _Mapping]] = ..., custom_info: _Optional[_Union[_struct_pb2.Struct, _Mapping]] = ..., phase_version: _Optional[int] = ..., reason: _Optional[str] = ..., reasons: _Optional[_Iterable[_Union[EventReason, _Mapping]]] = ..., task_type: _Optional[str] = ..., metadata: _Optional[_Union[TaskExecutionMetadata, _Mapping]] = ..., event_version: _Optional[int] = ..., reported_at: _Optional[_Union[_timestamp_pb2.Timestamp, _Mapping]] = ..., log_context: _Optional[_Union[_execution_pb2.LogContext, _Mapping]] = ...) -> None: ... 
+ +class ExternalResourceInfo(_message.Message): + __slots__ = ["external_id", "index", "retry_attempt", "phase", "cache_status", "logs", "workflow_node_metadata", "custom_info", "log_context"] + EXTERNAL_ID_FIELD_NUMBER: _ClassVar[int] + INDEX_FIELD_NUMBER: _ClassVar[int] + RETRY_ATTEMPT_FIELD_NUMBER: _ClassVar[int] + PHASE_FIELD_NUMBER: _ClassVar[int] + CACHE_STATUS_FIELD_NUMBER: _ClassVar[int] + LOGS_FIELD_NUMBER: _ClassVar[int] + WORKFLOW_NODE_METADATA_FIELD_NUMBER: _ClassVar[int] + CUSTOM_INFO_FIELD_NUMBER: _ClassVar[int] + LOG_CONTEXT_FIELD_NUMBER: _ClassVar[int] + external_id: str + index: int + retry_attempt: int + phase: _execution_pb2.TaskExecution.Phase + cache_status: _catalog_pb2.CatalogCacheStatus + logs: _containers.RepeatedCompositeFieldContainer[_execution_pb2.TaskLog] + workflow_node_metadata: WorkflowNodeMetadata + custom_info: _struct_pb2.Struct + log_context: _execution_pb2.LogContext + def __init__(self, external_id: _Optional[str] = ..., index: _Optional[int] = ..., retry_attempt: _Optional[int] = ..., phase: _Optional[_Union[_execution_pb2.TaskExecution.Phase, str]] = ..., cache_status: _Optional[_Union[_catalog_pb2.CatalogCacheStatus, str]] = ..., logs: _Optional[_Iterable[_Union[_execution_pb2.TaskLog, _Mapping]]] = ..., workflow_node_metadata: _Optional[_Union[WorkflowNodeMetadata, _Mapping]] = ..., custom_info: _Optional[_Union[_struct_pb2.Struct, _Mapping]] = ..., log_context: _Optional[_Union[_execution_pb2.LogContext, _Mapping]] = ...) -> None: ... + +class ResourcePoolInfo(_message.Message): + __slots__ = ["allocation_token", "namespace"] + ALLOCATION_TOKEN_FIELD_NUMBER: _ClassVar[int] + NAMESPACE_FIELD_NUMBER: _ClassVar[int] + allocation_token: str + namespace: str + def __init__(self, allocation_token: _Optional[str] = ..., namespace: _Optional[str] = ...) -> None: ... 
+ +class TaskExecutionMetadata(_message.Message): + __slots__ = ["generated_name", "external_resources", "resource_pool_info", "plugin_identifier", "instance_class"] + class InstanceClass(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): + __slots__ = [] + DEFAULT: _ClassVar[TaskExecutionMetadata.InstanceClass] + INTERRUPTIBLE: _ClassVar[TaskExecutionMetadata.InstanceClass] + DEFAULT: TaskExecutionMetadata.InstanceClass + INTERRUPTIBLE: TaskExecutionMetadata.InstanceClass + GENERATED_NAME_FIELD_NUMBER: _ClassVar[int] + EXTERNAL_RESOURCES_FIELD_NUMBER: _ClassVar[int] + RESOURCE_POOL_INFO_FIELD_NUMBER: _ClassVar[int] + PLUGIN_IDENTIFIER_FIELD_NUMBER: _ClassVar[int] + INSTANCE_CLASS_FIELD_NUMBER: _ClassVar[int] + generated_name: str + external_resources: _containers.RepeatedCompositeFieldContainer[ExternalResourceInfo] + resource_pool_info: _containers.RepeatedCompositeFieldContainer[ResourcePoolInfo] + plugin_identifier: str + instance_class: TaskExecutionMetadata.InstanceClass + def __init__(self, generated_name: _Optional[str] = ..., external_resources: _Optional[_Iterable[_Union[ExternalResourceInfo, _Mapping]]] = ..., resource_pool_info: _Optional[_Iterable[_Union[ResourcePoolInfo, _Mapping]]] = ..., plugin_identifier: _Optional[str] = ..., instance_class: _Optional[_Union[TaskExecutionMetadata.InstanceClass, str]] = ...) -> None: ... diff --git a/gen/python/flyteidl2/event/event_pb2_grpc.py b/gen/python/flyteidl2/event/event_pb2_grpc.py new file mode 100644 index 0000000000..2daafffebf --- /dev/null +++ b/gen/python/flyteidl2/event/event_pb2_grpc.py @@ -0,0 +1,4 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
+"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + diff --git a/gen/python/flyteidl2/plugins/common_pb2.py b/gen/python/flyteidl2/plugins/common_pb2.py new file mode 100644 index 0000000000..ada9e0b1da --- /dev/null +++ b/gen/python/flyteidl2/plugins/common_pb2.py @@ -0,0 +1,29 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: flyteidl2/plugins/common.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from flyteidl2.core import tasks_pb2 as flyteidl2_dot_core_dot_tasks__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1e\x66lyteidl2/plugins/common.proto\x12\x11\x66lyteidl2.plugins\x1a\x1a\x66lyteidl2/core/tasks.proto\"\xc7\x01\n\x11\x43ommonReplicaSpec\x12\x1a\n\x08replicas\x18\x01 \x01(\x05R\x08replicas\x12\x14\n\x05image\x18\x02 \x01(\tR\x05image\x12\x37\n\tresources\x18\x03 \x01(\x0b\x32\x19.flyteidl2.core.ResourcesR\tresources\x12G\n\x0erestart_policy\x18\x04 \x01(\x0e\x32 .flyteidl2.plugins.RestartPolicyR\rrestartPolicy*c\n\rRestartPolicy\x12\x18\n\x14RESTART_POLICY_NEVER\x10\x00\x12\x1d\n\x19RESTART_POLICY_ON_FAILURE\x10\x01\x12\x19\n\x15RESTART_POLICY_ALWAYS\x10\x02\x42\xc2\x01\n\x15\x63om.flyteidl2.pluginsB\x0b\x43ommonProtoH\x02P\x01Z5github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins\xa2\x02\x03\x46PX\xaa\x02\x11\x46lyteidl2.Plugins\xca\x02\x11\x46lyteidl2\\Plugins\xe2\x02\x1d\x46lyteidl2\\Plugins\\GPBMetadata\xea\x02\x12\x46lyteidl2::Pluginsb\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flyteidl2.plugins.common_pb2', 
_globals) +if _descriptor._USE_C_DESCRIPTORS == False: + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\025com.flyteidl2.pluginsB\013CommonProtoH\002P\001Z5github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins\242\002\003FPX\252\002\021Flyteidl2.Plugins\312\002\021Flyteidl2\\Plugins\342\002\035Flyteidl2\\Plugins\\GPBMetadata\352\002\022Flyteidl2::Plugins' + _globals['_RESTARTPOLICY']._serialized_start=283 + _globals['_RESTARTPOLICY']._serialized_end=382 + _globals['_COMMONREPLICASPEC']._serialized_start=82 + _globals['_COMMONREPLICASPEC']._serialized_end=281 +# @@protoc_insertion_point(module_scope) diff --git a/gen/python/flyteidl2/plugins/common_pb2.pyi b/gen/python/flyteidl2/plugins/common_pb2.pyi new file mode 100644 index 0000000000..b33b9d6d8b --- /dev/null +++ b/gen/python/flyteidl2/plugins/common_pb2.pyi @@ -0,0 +1,28 @@ +from flyteidl2.core import tasks_pb2 as _tasks_pb2 +from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from typing import ClassVar as _ClassVar, Mapping as _Mapping, Optional as _Optional, Union as _Union + +DESCRIPTOR: _descriptor.FileDescriptor + +class RestartPolicy(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): + __slots__ = [] + RESTART_POLICY_NEVER: _ClassVar[RestartPolicy] + RESTART_POLICY_ON_FAILURE: _ClassVar[RestartPolicy] + RESTART_POLICY_ALWAYS: _ClassVar[RestartPolicy] +RESTART_POLICY_NEVER: RestartPolicy +RESTART_POLICY_ON_FAILURE: RestartPolicy +RESTART_POLICY_ALWAYS: RestartPolicy + +class CommonReplicaSpec(_message.Message): + __slots__ = ["replicas", "image", "resources", "restart_policy"] + REPLICAS_FIELD_NUMBER: _ClassVar[int] + IMAGE_FIELD_NUMBER: _ClassVar[int] + RESOURCES_FIELD_NUMBER: _ClassVar[int] + RESTART_POLICY_FIELD_NUMBER: _ClassVar[int] + replicas: int + image: str + resources: _tasks_pb2.Resources + restart_policy: RestartPolicy + def __init__(self, 
replicas: _Optional[int] = ..., image: _Optional[str] = ..., resources: _Optional[_Union[_tasks_pb2.Resources, _Mapping]] = ..., restart_policy: _Optional[_Union[RestartPolicy, str]] = ...) -> None: ... diff --git a/gen/python/flyteidl2/plugins/common_pb2_grpc.py b/gen/python/flyteidl2/plugins/common_pb2_grpc.py new file mode 100644 index 0000000000..2daafffebf --- /dev/null +++ b/gen/python/flyteidl2/plugins/common_pb2_grpc.py @@ -0,0 +1,4 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + diff --git a/gen/python/flyteidl2/plugins/kubeflow/__init__.py b/gen/python/flyteidl2/plugins/kubeflow/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/gen/python/flyteidl2/plugins/kubeflow/common_pb2.py b/gen/python/flyteidl2/plugins/kubeflow/common_pb2.py new file mode 100644 index 0000000000..d2448a5578 --- /dev/null +++ b/gen/python/flyteidl2/plugins/kubeflow/common_pb2.py @@ -0,0 +1,28 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: flyteidl2/plugins/kubeflow/common.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\'flyteidl2/plugins/kubeflow/common.proto\x12\x1a\x66lyteidl2.plugins.kubeflow\"\xfb\x01\n\tRunPolicy\x12T\n\x10\x63lean_pod_policy\x18\x01 \x01(\x0e\x32*.flyteidl2.plugins.kubeflow.CleanPodPolicyR\x0e\x63leanPodPolicy\x12;\n\x1attl_seconds_after_finished\x18\x02 \x01(\x05R\x17ttlSecondsAfterFinished\x12\x36\n\x17\x61\x63tive_deadline_seconds\x18\x03 \x01(\x05R\x15\x61\x63tiveDeadlineSeconds\x12#\n\rbackoff_limit\x18\x04 \x01(\x05R\x0c\x62\x61\x63koffLimit*`\n\x0e\x43leanPodPolicy\x12\x18\n\x14\x43LEANPOD_POLICY_NONE\x10\x00\x12\x1b\n\x17\x43LEANPOD_POLICY_RUNNING\x10\x01\x12\x17\n\x13\x43LEANPOD_POLICY_ALL\x10\x02\x42\xf9\x01\n\x1e\x63om.flyteidl2.plugins.kubeflowB\x0b\x43ommonProtoH\x02P\x01Z>github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins/kubeflow\xa2\x02\x03\x46PK\xaa\x02\x1a\x46lyteidl2.Plugins.Kubeflow\xca\x02\x1a\x46lyteidl2\\Plugins\\Kubeflow\xe2\x02&Flyteidl2\\Plugins\\Kubeflow\\GPBMetadata\xea\x02\x1c\x46lyteidl2::Plugins::Kubeflowb\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flyteidl2.plugins.kubeflow.common_pb2', _globals) +if _descriptor._USE_C_DESCRIPTORS == False: + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = 
b'\n\036com.flyteidl2.plugins.kubeflowB\013CommonProtoH\002P\001Z>github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins/kubeflow\242\002\003FPK\252\002\032Flyteidl2.Plugins.Kubeflow\312\002\032Flyteidl2\\Plugins\\Kubeflow\342\002&Flyteidl2\\Plugins\\Kubeflow\\GPBMetadata\352\002\034Flyteidl2::Plugins::Kubeflow' + _globals['_CLEANPODPOLICY']._serialized_start=325 + _globals['_CLEANPODPOLICY']._serialized_end=421 + _globals['_RUNPOLICY']._serialized_start=72 + _globals['_RUNPOLICY']._serialized_end=323 +# @@protoc_insertion_point(module_scope) diff --git a/gen/python/flyteidl2/plugins/kubeflow/common_pb2.pyi b/gen/python/flyteidl2/plugins/kubeflow/common_pb2.pyi new file mode 100644 index 0000000000..3052ab62bb --- /dev/null +++ b/gen/python/flyteidl2/plugins/kubeflow/common_pb2.pyi @@ -0,0 +1,27 @@ +from google.protobuf.internal import enum_type_wrapper as _enum_type_wrapper +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from typing import ClassVar as _ClassVar, Optional as _Optional, Union as _Union + +DESCRIPTOR: _descriptor.FileDescriptor + +class CleanPodPolicy(int, metaclass=_enum_type_wrapper.EnumTypeWrapper): + __slots__ = [] + CLEANPOD_POLICY_NONE: _ClassVar[CleanPodPolicy] + CLEANPOD_POLICY_RUNNING: _ClassVar[CleanPodPolicy] + CLEANPOD_POLICY_ALL: _ClassVar[CleanPodPolicy] +CLEANPOD_POLICY_NONE: CleanPodPolicy +CLEANPOD_POLICY_RUNNING: CleanPodPolicy +CLEANPOD_POLICY_ALL: CleanPodPolicy + +class RunPolicy(_message.Message): + __slots__ = ["clean_pod_policy", "ttl_seconds_after_finished", "active_deadline_seconds", "backoff_limit"] + CLEAN_POD_POLICY_FIELD_NUMBER: _ClassVar[int] + TTL_SECONDS_AFTER_FINISHED_FIELD_NUMBER: _ClassVar[int] + ACTIVE_DEADLINE_SECONDS_FIELD_NUMBER: _ClassVar[int] + BACKOFF_LIMIT_FIELD_NUMBER: _ClassVar[int] + clean_pod_policy: CleanPodPolicy + ttl_seconds_after_finished: int + active_deadline_seconds: int + backoff_limit: int + def __init__(self, clean_pod_policy: 
_Optional[_Union[CleanPodPolicy, str]] = ..., ttl_seconds_after_finished: _Optional[int] = ..., active_deadline_seconds: _Optional[int] = ..., backoff_limit: _Optional[int] = ...) -> None: ... diff --git a/gen/python/flyteidl2/plugins/kubeflow/common_pb2_grpc.py b/gen/python/flyteidl2/plugins/kubeflow/common_pb2_grpc.py new file mode 100644 index 0000000000..2daafffebf --- /dev/null +++ b/gen/python/flyteidl2/plugins/kubeflow/common_pb2_grpc.py @@ -0,0 +1,4 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + diff --git a/gen/python/flyteidl2/plugins/kubeflow/mpi_pb2.py b/gen/python/flyteidl2/plugins/kubeflow/mpi_pb2.py new file mode 100644 index 0000000000..c997fbd8d1 --- /dev/null +++ b/gen/python/flyteidl2/plugins/kubeflow/mpi_pb2.py @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: flyteidl2/plugins/kubeflow/mpi.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from flyteidl2.core import tasks_pb2 as flyteidl2_dot_core_dot_tasks__pb2 +from flyteidl2.plugins import common_pb2 as flyteidl2_dot_plugins_dot_common__pb2 +from flyteidl2.plugins.kubeflow import common_pb2 as flyteidl2_dot_plugins_dot_kubeflow_dot_common__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n$flyteidl2/plugins/kubeflow/mpi.proto\x12\x1a\x66lyteidl2.plugins.kubeflow\x1a\x1a\x66lyteidl2/core/tasks.proto\x1a\x1e\x66lyteidl2/plugins/common.proto\x1a\'flyteidl2/plugins/kubeflow/common.proto\"\xcc\x02\n\x1a\x44istributedMPITrainingTask\x12\x66\n\x0fworker_replicas\x18\x01 
\x01(\x0b\x32=.flyteidl2.plugins.kubeflow.DistributedMPITrainingReplicaSpecR\x0eworkerReplicas\x12j\n\x11launcher_replicas\x18\x02 \x01(\x0b\x32=.flyteidl2.plugins.kubeflow.DistributedMPITrainingReplicaSpecR\x10launcherReplicas\x12\x44\n\nrun_policy\x18\x03 \x01(\x0b\x32%.flyteidl2.plugins.kubeflow.RunPolicyR\trunPolicy\x12\x14\n\x05slots\x18\x04 \x01(\x05R\x05slots\"\xbf\x02\n!DistributedMPITrainingReplicaSpec\x12\x1e\n\x08replicas\x18\x01 \x01(\x05\x42\x02\x18\x01R\x08replicas\x12\x18\n\x05image\x18\x02 \x01(\tB\x02\x18\x01R\x05image\x12;\n\tresources\x18\x03 \x01(\x0b\x32\x19.flyteidl2.core.ResourcesB\x02\x18\x01R\tresources\x12K\n\x0erestart_policy\x18\x04 \x01(\x0e\x32 .flyteidl2.plugins.RestartPolicyB\x02\x18\x01R\rrestartPolicy\x12\x18\n\x07\x63ommand\x18\x05 \x03(\tR\x07\x63ommand\x12<\n\x06\x63ommon\x18\x06 \x01(\x0b\x32$.flyteidl2.plugins.CommonReplicaSpecR\x06\x63ommonB\xf6\x01\n\x1e\x63om.flyteidl2.plugins.kubeflowB\x08MpiProtoH\x02P\x01Z>github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins/kubeflow\xa2\x02\x03\x46PK\xaa\x02\x1a\x46lyteidl2.Plugins.Kubeflow\xca\x02\x1a\x46lyteidl2\\Plugins\\Kubeflow\xe2\x02&Flyteidl2\\Plugins\\Kubeflow\\GPBMetadata\xea\x02\x1c\x46lyteidl2::Plugins::Kubeflowb\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flyteidl2.plugins.kubeflow.mpi_pb2', _globals) +if _descriptor._USE_C_DESCRIPTORS == False: + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\036com.flyteidl2.plugins.kubeflowB\010MpiProtoH\002P\001Z>github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins/kubeflow\242\002\003FPK\252\002\032Flyteidl2.Plugins.Kubeflow\312\002\032Flyteidl2\\Plugins\\Kubeflow\342\002&Flyteidl2\\Plugins\\Kubeflow\\GPBMetadata\352\002\034Flyteidl2::Plugins::Kubeflow' + _DISTRIBUTEDMPITRAININGREPLICASPEC.fields_by_name['replicas']._options = None + 
_DISTRIBUTEDMPITRAININGREPLICASPEC.fields_by_name['replicas']._serialized_options = b'\030\001' + _DISTRIBUTEDMPITRAININGREPLICASPEC.fields_by_name['image']._options = None + _DISTRIBUTEDMPITRAININGREPLICASPEC.fields_by_name['image']._serialized_options = b'\030\001' + _DISTRIBUTEDMPITRAININGREPLICASPEC.fields_by_name['resources']._options = None + _DISTRIBUTEDMPITRAININGREPLICASPEC.fields_by_name['resources']._serialized_options = b'\030\001' + _DISTRIBUTEDMPITRAININGREPLICASPEC.fields_by_name['restart_policy']._options = None + _DISTRIBUTEDMPITRAININGREPLICASPEC.fields_by_name['restart_policy']._serialized_options = b'\030\001' + _globals['_DISTRIBUTEDMPITRAININGTASK']._serialized_start=170 + _globals['_DISTRIBUTEDMPITRAININGTASK']._serialized_end=502 + _globals['_DISTRIBUTEDMPITRAININGREPLICASPEC']._serialized_start=505 + _globals['_DISTRIBUTEDMPITRAININGREPLICASPEC']._serialized_end=824 +# @@protoc_insertion_point(module_scope) diff --git a/gen/python/flyteidl2/plugins/kubeflow/mpi_pb2.pyi b/gen/python/flyteidl2/plugins/kubeflow/mpi_pb2.pyi new file mode 100644 index 0000000000..3978bd61e7 --- /dev/null +++ b/gen/python/flyteidl2/plugins/kubeflow/mpi_pb2.pyi @@ -0,0 +1,37 @@ +from flyteidl2.core import tasks_pb2 as _tasks_pb2 +from flyteidl2.plugins import common_pb2 as _common_pb2 +from flyteidl2.plugins.kubeflow import common_pb2 as _common_pb2_1 +from google.protobuf.internal import containers as _containers +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from typing import ClassVar as _ClassVar, Iterable as _Iterable, Mapping as _Mapping, Optional as _Optional, Union as _Union + +DESCRIPTOR: _descriptor.FileDescriptor + +class DistributedMPITrainingTask(_message.Message): + __slots__ = ["worker_replicas", "launcher_replicas", "run_policy", "slots"] + WORKER_REPLICAS_FIELD_NUMBER: _ClassVar[int] + LAUNCHER_REPLICAS_FIELD_NUMBER: _ClassVar[int] + RUN_POLICY_FIELD_NUMBER: _ClassVar[int] + 
SLOTS_FIELD_NUMBER: _ClassVar[int] + worker_replicas: DistributedMPITrainingReplicaSpec + launcher_replicas: DistributedMPITrainingReplicaSpec + run_policy: _common_pb2_1.RunPolicy + slots: int + def __init__(self, worker_replicas: _Optional[_Union[DistributedMPITrainingReplicaSpec, _Mapping]] = ..., launcher_replicas: _Optional[_Union[DistributedMPITrainingReplicaSpec, _Mapping]] = ..., run_policy: _Optional[_Union[_common_pb2_1.RunPolicy, _Mapping]] = ..., slots: _Optional[int] = ...) -> None: ... + +class DistributedMPITrainingReplicaSpec(_message.Message): + __slots__ = ["replicas", "image", "resources", "restart_policy", "command", "common"] + REPLICAS_FIELD_NUMBER: _ClassVar[int] + IMAGE_FIELD_NUMBER: _ClassVar[int] + RESOURCES_FIELD_NUMBER: _ClassVar[int] + RESTART_POLICY_FIELD_NUMBER: _ClassVar[int] + COMMAND_FIELD_NUMBER: _ClassVar[int] + COMMON_FIELD_NUMBER: _ClassVar[int] + replicas: int + image: str + resources: _tasks_pb2.Resources + restart_policy: _common_pb2.RestartPolicy + command: _containers.RepeatedScalarFieldContainer[str] + common: _common_pb2.CommonReplicaSpec + def __init__(self, replicas: _Optional[int] = ..., image: _Optional[str] = ..., resources: _Optional[_Union[_tasks_pb2.Resources, _Mapping]] = ..., restart_policy: _Optional[_Union[_common_pb2.RestartPolicy, str]] = ..., command: _Optional[_Iterable[str]] = ..., common: _Optional[_Union[_common_pb2.CommonReplicaSpec, _Mapping]] = ...) -> None: ... diff --git a/gen/python/flyteidl2/plugins/kubeflow/mpi_pb2_grpc.py b/gen/python/flyteidl2/plugins/kubeflow/mpi_pb2_grpc.py new file mode 100644 index 0000000000..2daafffebf --- /dev/null +++ b/gen/python/flyteidl2/plugins/kubeflow/mpi_pb2_grpc.py @@ -0,0 +1,4 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
+"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + diff --git a/gen/python/flyteidl2/plugins/kubeflow/pytorch_pb2.py b/gen/python/flyteidl2/plugins/kubeflow/pytorch_pb2.py new file mode 100644 index 0000000000..2b4ab6b4ee --- /dev/null +++ b/gen/python/flyteidl2/plugins/kubeflow/pytorch_pb2.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: flyteidl2/plugins/kubeflow/pytorch.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from flyteidl2.core import tasks_pb2 as flyteidl2_dot_core_dot_tasks__pb2 +from flyteidl2.plugins import common_pb2 as flyteidl2_dot_plugins_dot_common__pb2 +from flyteidl2.plugins.kubeflow import common_pb2 as flyteidl2_dot_plugins_dot_kubeflow_dot_common__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n(flyteidl2/plugins/kubeflow/pytorch.proto\x12\x1a\x66lyteidl2.plugins.kubeflow\x1a\x1a\x66lyteidl2/core/tasks.proto\x1a\x1e\x66lyteidl2/plugins/common.proto\x1a\'flyteidl2/plugins/kubeflow/common.proto\"\xc1\x01\n\rElasticConfig\x12!\n\x0crdzv_backend\x18\x01 \x01(\tR\x0brdzvBackend\x12!\n\x0cmin_replicas\x18\x02 \x01(\x05R\x0bminReplicas\x12!\n\x0cmax_replicas\x18\x03 \x01(\x05R\x0bmaxReplicas\x12$\n\x0enproc_per_node\x18\x04 \x01(\x05R\x0cnprocPerNode\x12!\n\x0cmax_restarts\x18\x05 \x01(\x05R\x0bmaxRestarts\"\x90\x03\n\x1e\x44istributedPyTorchTrainingTask\x12j\n\x0fworker_replicas\x18\x01 \x01(\x0b\x32\x41.flyteidl2.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpecR\x0eworkerReplicas\x12j\n\x0fmaster_replicas\x18\x02 
\x01(\x0b\x32\x41.flyteidl2.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpecR\x0emasterReplicas\x12\x44\n\nrun_policy\x18\x03 \x01(\x0b\x32%.flyteidl2.plugins.kubeflow.RunPolicyR\trunPolicy\x12P\n\x0e\x65lastic_config\x18\x04 \x01(\x0b\x32).flyteidl2.plugins.kubeflow.ElasticConfigR\relasticConfig\"\xa9\x02\n%DistributedPyTorchTrainingReplicaSpec\x12\x1e\n\x08replicas\x18\x01 \x01(\x05\x42\x02\x18\x01R\x08replicas\x12\x18\n\x05image\x18\x02 \x01(\tB\x02\x18\x01R\x05image\x12;\n\tresources\x18\x03 \x01(\x0b\x32\x19.flyteidl2.core.ResourcesB\x02\x18\x01R\tresources\x12K\n\x0erestart_policy\x18\x04 \x01(\x0e\x32 .flyteidl2.plugins.RestartPolicyB\x02\x18\x01R\rrestartPolicy\x12<\n\x06\x63ommon\x18\x05 \x01(\x0b\x32$.flyteidl2.plugins.CommonReplicaSpecR\x06\x63ommonB\xfa\x01\n\x1e\x63om.flyteidl2.plugins.kubeflowB\x0cPytorchProtoH\x02P\x01Z>github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins/kubeflow\xa2\x02\x03\x46PK\xaa\x02\x1a\x46lyteidl2.Plugins.Kubeflow\xca\x02\x1a\x46lyteidl2\\Plugins\\Kubeflow\xe2\x02&Flyteidl2\\Plugins\\Kubeflow\\GPBMetadata\xea\x02\x1c\x46lyteidl2::Plugins::Kubeflowb\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flyteidl2.plugins.kubeflow.pytorch_pb2', _globals) +if _descriptor._USE_C_DESCRIPTORS == False: + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\036com.flyteidl2.plugins.kubeflowB\014PytorchProtoH\002P\001Z>github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins/kubeflow\242\002\003FPK\252\002\032Flyteidl2.Plugins.Kubeflow\312\002\032Flyteidl2\\Plugins\\Kubeflow\342\002&Flyteidl2\\Plugins\\Kubeflow\\GPBMetadata\352\002\034Flyteidl2::Plugins::Kubeflow' + _DISTRIBUTEDPYTORCHTRAININGREPLICASPEC.fields_by_name['replicas']._options = None + _DISTRIBUTEDPYTORCHTRAININGREPLICASPEC.fields_by_name['replicas']._serialized_options = b'\030\001' + _DISTRIBUTEDPYTORCHTRAININGREPLICASPEC.fields_by_name['image']._options 
= None + _DISTRIBUTEDPYTORCHTRAININGREPLICASPEC.fields_by_name['image']._serialized_options = b'\030\001' + _DISTRIBUTEDPYTORCHTRAININGREPLICASPEC.fields_by_name['resources']._options = None + _DISTRIBUTEDPYTORCHTRAININGREPLICASPEC.fields_by_name['resources']._serialized_options = b'\030\001' + _DISTRIBUTEDPYTORCHTRAININGREPLICASPEC.fields_by_name['restart_policy']._options = None + _DISTRIBUTEDPYTORCHTRAININGREPLICASPEC.fields_by_name['restart_policy']._serialized_options = b'\030\001' + _globals['_ELASTICCONFIG']._serialized_start=174 + _globals['_ELASTICCONFIG']._serialized_end=367 + _globals['_DISTRIBUTEDPYTORCHTRAININGTASK']._serialized_start=370 + _globals['_DISTRIBUTEDPYTORCHTRAININGTASK']._serialized_end=770 + _globals['_DISTRIBUTEDPYTORCHTRAININGREPLICASPEC']._serialized_start=773 + _globals['_DISTRIBUTEDPYTORCHTRAININGREPLICASPEC']._serialized_end=1070 +# @@protoc_insertion_point(module_scope) diff --git a/gen/python/flyteidl2/plugins/kubeflow/pytorch_pb2.pyi b/gen/python/flyteidl2/plugins/kubeflow/pytorch_pb2.pyi new file mode 100644 index 0000000000..99bc167c2d --- /dev/null +++ b/gen/python/flyteidl2/plugins/kubeflow/pytorch_pb2.pyi @@ -0,0 +1,48 @@ +from flyteidl2.core import tasks_pb2 as _tasks_pb2 +from flyteidl2.plugins import common_pb2 as _common_pb2 +from flyteidl2.plugins.kubeflow import common_pb2 as _common_pb2_1 +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from typing import ClassVar as _ClassVar, Mapping as _Mapping, Optional as _Optional, Union as _Union + +DESCRIPTOR: _descriptor.FileDescriptor + +class ElasticConfig(_message.Message): + __slots__ = ["rdzv_backend", "min_replicas", "max_replicas", "nproc_per_node", "max_restarts"] + RDZV_BACKEND_FIELD_NUMBER: _ClassVar[int] + MIN_REPLICAS_FIELD_NUMBER: _ClassVar[int] + MAX_REPLICAS_FIELD_NUMBER: _ClassVar[int] + NPROC_PER_NODE_FIELD_NUMBER: _ClassVar[int] + MAX_RESTARTS_FIELD_NUMBER: _ClassVar[int] + rdzv_backend: str + 
min_replicas: int + max_replicas: int + nproc_per_node: int + max_restarts: int + def __init__(self, rdzv_backend: _Optional[str] = ..., min_replicas: _Optional[int] = ..., max_replicas: _Optional[int] = ..., nproc_per_node: _Optional[int] = ..., max_restarts: _Optional[int] = ...) -> None: ... + +class DistributedPyTorchTrainingTask(_message.Message): + __slots__ = ["worker_replicas", "master_replicas", "run_policy", "elastic_config"] + WORKER_REPLICAS_FIELD_NUMBER: _ClassVar[int] + MASTER_REPLICAS_FIELD_NUMBER: _ClassVar[int] + RUN_POLICY_FIELD_NUMBER: _ClassVar[int] + ELASTIC_CONFIG_FIELD_NUMBER: _ClassVar[int] + worker_replicas: DistributedPyTorchTrainingReplicaSpec + master_replicas: DistributedPyTorchTrainingReplicaSpec + run_policy: _common_pb2_1.RunPolicy + elastic_config: ElasticConfig + def __init__(self, worker_replicas: _Optional[_Union[DistributedPyTorchTrainingReplicaSpec, _Mapping]] = ..., master_replicas: _Optional[_Union[DistributedPyTorchTrainingReplicaSpec, _Mapping]] = ..., run_policy: _Optional[_Union[_common_pb2_1.RunPolicy, _Mapping]] = ..., elastic_config: _Optional[_Union[ElasticConfig, _Mapping]] = ...) -> None: ... + +class DistributedPyTorchTrainingReplicaSpec(_message.Message): + __slots__ = ["replicas", "image", "resources", "restart_policy", "common"] + REPLICAS_FIELD_NUMBER: _ClassVar[int] + IMAGE_FIELD_NUMBER: _ClassVar[int] + RESOURCES_FIELD_NUMBER: _ClassVar[int] + RESTART_POLICY_FIELD_NUMBER: _ClassVar[int] + COMMON_FIELD_NUMBER: _ClassVar[int] + replicas: int + image: str + resources: _tasks_pb2.Resources + restart_policy: _common_pb2.RestartPolicy + common: _common_pb2.CommonReplicaSpec + def __init__(self, replicas: _Optional[int] = ..., image: _Optional[str] = ..., resources: _Optional[_Union[_tasks_pb2.Resources, _Mapping]] = ..., restart_policy: _Optional[_Union[_common_pb2.RestartPolicy, str]] = ..., common: _Optional[_Union[_common_pb2.CommonReplicaSpec, _Mapping]] = ...) -> None: ... 
diff --git a/gen/python/flyteidl2/plugins/kubeflow/pytorch_pb2_grpc.py b/gen/python/flyteidl2/plugins/kubeflow/pytorch_pb2_grpc.py new file mode 100644 index 0000000000..2daafffebf --- /dev/null +++ b/gen/python/flyteidl2/plugins/kubeflow/pytorch_pb2_grpc.py @@ -0,0 +1,4 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + diff --git a/gen/python/flyteidl2/plugins/kubeflow/tensorflow_pb2.py b/gen/python/flyteidl2/plugins/kubeflow/tensorflow_pb2.py new file mode 100644 index 0000000000..8edef5e56c --- /dev/null +++ b/gen/python/flyteidl2/plugins/kubeflow/tensorflow_pb2.py @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: flyteidl2/plugins/kubeflow/tensorflow.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from flyteidl2.core import tasks_pb2 as flyteidl2_dot_core_dot_tasks__pb2 +from flyteidl2.plugins import common_pb2 as flyteidl2_dot_plugins_dot_common__pb2 +from flyteidl2.plugins.kubeflow import common_pb2 as flyteidl2_dot_plugins_dot_kubeflow_dot_common__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n+flyteidl2/plugins/kubeflow/tensorflow.proto\x12\x1a\x66lyteidl2.plugins.kubeflow\x1a\x1a\x66lyteidl2/core/tasks.proto\x1a\x1e\x66lyteidl2/plugins/common.proto\x1a\'flyteidl2/plugins/kubeflow/common.proto\"\xa1\x04\n!DistributedTensorflowTrainingTask\x12m\n\x0fworker_replicas\x18\x01 \x01(\x0b\x32\x44.flyteidl2.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpecR\x0eworkerReplicas\x12\x65\n\x0bps_replicas\x18\x02 
\x01(\x0b\x32\x44.flyteidl2.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpecR\npsReplicas\x12k\n\x0e\x63hief_replicas\x18\x03 \x01(\x0b\x32\x44.flyteidl2.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpecR\rchiefReplicas\x12\x44\n\nrun_policy\x18\x04 \x01(\x0b\x32%.flyteidl2.plugins.kubeflow.RunPolicyR\trunPolicy\x12s\n\x12\x65valuator_replicas\x18\x05 \x01(\x0b\x32\x44.flyteidl2.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpecR\x11\x65valuatorReplicas\"\xac\x02\n(DistributedTensorflowTrainingReplicaSpec\x12\x1e\n\x08replicas\x18\x01 \x01(\x05\x42\x02\x18\x01R\x08replicas\x12\x18\n\x05image\x18\x02 \x01(\tB\x02\x18\x01R\x05image\x12;\n\tresources\x18\x03 \x01(\x0b\x32\x19.flyteidl2.core.ResourcesB\x02\x18\x01R\tresources\x12K\n\x0erestart_policy\x18\x04 \x01(\x0e\x32 .flyteidl2.plugins.RestartPolicyB\x02\x18\x01R\rrestartPolicy\x12<\n\x06\x63ommon\x18\x05 \x01(\x0b\x32$.flyteidl2.plugins.CommonReplicaSpecR\x06\x63ommonB\xfd\x01\n\x1e\x63om.flyteidl2.plugins.kubeflowB\x0fTensorflowProtoH\x02P\x01Z>github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins/kubeflow\xa2\x02\x03\x46PK\xaa\x02\x1a\x46lyteidl2.Plugins.Kubeflow\xca\x02\x1a\x46lyteidl2\\Plugins\\Kubeflow\xe2\x02&Flyteidl2\\Plugins\\Kubeflow\\GPBMetadata\xea\x02\x1c\x46lyteidl2::Plugins::Kubeflowb\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flyteidl2.plugins.kubeflow.tensorflow_pb2', _globals) +if _descriptor._USE_C_DESCRIPTORS == False: + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\036com.flyteidl2.plugins.kubeflowB\017TensorflowProtoH\002P\001Z>github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins/kubeflow\242\002\003FPK\252\002\032Flyteidl2.Plugins.Kubeflow\312\002\032Flyteidl2\\Plugins\\Kubeflow\342\002&Flyteidl2\\Plugins\\Kubeflow\\GPBMetadata\352\002\034Flyteidl2::Plugins::Kubeflow' + 
_DISTRIBUTEDTENSORFLOWTRAININGREPLICASPEC.fields_by_name['replicas']._options = None + _DISTRIBUTEDTENSORFLOWTRAININGREPLICASPEC.fields_by_name['replicas']._serialized_options = b'\030\001' + _DISTRIBUTEDTENSORFLOWTRAININGREPLICASPEC.fields_by_name['image']._options = None + _DISTRIBUTEDTENSORFLOWTRAININGREPLICASPEC.fields_by_name['image']._serialized_options = b'\030\001' + _DISTRIBUTEDTENSORFLOWTRAININGREPLICASPEC.fields_by_name['resources']._options = None + _DISTRIBUTEDTENSORFLOWTRAININGREPLICASPEC.fields_by_name['resources']._serialized_options = b'\030\001' + _DISTRIBUTEDTENSORFLOWTRAININGREPLICASPEC.fields_by_name['restart_policy']._options = None + _DISTRIBUTEDTENSORFLOWTRAININGREPLICASPEC.fields_by_name['restart_policy']._serialized_options = b'\030\001' + _globals['_DISTRIBUTEDTENSORFLOWTRAININGTASK']._serialized_start=177 + _globals['_DISTRIBUTEDTENSORFLOWTRAININGTASK']._serialized_end=722 + _globals['_DISTRIBUTEDTENSORFLOWTRAININGREPLICASPEC']._serialized_start=725 + _globals['_DISTRIBUTEDTENSORFLOWTRAININGREPLICASPEC']._serialized_end=1025 +# @@protoc_insertion_point(module_scope) diff --git a/gen/python/flyteidl2/plugins/kubeflow/tensorflow_pb2.pyi b/gen/python/flyteidl2/plugins/kubeflow/tensorflow_pb2.pyi new file mode 100644 index 0000000000..afb6399141 --- /dev/null +++ b/gen/python/flyteidl2/plugins/kubeflow/tensorflow_pb2.pyi @@ -0,0 +1,36 @@ +from flyteidl2.core import tasks_pb2 as _tasks_pb2 +from flyteidl2.plugins import common_pb2 as _common_pb2 +from flyteidl2.plugins.kubeflow import common_pb2 as _common_pb2_1 +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from typing import ClassVar as _ClassVar, Mapping as _Mapping, Optional as _Optional, Union as _Union + +DESCRIPTOR: _descriptor.FileDescriptor + +class DistributedTensorflowTrainingTask(_message.Message): + __slots__ = ["worker_replicas", "ps_replicas", "chief_replicas", "run_policy", "evaluator_replicas"] + 
WORKER_REPLICAS_FIELD_NUMBER: _ClassVar[int] + PS_REPLICAS_FIELD_NUMBER: _ClassVar[int] + CHIEF_REPLICAS_FIELD_NUMBER: _ClassVar[int] + RUN_POLICY_FIELD_NUMBER: _ClassVar[int] + EVALUATOR_REPLICAS_FIELD_NUMBER: _ClassVar[int] + worker_replicas: DistributedTensorflowTrainingReplicaSpec + ps_replicas: DistributedTensorflowTrainingReplicaSpec + chief_replicas: DistributedTensorflowTrainingReplicaSpec + run_policy: _common_pb2_1.RunPolicy + evaluator_replicas: DistributedTensorflowTrainingReplicaSpec + def __init__(self, worker_replicas: _Optional[_Union[DistributedTensorflowTrainingReplicaSpec, _Mapping]] = ..., ps_replicas: _Optional[_Union[DistributedTensorflowTrainingReplicaSpec, _Mapping]] = ..., chief_replicas: _Optional[_Union[DistributedTensorflowTrainingReplicaSpec, _Mapping]] = ..., run_policy: _Optional[_Union[_common_pb2_1.RunPolicy, _Mapping]] = ..., evaluator_replicas: _Optional[_Union[DistributedTensorflowTrainingReplicaSpec, _Mapping]] = ...) -> None: ... + +class DistributedTensorflowTrainingReplicaSpec(_message.Message): + __slots__ = ["replicas", "image", "resources", "restart_policy", "common"] + REPLICAS_FIELD_NUMBER: _ClassVar[int] + IMAGE_FIELD_NUMBER: _ClassVar[int] + RESOURCES_FIELD_NUMBER: _ClassVar[int] + RESTART_POLICY_FIELD_NUMBER: _ClassVar[int] + COMMON_FIELD_NUMBER: _ClassVar[int] + replicas: int + image: str + resources: _tasks_pb2.Resources + restart_policy: _common_pb2.RestartPolicy + common: _common_pb2.CommonReplicaSpec + def __init__(self, replicas: _Optional[int] = ..., image: _Optional[str] = ..., resources: _Optional[_Union[_tasks_pb2.Resources, _Mapping]] = ..., restart_policy: _Optional[_Union[_common_pb2.RestartPolicy, str]] = ..., common: _Optional[_Union[_common_pb2.CommonReplicaSpec, _Mapping]] = ...) -> None: ... 
diff --git a/gen/python/flyteidl2/plugins/kubeflow/tensorflow_pb2_grpc.py b/gen/python/flyteidl2/plugins/kubeflow/tensorflow_pb2_grpc.py new file mode 100644 index 0000000000..2daafffebf --- /dev/null +++ b/gen/python/flyteidl2/plugins/kubeflow/tensorflow_pb2_grpc.py @@ -0,0 +1,4 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + diff --git a/gen/python/flyteidl2/plugins/mpi_pb2.py b/gen/python/flyteidl2/plugins/mpi_pb2.py new file mode 100644 index 0000000000..2bbc284f3d --- /dev/null +++ b/gen/python/flyteidl2/plugins/mpi_pb2.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: flyteidl2/plugins/mpi.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1b\x66lyteidl2/plugins/mpi.proto\x12\x11\x66lyteidl2.plugins\"\x87\x01\n\x1a\x44istributedMPITrainingTask\x12\x1f\n\x0bnum_workers\x18\x01 \x01(\x05R\nnumWorkers\x12\x32\n\x15num_launcher_replicas\x18\x02 \x01(\x05R\x13numLauncherReplicas\x12\x14\n\x05slots\x18\x03 \x01(\x05R\x05slotsB\xbf\x01\n\x15\x63om.flyteidl2.pluginsB\x08MpiProtoH\x02P\x01Z5github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins\xa2\x02\x03\x46PX\xaa\x02\x11\x46lyteidl2.Plugins\xca\x02\x11\x46lyteidl2\\Plugins\xe2\x02\x1d\x46lyteidl2\\Plugins\\GPBMetadata\xea\x02\x12\x46lyteidl2::Pluginsb\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flyteidl2.plugins.mpi_pb2', _globals) +if 
_descriptor._USE_C_DESCRIPTORS == False: + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\025com.flyteidl2.pluginsB\010MpiProtoH\002P\001Z5github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins\242\002\003FPX\252\002\021Flyteidl2.Plugins\312\002\021Flyteidl2\\Plugins\342\002\035Flyteidl2\\Plugins\\GPBMetadata\352\002\022Flyteidl2::Plugins' + _globals['_DISTRIBUTEDMPITRAININGTASK']._serialized_start=51 + _globals['_DISTRIBUTEDMPITRAININGTASK']._serialized_end=186 +# @@protoc_insertion_point(module_scope) diff --git a/gen/python/flyteidl2/plugins/mpi_pb2.pyi b/gen/python/flyteidl2/plugins/mpi_pb2.pyi new file mode 100644 index 0000000000..b907bede41 --- /dev/null +++ b/gen/python/flyteidl2/plugins/mpi_pb2.pyi @@ -0,0 +1,15 @@ +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from typing import ClassVar as _ClassVar, Optional as _Optional + +DESCRIPTOR: _descriptor.FileDescriptor + +class DistributedMPITrainingTask(_message.Message): + __slots__ = ["num_workers", "num_launcher_replicas", "slots"] + NUM_WORKERS_FIELD_NUMBER: _ClassVar[int] + NUM_LAUNCHER_REPLICAS_FIELD_NUMBER: _ClassVar[int] + SLOTS_FIELD_NUMBER: _ClassVar[int] + num_workers: int + num_launcher_replicas: int + slots: int + def __init__(self, num_workers: _Optional[int] = ..., num_launcher_replicas: _Optional[int] = ..., slots: _Optional[int] = ...) -> None: ... diff --git a/gen/python/flyteidl2/plugins/mpi_pb2_grpc.py b/gen/python/flyteidl2/plugins/mpi_pb2_grpc.py new file mode 100644 index 0000000000..2daafffebf --- /dev/null +++ b/gen/python/flyteidl2/plugins/mpi_pb2_grpc.py @@ -0,0 +1,4 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
+"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + diff --git a/gen/python/flyteidl2/plugins/presto_pb2.py b/gen/python/flyteidl2/plugins/presto_pb2.py new file mode 100644 index 0000000000..08bcd4d565 --- /dev/null +++ b/gen/python/flyteidl2/plugins/presto_pb2.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: flyteidl2/plugins/presto.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1e\x66lyteidl2/plugins/presto.proto\x12\x11\x66lyteidl2.plugins\"\x82\x01\n\x0bPrestoQuery\x12#\n\rrouting_group\x18\x01 \x01(\tR\x0croutingGroup\x12\x18\n\x07\x63\x61talog\x18\x02 \x01(\tR\x07\x63\x61talog\x12\x16\n\x06schema\x18\x03 \x01(\tR\x06schema\x12\x1c\n\tstatement\x18\x04 \x01(\tR\tstatementB\xc2\x01\n\x15\x63om.flyteidl2.pluginsB\x0bPrestoProtoH\x02P\x01Z5github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins\xa2\x02\x03\x46PX\xaa\x02\x11\x46lyteidl2.Plugins\xca\x02\x11\x46lyteidl2\\Plugins\xe2\x02\x1d\x46lyteidl2\\Plugins\\GPBMetadata\xea\x02\x12\x46lyteidl2::Pluginsb\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flyteidl2.plugins.presto_pb2', _globals) +if _descriptor._USE_C_DESCRIPTORS == False: + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = 
b'\n\025com.flyteidl2.pluginsB\013PrestoProtoH\002P\001Z5github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins\242\002\003FPX\252\002\021Flyteidl2.Plugins\312\002\021Flyteidl2\\Plugins\342\002\035Flyteidl2\\Plugins\\GPBMetadata\352\002\022Flyteidl2::Plugins' + _globals['_PRESTOQUERY']._serialized_start=54 + _globals['_PRESTOQUERY']._serialized_end=184 +# @@protoc_insertion_point(module_scope) diff --git a/gen/python/flyteidl2/plugins/presto_pb2.pyi b/gen/python/flyteidl2/plugins/presto_pb2.pyi new file mode 100644 index 0000000000..6f185403e4 --- /dev/null +++ b/gen/python/flyteidl2/plugins/presto_pb2.pyi @@ -0,0 +1,17 @@ +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from typing import ClassVar as _ClassVar, Optional as _Optional + +DESCRIPTOR: _descriptor.FileDescriptor + +class PrestoQuery(_message.Message): + __slots__ = ["routing_group", "catalog", "schema", "statement"] + ROUTING_GROUP_FIELD_NUMBER: _ClassVar[int] + CATALOG_FIELD_NUMBER: _ClassVar[int] + SCHEMA_FIELD_NUMBER: _ClassVar[int] + STATEMENT_FIELD_NUMBER: _ClassVar[int] + routing_group: str + catalog: str + schema: str + statement: str + def __init__(self, routing_group: _Optional[str] = ..., catalog: _Optional[str] = ..., schema: _Optional[str] = ..., statement: _Optional[str] = ...) -> None: ... diff --git a/gen/python/flyteidl2/plugins/presto_pb2_grpc.py b/gen/python/flyteidl2/plugins/presto_pb2_grpc.py new file mode 100644 index 0000000000..2daafffebf --- /dev/null +++ b/gen/python/flyteidl2/plugins/presto_pb2_grpc.py @@ -0,0 +1,4 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
+"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + diff --git a/gen/python/flyteidl2/plugins/qubole_pb2.py b/gen/python/flyteidl2/plugins/qubole_pb2.py new file mode 100644 index 0000000000..2cc97e4fc8 --- /dev/null +++ b/gen/python/flyteidl2/plugins/qubole_pb2.py @@ -0,0 +1,32 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: flyteidl2/plugins/qubole.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1e\x66lyteidl2/plugins/qubole.proto\x12\x11\x66lyteidl2.plugins\"b\n\tHiveQuery\x12\x14\n\x05query\x18\x01 \x01(\tR\x05query\x12\x1f\n\x0btimeout_sec\x18\x02 \x01(\rR\ntimeoutSec\x12\x1e\n\nretryCount\x18\x03 \x01(\rR\nretryCount\"M\n\x13HiveQueryCollection\x12\x36\n\x07queries\x18\x02 \x03(\x0b\x32\x1c.flyteidl2.plugins.HiveQueryR\x07queries\"\xd3\x01\n\rQuboleHiveJob\x12#\n\rcluster_label\x18\x01 \x01(\tR\x0c\x63lusterLabel\x12U\n\x10query_collection\x18\x02 \x01(\x0b\x32&.flyteidl2.plugins.HiveQueryCollectionB\x02\x18\x01R\x0fqueryCollection\x12\x12\n\x04tags\x18\x03 \x03(\tR\x04tags\x12\x32\n\x05query\x18\x04 \x01(\x0b\x32\x1c.flyteidl2.plugins.HiveQueryR\x05queryB\xc2\x01\n\x15\x63om.flyteidl2.pluginsB\x0bQuboleProtoH\x02P\x01Z5github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins\xa2\x02\x03\x46PX\xaa\x02\x11\x46lyteidl2.Plugins\xca\x02\x11\x46lyteidl2\\Plugins\xe2\x02\x1d\x46lyteidl2\\Plugins\\GPBMetadata\xea\x02\x12\x46lyteidl2::Pluginsb\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 
'flyteidl2.plugins.qubole_pb2', _globals) +if _descriptor._USE_C_DESCRIPTORS == False: + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\025com.flyteidl2.pluginsB\013QuboleProtoH\002P\001Z5github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins\242\002\003FPX\252\002\021Flyteidl2.Plugins\312\002\021Flyteidl2\\Plugins\342\002\035Flyteidl2\\Plugins\\GPBMetadata\352\002\022Flyteidl2::Plugins' + _QUBOLEHIVEJOB.fields_by_name['query_collection']._options = None + _QUBOLEHIVEJOB.fields_by_name['query_collection']._serialized_options = b'\030\001' + _globals['_HIVEQUERY']._serialized_start=53 + _globals['_HIVEQUERY']._serialized_end=151 + _globals['_HIVEQUERYCOLLECTION']._serialized_start=153 + _globals['_HIVEQUERYCOLLECTION']._serialized_end=230 + _globals['_QUBOLEHIVEJOB']._serialized_start=233 + _globals['_QUBOLEHIVEJOB']._serialized_end=444 +# @@protoc_insertion_point(module_scope) diff --git a/gen/python/flyteidl2/plugins/qubole_pb2.pyi b/gen/python/flyteidl2/plugins/qubole_pb2.pyi new file mode 100644 index 0000000000..71e6bd6698 --- /dev/null +++ b/gen/python/flyteidl2/plugins/qubole_pb2.pyi @@ -0,0 +1,34 @@ +from google.protobuf.internal import containers as _containers +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from typing import ClassVar as _ClassVar, Iterable as _Iterable, Mapping as _Mapping, Optional as _Optional, Union as _Union + +DESCRIPTOR: _descriptor.FileDescriptor + +class HiveQuery(_message.Message): + __slots__ = ["query", "timeout_sec", "retryCount"] + QUERY_FIELD_NUMBER: _ClassVar[int] + TIMEOUT_SEC_FIELD_NUMBER: _ClassVar[int] + RETRYCOUNT_FIELD_NUMBER: _ClassVar[int] + query: str + timeout_sec: int + retryCount: int + def __init__(self, query: _Optional[str] = ..., timeout_sec: _Optional[int] = ..., retryCount: _Optional[int] = ...) -> None: ... 
+ +class HiveQueryCollection(_message.Message): + __slots__ = ["queries"] + QUERIES_FIELD_NUMBER: _ClassVar[int] + queries: _containers.RepeatedCompositeFieldContainer[HiveQuery] + def __init__(self, queries: _Optional[_Iterable[_Union[HiveQuery, _Mapping]]] = ...) -> None: ... + +class QuboleHiveJob(_message.Message): + __slots__ = ["cluster_label", "query_collection", "tags", "query"] + CLUSTER_LABEL_FIELD_NUMBER: _ClassVar[int] + QUERY_COLLECTION_FIELD_NUMBER: _ClassVar[int] + TAGS_FIELD_NUMBER: _ClassVar[int] + QUERY_FIELD_NUMBER: _ClassVar[int] + cluster_label: str + query_collection: HiveQueryCollection + tags: _containers.RepeatedScalarFieldContainer[str] + query: HiveQuery + def __init__(self, cluster_label: _Optional[str] = ..., query_collection: _Optional[_Union[HiveQueryCollection, _Mapping]] = ..., tags: _Optional[_Iterable[str]] = ..., query: _Optional[_Union[HiveQuery, _Mapping]] = ...) -> None: ... diff --git a/gen/python/flyteidl2/plugins/qubole_pb2_grpc.py b/gen/python/flyteidl2/plugins/qubole_pb2_grpc.py new file mode 100644 index 0000000000..2daafffebf --- /dev/null +++ b/gen/python/flyteidl2/plugins/qubole_pb2_grpc.py @@ -0,0 +1,4 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + diff --git a/gen/python/flyteidl2/plugins/tensorflow_pb2.py b/gen/python/flyteidl2/plugins/tensorflow_pb2.py new file mode 100644 index 0000000000..65c56353b1 --- /dev/null +++ b/gen/python/flyteidl2/plugins/tensorflow_pb2.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: flyteidl2/plugins/tensorflow.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\"flyteidl2/plugins/tensorflow.proto\x12\x11\x66lyteidl2.plugins\"\xb4\x01\n!DistributedTensorflowTrainingTask\x12\x18\n\x07workers\x18\x01 \x01(\x05R\x07workers\x12\x1f\n\x0bps_replicas\x18\x02 \x01(\x05R\npsReplicas\x12%\n\x0e\x63hief_replicas\x18\x03 \x01(\x05R\rchiefReplicas\x12-\n\x12\x65valuator_replicas\x18\x04 \x01(\x05R\x11\x65valuatorReplicasB\xc6\x01\n\x15\x63om.flyteidl2.pluginsB\x0fTensorflowProtoH\x02P\x01Z5github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins\xa2\x02\x03\x46PX\xaa\x02\x11\x46lyteidl2.Plugins\xca\x02\x11\x46lyteidl2\\Plugins\xe2\x02\x1d\x46lyteidl2\\Plugins\\GPBMetadata\xea\x02\x12\x46lyteidl2::Pluginsb\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'flyteidl2.plugins.tensorflow_pb2', _globals) +if _descriptor._USE_C_DESCRIPTORS == False: + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\025com.flyteidl2.pluginsB\017TensorflowProtoH\002P\001Z5github.com/flyteorg/flyte/v2/gen/go/flyteidl2/plugins\242\002\003FPX\252\002\021Flyteidl2.Plugins\312\002\021Flyteidl2\\Plugins\342\002\035Flyteidl2\\Plugins\\GPBMetadata\352\002\022Flyteidl2::Plugins' + _globals['_DISTRIBUTEDTENSORFLOWTRAININGTASK']._serialized_start=58 + _globals['_DISTRIBUTEDTENSORFLOWTRAININGTASK']._serialized_end=238 +# @@protoc_insertion_point(module_scope) diff --git a/gen/python/flyteidl2/plugins/tensorflow_pb2.pyi b/gen/python/flyteidl2/plugins/tensorflow_pb2.pyi new file mode 
100644 index 0000000000..81e2bc30b9 --- /dev/null +++ b/gen/python/flyteidl2/plugins/tensorflow_pb2.pyi @@ -0,0 +1,17 @@ +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from typing import ClassVar as _ClassVar, Optional as _Optional + +DESCRIPTOR: _descriptor.FileDescriptor + +class DistributedTensorflowTrainingTask(_message.Message): + __slots__ = ["workers", "ps_replicas", "chief_replicas", "evaluator_replicas"] + WORKERS_FIELD_NUMBER: _ClassVar[int] + PS_REPLICAS_FIELD_NUMBER: _ClassVar[int] + CHIEF_REPLICAS_FIELD_NUMBER: _ClassVar[int] + EVALUATOR_REPLICAS_FIELD_NUMBER: _ClassVar[int] + workers: int + ps_replicas: int + chief_replicas: int + evaluator_replicas: int + def __init__(self, workers: _Optional[int] = ..., ps_replicas: _Optional[int] = ..., chief_replicas: _Optional[int] = ..., evaluator_replicas: _Optional[int] = ...) -> None: ... diff --git a/gen/python/flyteidl2/plugins/tensorflow_pb2_grpc.py b/gen/python/flyteidl2/plugins/tensorflow_pb2_grpc.py new file mode 100644 index 0000000000..2daafffebf --- /dev/null +++ b/gen/python/flyteidl2/plugins/tensorflow_pb2_grpc.py @@ -0,0 +1,4 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
+"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + diff --git a/gen/rust/Cargo.lock b/gen/rust/Cargo.lock index 7c31ad4237..bfb247d789 100644 --- a/gen/rust/Cargo.lock +++ b/gen/rust/Cargo.lock @@ -1214,9 +1214,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.109" +version = "2.0.110" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f17c7e013e88258aa9543dcbe81aca68a667a9ac37cd69c9fbc07858bfe0e2f" +checksum = "a99801b5bd34ede4cf3fc688c5919368fea4e4814a4664359503e6015b280aea" dependencies = [ "proc-macro2", "quote", diff --git a/gen/rust/src/flyteidl2.cacheservice.rs b/gen/rust/src/flyteidl2.cacheservice.rs new file mode 100644 index 0000000000..a253cb31e9 --- /dev/null +++ b/gen/rust/src/flyteidl2.cacheservice.rs @@ -0,0 +1,657 @@ +// @generated +// This file is @generated by prost-build. +/// +/// Additional metadata as key-value pairs +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct KeyMapMetadata { + /// Additional metadata as key-value pairs + #[prost(map="string, string", tag="1")] + pub values: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, +} +/// +/// Metadata for cached outputs, including the source identifier and timestamps. 
+#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Metadata { + /// Source task or workflow identifier + #[prost(message, optional, tag="1")] + pub source_identifier: ::core::option::Option, + /// Additional metadata as key-value pairs + #[prost(message, optional, tag="2")] + pub key_map: ::core::option::Option, + /// Creation timestamp + #[prost(message, optional, tag="3")] + pub created_at: ::core::option::Option, + /// Last update timestamp + #[prost(message, optional, tag="4")] + pub last_updated_at: ::core::option::Option, +} +/// +/// Represents cached output, either as literals or an URI, with associated metadata. +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CachedOutput { + /// Associated metadata + #[prost(message, optional, tag="3")] + pub metadata: ::core::option::Option, + #[prost(oneof="cached_output::Output", tags="1, 2")] + pub output: ::core::option::Option, +} +/// Nested message and enum types in `CachedOutput`. +pub mod cached_output { + #[pyo3::pyclass(dict, get_all, set_all)] + #[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Output { + /// Output literals + #[prost(message, tag="1")] + OutputLiterals(super::super::core::LiteralMap), + /// URI to output data + #[prost(string, tag="2")] + OutputUri(::prost::alloc::string::String), + } +} +/// +/// Request to retrieve cached data by key. +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetCacheRequest { + /// Cache key + #[prost(string, tag="1")] + pub key: ::prost::alloc::string::String, +} +/// +/// Response with cached data for a given key. 
+#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetCacheResponse { + /// Cached output + #[prost(message, optional, tag="1")] + pub output: ::core::option::Option, +} +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct OverwriteOutput { + /// Overwrite flag + #[prost(bool, tag="1")] + pub overwrite: bool, + /// Delete existing blob + #[prost(bool, tag="2")] + pub delete_blob: bool, + /// Maximum age of the cached output since last update + #[prost(message, optional, tag="3")] + pub max_age: ::core::option::Option, +} +/// +/// Request to store/update cached data by key. +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct PutCacheRequest { + /// Cache key + #[prost(string, tag="1")] + pub key: ::prost::alloc::string::String, + /// Output to cache + #[prost(message, optional, tag="2")] + pub output: ::core::option::Option, + /// Overwrite flag if exists + #[prost(message, optional, tag="3")] + pub overwrite: ::core::option::Option, +} +/// +/// Response message of cache store/update operation. +/// +/// Empty, success indicated by no errors +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct PutCacheResponse { +} +/// +/// Request to delete cached data by key. +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DeleteCacheRequest { + /// Cache key + #[prost(string, tag="1")] + pub key: ::prost::alloc::string::String, +} +/// +/// Response message of cache deletion operation. 
+/// +/// Empty, success indicated by no errors +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct DeleteCacheResponse { +} +/// A reservation including owner, heartbeat interval, expiration timestamp, and various metadata. +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Reservation { + /// The unique ID for the reservation - same as the cache key + #[prost(string, tag="1")] + pub key: ::prost::alloc::string::String, + /// The unique ID of the owner for the reservation + #[prost(string, tag="2")] + pub owner_id: ::prost::alloc::string::String, + /// Requested reservation extension heartbeat interval + #[prost(message, optional, tag="3")] + pub heartbeat_interval: ::core::option::Option, + /// Expiration timestamp of this reservation + #[prost(message, optional, tag="4")] + pub expires_at: ::core::option::Option, +} +/// +/// Request to get or extend a reservation for a cache key +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetOrExtendReservationRequest { + /// The unique ID for the reservation - same as the cache key + #[prost(string, tag="1")] + pub key: ::prost::alloc::string::String, + /// The unique ID of the owner for the reservation + #[prost(string, tag="2")] + pub owner_id: ::prost::alloc::string::String, + /// Requested reservation extension heartbeat interval + #[prost(message, optional, tag="3")] + pub heartbeat_interval: ::core::option::Option, +} +/// +/// Request to get or extend a reservation for a cache key +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetOrExtendReservationResponse { + /// The reservation that was created or extended + 
#[prost(message, optional, tag="1")] + pub reservation: ::core::option::Option, +} +/// +/// Request to release the reservation for a cache key +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ReleaseReservationRequest { + /// The unique ID for the reservation - same as the cache key + #[prost(string, tag="1")] + pub key: ::prost::alloc::string::String, + /// The unique ID of the owner for the reservation + #[prost(string, tag="2")] + pub owner_id: ::prost::alloc::string::String, +} +/// +/// Response message of release reservation operation. +/// +/// Empty, success indicated by no errors +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct ReleaseReservationResponse { +} +/// Encoded file descriptor set for the `flyteidl2.cacheservice` package +pub const FILE_DESCRIPTOR_SET: &[u8] = &[ + 0x0a, 0xac, 0x38, 0x0a, 0x29, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, + 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x63, 0x61, 0x63, 0x68, + 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x16, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x1a, 0x1f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, + 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x6c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x73, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x70, 
0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x97, 0x01, 0x0a, 0x0e, 0x4b, 0x65, 0x79, 0x4d, + 0x61, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x4a, 0x0a, 0x06, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x4b, 0x65, 0x79, 0x4d, 0x61, 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, + 0x01, 0x22, 0x93, 0x02, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x47, + 0x0a, 0x11, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x10, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x3f, 0x0a, 0x07, 0x6b, 0x65, 0x79, 0x5f, 0x6d, + 0x61, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2e, 0x4b, 0x65, 0x79, 0x4d, 0x61, 0x70, 
0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x52, 0x06, 0x6b, 0x65, 0x79, 0x4d, 0x61, 0x70, 0x12, 0x39, 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x64, 0x41, 0x74, 0x12, 0x42, 0x0a, 0x0f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0d, 0x6c, 0x61, 0x73, 0x74, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x22, 0xbe, 0x01, 0x0a, 0x0c, 0x43, 0x61, 0x63, 0x68, + 0x65, 0x64, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x45, 0x0a, 0x0f, 0x6f, 0x75, 0x74, 0x70, + 0x75, 0x74, 0x5f, 0x6c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x4d, 0x61, 0x70, 0x48, 0x00, 0x52, + 0x0e, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x73, 0x12, + 0x1f, 0x0a, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x55, 0x72, 0x69, + 0x12, 0x3c, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, + 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 
0x42, 0x08, + 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x22, 0x23, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x43, + 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x50, 0x0a, + 0x10, 0x47, 0x65, 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x3c, 0x0a, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, + 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, + 0x64, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x22, + 0x84, 0x01, 0x0a, 0x0f, 0x4f, 0x76, 0x65, 0x72, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4f, 0x75, 0x74, + 0x70, 0x75, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6f, 0x76, 0x65, 0x72, 0x77, 0x72, 0x69, 0x74, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x6f, 0x76, 0x65, 0x72, 0x77, 0x72, 0x69, 0x74, + 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x5f, 0x62, 0x6c, 0x6f, 0x62, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0a, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x6c, + 0x6f, 0x62, 0x12, 0x32, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, + 0x6d, 0x61, 0x78, 0x41, 0x67, 0x65, 0x22, 0xa8, 0x01, 0x0a, 0x0f, 0x50, 0x75, 0x74, 0x43, 0x61, + 0x63, 0x68, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3c, 0x0a, 0x06, + 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x66, + 0x6c, 0x79, 0x74, 0x65, 
0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x64, 0x4f, 0x75, 0x74, 0x70, + 0x75, 0x74, 0x52, 0x06, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x12, 0x45, 0x0a, 0x09, 0x6f, 0x76, + 0x65, 0x72, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4f, 0x76, 0x65, 0x72, 0x77, 0x72, 0x69, 0x74, 0x65, + 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x52, 0x09, 0x6f, 0x76, 0x65, 0x72, 0x77, 0x72, 0x69, 0x74, + 0x65, 0x22, 0x12, 0x0a, 0x10, 0x50, 0x75, 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, + 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x22, 0x15, 0x0a, + 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xbf, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x5f, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x49, + 0x64, 0x12, 0x48, 0x0a, 0x12, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x5f, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, + 0x65, 0x61, 0x74, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 
0x6c, 0x12, 0x39, 0x0a, 0x0a, 0x65, + 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x65, 0x78, 0x70, + 0x69, 0x72, 0x65, 0x73, 0x41, 0x74, 0x22, 0x96, 0x01, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x4f, 0x72, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x77, + 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x77, + 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x48, 0x0a, 0x12, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, + 0x61, 0x74, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x68, 0x65, + 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x22, + 0x67, 0x0a, 0x1e, 0x47, 0x65, 0x74, 0x4f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x52, 0x65, + 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x45, 0x0a, 0x0b, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, + 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x72, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x48, 0x0a, 0x19, 0x52, 0x65, 0x6c, 0x65, + 
0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x77, 0x6e, 0x65, 0x72, + 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x77, 0x6e, 0x65, 0x72, + 0x49, 0x64, 0x22, 0x1c, 0x0a, 0x1a, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x32, 0xac, 0x04, 0x0a, 0x0c, 0x43, 0x61, 0x63, 0x68, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x12, 0x58, 0x0a, 0x03, 0x47, 0x65, 0x74, 0x12, 0x27, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x28, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, + 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x61, + 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x58, 0x0a, 0x03, 0x50, + 0x75, 0x74, 0x12, 0x27, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, + 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x50, 0x75, 0x74, 0x43, + 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x2e, 0x50, 0x75, 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x61, 0x0a, 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, + 0x2a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, + 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 
0x63, 0x65, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, + 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x87, 0x01, 0x0a, 0x16, 0x47, 0x65, 0x74, + 0x4f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x35, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, + 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x65, 0x74, + 0x4f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x52, + 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x7b, 0x0a, 0x12, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x31, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x65, + 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 
0x6e, 0x73, 0x65, 0x42, + 0xe4, 0x01, 0x0a, 0x1a, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x42, 0x11, + 0x43, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x50, 0x01, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x76, + 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x32, 0x2f, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0xa2, + 0x02, 0x03, 0x46, 0x43, 0x58, 0xaa, 0x02, 0x16, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x32, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0xca, 0x02, + 0x16, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x43, 0x61, 0x63, 0x68, 0x65, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0xe2, 0x02, 0x22, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x5c, 0x43, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x17, 0x46, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x3a, 0x3a, 0x43, 0x61, 0x63, 0x68, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x4a, 0xae, 0x23, 0x0a, 0x07, 0x12, 0x05, 0x00, 0x00, 0x92, + 0x01, 0x01, 0x0a, 0x08, 0x0a, 0x01, 0x0c, 0x12, 0x03, 0x00, 0x00, 0x12, 0x0a, 0x08, 0x0a, 0x01, + 0x02, 0x12, 0x03, 0x02, 0x00, 0x1f, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x00, 0x12, 0x03, 0x04, 0x00, + 0x29, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x01, 0x12, 0x03, 0x05, 0x00, 0x27, 0x0a, 0x09, 0x0a, 0x02, + 0x03, 0x02, 0x12, 0x03, 0x06, 0x00, 0x28, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x03, 0x12, 0x03, 0x07, + 0x00, 0x29, 0x0a, 0x08, 0x0a, 0x01, 0x08, 0x12, 0x03, 0x09, 0x00, 0x51, 0x0a, 0x09, 0x0a, 0x02, + 0x08, 0x0b, 
0x12, 0x03, 0x09, 0x00, 0x51, 0x0a, 0x90, 0x01, 0x0a, 0x02, 0x06, 0x00, 0x12, 0x04, + 0x0e, 0x00, 0x1d, 0x01, 0x1a, 0x83, 0x01, 0x0a, 0x20, 0x43, 0x61, 0x63, 0x68, 0x65, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x20, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x73, 0x20, 0x6f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x63, 0x61, 0x63, + 0x68, 0x65, 0x20, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x20, 0x69, 0x6e, + 0x63, 0x6c, 0x75, 0x64, 0x69, 0x6e, 0x67, 0x20, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x61, + 0x6c, 0x2c, 0x20, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2c, 0x20, 0x61, 0x6e, 0x64, 0x20, + 0x64, 0x65, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x6f, 0x66, 0x20, 0x63, 0x61, 0x63, 0x68, + 0x65, 0x64, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x2f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x20, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x2e, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x06, 0x00, + 0x01, 0x12, 0x03, 0x0e, 0x08, 0x14, 0x0a, 0x2c, 0x0a, 0x04, 0x06, 0x00, 0x02, 0x00, 0x12, 0x03, + 0x10, 0x02, 0x36, 0x1a, 0x1f, 0x20, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x73, 0x20, + 0x63, 0x61, 0x63, 0x68, 0x65, 0x64, 0x20, 0x64, 0x61, 0x74, 0x61, 0x20, 0x62, 0x79, 0x20, 0x6b, + 0x65, 0x79, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x00, 0x01, 0x12, 0x03, 0x10, + 0x06, 0x09, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x00, 0x02, 0x12, 0x03, 0x10, 0x0a, 0x19, + 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x00, 0x03, 0x12, 0x03, 0x10, 0x24, 0x34, 0x0a, 0x34, + 0x0a, 0x04, 0x06, 0x00, 0x02, 0x01, 0x12, 0x03, 0x13, 0x02, 0x36, 0x1a, 0x27, 0x20, 0x53, 0x74, + 0x6f, 0x72, 0x65, 0x73, 0x20, 0x6f, 0x72, 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x73, 0x20, + 0x63, 0x61, 0x63, 0x68, 0x65, 0x64, 0x20, 0x64, 0x61, 0x74, 0x61, 0x20, 0x62, 0x79, 0x20, 0x6b, + 0x65, 0x79, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x01, 0x01, 0x12, 0x03, 0x13, + 0x06, 0x09, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 
0x02, 0x01, 0x02, 0x12, 0x03, 0x13, 0x0a, 0x19, + 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x01, 0x03, 0x12, 0x03, 0x13, 0x24, 0x34, 0x0a, 0x2a, + 0x0a, 0x04, 0x06, 0x00, 0x02, 0x02, 0x12, 0x03, 0x16, 0x02, 0x3f, 0x1a, 0x1d, 0x20, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x73, 0x20, 0x63, 0x61, 0x63, 0x68, 0x65, 0x64, 0x20, 0x64, 0x61, 0x74, + 0x61, 0x20, 0x62, 0x79, 0x20, 0x6b, 0x65, 0x79, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, + 0x02, 0x02, 0x01, 0x12, 0x03, 0x16, 0x06, 0x0c, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x02, + 0x02, 0x12, 0x03, 0x16, 0x0d, 0x1f, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x02, 0x03, 0x12, + 0x03, 0x16, 0x2a, 0x3d, 0x0a, 0x3a, 0x0a, 0x04, 0x06, 0x00, 0x02, 0x03, 0x12, 0x03, 0x19, 0x02, + 0x65, 0x1a, 0x2d, 0x20, 0x47, 0x65, 0x74, 0x20, 0x6f, 0x72, 0x20, 0x65, 0x78, 0x74, 0x65, 0x6e, + 0x64, 0x20, 0x61, 0x20, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, + 0x66, 0x6f, 0x72, 0x20, 0x61, 0x20, 0x63, 0x61, 0x63, 0x68, 0x65, 0x20, 0x6b, 0x65, 0x79, 0x0a, + 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x03, 0x01, 0x12, 0x03, 0x19, 0x06, 0x1c, 0x0a, 0x0c, + 0x0a, 0x05, 0x06, 0x00, 0x02, 0x03, 0x02, 0x12, 0x03, 0x19, 0x1d, 0x3a, 0x0a, 0x0c, 0x0a, 0x05, + 0x06, 0x00, 0x02, 0x03, 0x03, 0x12, 0x03, 0x19, 0x45, 0x63, 0x0a, 0x36, 0x0a, 0x04, 0x06, 0x00, + 0x02, 0x04, 0x12, 0x03, 0x1c, 0x02, 0x59, 0x1a, 0x29, 0x20, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, + 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x20, 0x63, 0x61, 0x63, 0x68, 0x65, 0x20, 0x6b, 0x65, + 0x79, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x04, 0x01, 0x12, 0x03, 0x1c, 0x06, 0x18, + 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x04, 0x02, 0x12, 0x03, 0x1c, 0x19, 0x32, 0x0a, 0x0c, + 0x0a, 0x05, 0x06, 0x00, 0x02, 0x04, 0x03, 0x12, 0x03, 0x1c, 0x3d, 0x57, 0x0a, 0x35, 0x0a, 0x02, + 0x04, 0x00, 0x12, 0x04, 0x22, 0x00, 0x24, 0x01, 0x1a, 0x29, 0x0a, 0x20, 0x41, 0x64, 
0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x20, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x20, + 0x61, 0x73, 0x20, 0x6b, 0x65, 0x79, 0x2d, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x70, 0x61, 0x69, + 0x72, 0x73, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x00, 0x01, 0x12, 0x03, 0x22, 0x08, 0x16, 0x0a, + 0x35, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x00, 0x12, 0x03, 0x23, 0x02, 0x21, 0x22, 0x28, 0x20, 0x41, + 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x20, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x20, 0x61, 0x73, 0x20, 0x6b, 0x65, 0x79, 0x2d, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, + 0x70, 0x61, 0x69, 0x72, 0x73, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x06, 0x12, + 0x03, 0x23, 0x02, 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x01, 0x12, 0x03, 0x23, + 0x16, 0x1c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x03, 0x12, 0x03, 0x23, 0x1f, 0x20, + 0x0a, 0x5b, 0x0a, 0x02, 0x04, 0x01, 0x12, 0x04, 0x29, 0x00, 0x2e, 0x01, 0x1a, 0x4f, 0x0a, 0x20, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x63, 0x61, 0x63, + 0x68, 0x65, 0x64, 0x20, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x2c, 0x20, 0x69, 0x6e, 0x63, + 0x6c, 0x75, 0x64, 0x69, 0x6e, 0x67, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x20, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x20, 0x61, 0x6e, 0x64, + 0x20, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x2e, 0x0a, 0x0a, 0x0a, 0x0a, + 0x03, 0x04, 0x01, 0x01, 0x12, 0x03, 0x29, 0x08, 0x10, 0x0a, 0x31, 0x0a, 0x04, 0x04, 0x01, 0x02, + 0x00, 0x12, 0x03, 0x2a, 0x02, 0x28, 0x22, 0x24, 0x20, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x20, + 0x74, 0x61, 0x73, 0x6b, 0x20, 0x6f, 0x72, 0x20, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x20, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x01, 0x02, 0x00, 0x06, 0x12, 0x03, 0x2a, 0x02, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, + 0x02, 0x00, 0x01, 0x12, 
0x03, 0x2a, 0x12, 0x23, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, + 0x03, 0x12, 0x03, 0x2a, 0x26, 0x27, 0x0a, 0x35, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x01, 0x12, 0x03, + 0x2b, 0x02, 0x1d, 0x22, 0x28, 0x20, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x20, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x20, 0x61, 0x73, 0x20, 0x6b, 0x65, 0x79, + 0x2d, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x70, 0x61, 0x69, 0x72, 0x73, 0x0a, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x01, 0x02, 0x01, 0x06, 0x12, 0x03, 0x2b, 0x02, 0x10, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x01, 0x02, 0x01, 0x01, 0x12, 0x03, 0x2b, 0x11, 0x18, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, + 0x01, 0x03, 0x12, 0x03, 0x2b, 0x1b, 0x1c, 0x0a, 0x21, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x02, 0x12, + 0x03, 0x2c, 0x02, 0x2b, 0x22, 0x14, 0x20, 0x43, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, + 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, + 0x02, 0x02, 0x06, 0x12, 0x03, 0x2c, 0x02, 0x1b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x02, + 0x01, 0x12, 0x03, 0x2c, 0x1c, 0x26, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x02, 0x03, 0x12, + 0x03, 0x2c, 0x29, 0x2a, 0x0a, 0x24, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x03, 0x12, 0x03, 0x2d, 0x02, + 0x30, 0x22, 0x17, 0x20, 0x4c, 0x61, 0x73, 0x74, 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x20, + 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, + 0x02, 0x03, 0x06, 0x12, 0x03, 0x2d, 0x02, 0x1b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x03, + 0x01, 0x12, 0x03, 0x2d, 0x1c, 0x2b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x03, 0x03, 0x12, + 0x03, 0x2d, 0x2e, 0x2f, 0x0a, 0x60, 0x0a, 0x02, 0x04, 0x02, 0x12, 0x04, 0x33, 0x00, 0x39, 0x01, + 0x1a, 0x54, 0x0a, 0x20, 0x52, 0x65, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x73, 0x20, 0x63, + 0x61, 0x63, 0x68, 0x65, 0x64, 0x20, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x2c, 0x20, 0x65, 0x69, + 0x74, 0x68, 0x65, 0x72, 0x20, 0x61, 0x73, 0x20, 0x6c, 0x69, 
0x74, 0x65, 0x72, 0x61, 0x6c, 0x73, + 0x20, 0x6f, 0x72, 0x20, 0x61, 0x6e, 0x20, 0x55, 0x52, 0x49, 0x2c, 0x20, 0x77, 0x69, 0x74, 0x68, + 0x20, 0x61, 0x73, 0x73, 0x6f, 0x63, 0x69, 0x61, 0x74, 0x65, 0x64, 0x20, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x2e, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x02, 0x01, 0x12, 0x03, 0x33, + 0x08, 0x14, 0x0a, 0x0c, 0x0a, 0x04, 0x04, 0x02, 0x08, 0x00, 0x12, 0x04, 0x34, 0x02, 0x37, 0x03, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x08, 0x00, 0x01, 0x12, 0x03, 0x34, 0x08, 0x0e, 0x0a, 0x1e, + 0x0a, 0x04, 0x04, 0x02, 0x02, 0x00, 0x12, 0x03, 0x35, 0x04, 0x32, 0x22, 0x11, 0x20, 0x4f, 0x75, + 0x74, 0x70, 0x75, 0x74, 0x20, 0x6c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x73, 0x0a, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, 0x06, 0x12, 0x03, 0x35, 0x04, 0x1d, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x02, 0x02, 0x00, 0x01, 0x12, 0x03, 0x35, 0x1e, 0x2d, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, + 0x02, 0x00, 0x03, 0x12, 0x03, 0x35, 0x30, 0x31, 0x0a, 0x21, 0x0a, 0x04, 0x04, 0x02, 0x02, 0x01, + 0x12, 0x03, 0x36, 0x04, 0x1a, 0x22, 0x14, 0x20, 0x55, 0x52, 0x49, 0x20, 0x74, 0x6f, 0x20, 0x6f, + 0x75, 0x74, 0x70, 0x75, 0x74, 0x20, 0x64, 0x61, 0x74, 0x61, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x02, 0x02, 0x01, 0x05, 0x12, 0x03, 0x36, 0x04, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, + 0x01, 0x01, 0x12, 0x03, 0x36, 0x0b, 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x01, 0x03, + 0x12, 0x03, 0x36, 0x18, 0x19, 0x0a, 0x22, 0x0a, 0x04, 0x04, 0x02, 0x02, 0x02, 0x12, 0x03, 0x38, + 0x02, 0x18, 0x22, 0x15, 0x20, 0x41, 0x73, 0x73, 0x6f, 0x63, 0x69, 0x61, 0x74, 0x65, 0x64, 0x20, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, + 0x02, 0x06, 0x12, 0x03, 0x38, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x02, 0x01, + 0x12, 0x03, 0x38, 0x0b, 0x13, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x02, 0x03, 0x12, 0x03, + 0x38, 0x16, 0x17, 0x0a, 0x36, 0x0a, 0x02, 0x04, 0x03, 0x12, 0x04, 0x3e, 0x00, 0x40, 0x01, 0x1a, + 
0x2a, 0x0a, 0x20, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x72, 0x65, + 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x20, 0x63, 0x61, 0x63, 0x68, 0x65, 0x64, 0x20, 0x64, 0x61, + 0x74, 0x61, 0x20, 0x62, 0x79, 0x20, 0x6b, 0x65, 0x79, 0x2e, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, + 0x03, 0x01, 0x12, 0x03, 0x3e, 0x08, 0x17, 0x0a, 0x18, 0x0a, 0x04, 0x04, 0x03, 0x02, 0x00, 0x12, + 0x03, 0x3f, 0x02, 0x11, 0x22, 0x0b, 0x20, 0x43, 0x61, 0x63, 0x68, 0x65, 0x20, 0x6b, 0x65, 0x79, + 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x00, 0x05, 0x12, 0x03, 0x3f, 0x02, 0x08, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x00, 0x01, 0x12, 0x03, 0x3f, 0x09, 0x0c, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x03, 0x02, 0x00, 0x03, 0x12, 0x03, 0x3f, 0x0f, 0x10, 0x0a, 0x39, 0x0a, 0x02, 0x04, + 0x04, 0x12, 0x04, 0x45, 0x00, 0x47, 0x01, 0x1a, 0x2d, 0x0a, 0x20, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x20, 0x77, 0x69, 0x74, 0x68, 0x20, 0x63, 0x61, 0x63, 0x68, 0x65, 0x64, 0x20, + 0x64, 0x61, 0x74, 0x61, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x20, 0x67, 0x69, 0x76, 0x65, 0x6e, + 0x20, 0x6b, 0x65, 0x79, 0x2e, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x04, 0x01, 0x12, 0x03, 0x45, + 0x08, 0x18, 0x0a, 0x1c, 0x0a, 0x04, 0x04, 0x04, 0x02, 0x00, 0x12, 0x03, 0x46, 0x02, 0x1a, 0x22, + 0x0f, 0x20, 0x43, 0x61, 0x63, 0x68, 0x65, 0x64, 0x20, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x0a, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x00, 0x06, 0x12, 0x03, 0x46, 0x02, 0x0e, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x04, 0x02, 0x00, 0x01, 0x12, 0x03, 0x46, 0x0f, 0x15, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x04, 0x02, 0x00, 0x03, 0x12, 0x03, 0x46, 0x18, 0x19, 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x05, + 0x12, 0x04, 0x49, 0x00, 0x4d, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x05, 0x01, 0x12, 0x03, 0x49, + 0x08, 0x17, 0x0a, 0x1d, 0x0a, 0x04, 0x04, 0x05, 0x02, 0x00, 0x12, 0x03, 0x4a, 0x02, 0x15, 0x22, + 0x10, 0x20, 0x4f, 0x76, 0x65, 0x72, 0x77, 0x72, 0x69, 0x74, 0x65, 0x20, 0x66, 0x6c, 0x61, 0x67, + 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 
0x05, 0x02, 0x00, 0x05, 0x12, 0x03, 0x4a, 0x02, 0x06, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x00, 0x01, 0x12, 0x03, 0x4a, 0x07, 0x10, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x05, 0x02, 0x00, 0x03, 0x12, 0x03, 0x4a, 0x13, 0x14, 0x0a, 0x23, 0x0a, 0x04, 0x04, + 0x05, 0x02, 0x01, 0x12, 0x03, 0x4b, 0x02, 0x17, 0x22, 0x16, 0x20, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x20, 0x65, 0x78, 0x69, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x20, 0x62, 0x6c, 0x6f, 0x62, 0x0a, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x01, 0x05, 0x12, 0x03, 0x4b, 0x02, 0x06, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x05, 0x02, 0x01, 0x01, 0x12, 0x03, 0x4b, 0x07, 0x12, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x05, 0x02, 0x01, 0x03, 0x12, 0x03, 0x4b, 0x15, 0x16, 0x0a, 0x41, 0x0a, 0x04, 0x04, 0x05, + 0x02, 0x02, 0x12, 0x03, 0x4c, 0x02, 0x27, 0x22, 0x34, 0x20, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, + 0x6d, 0x20, 0x61, 0x67, 0x65, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x61, 0x63, + 0x68, 0x65, 0x64, 0x20, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x20, 0x73, 0x69, 0x6e, 0x63, 0x65, + 0x20, 0x6c, 0x61, 0x73, 0x74, 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x0a, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x05, 0x02, 0x02, 0x06, 0x12, 0x03, 0x4c, 0x02, 0x1a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x05, 0x02, 0x02, 0x01, 0x12, 0x03, 0x4c, 0x1b, 0x22, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, + 0x02, 0x03, 0x12, 0x03, 0x4c, 0x25, 0x26, 0x0a, 0x3a, 0x0a, 0x02, 0x04, 0x06, 0x12, 0x04, 0x52, + 0x00, 0x56, 0x01, 0x1a, 0x2e, 0x0a, 0x20, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x20, 0x74, + 0x6f, 0x20, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x20, 0x63, + 0x61, 0x63, 0x68, 0x65, 0x64, 0x20, 0x64, 0x61, 0x74, 0x61, 0x20, 0x62, 0x79, 0x20, 0x6b, 0x65, + 0x79, 0x2e, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x06, 0x01, 0x12, 0x03, 0x52, 0x08, 0x17, 0x0a, + 0x18, 0x0a, 0x04, 0x04, 0x06, 0x02, 0x00, 0x12, 0x03, 0x53, 0x02, 0x11, 0x22, 0x0b, 0x20, 0x43, + 0x61, 0x63, 0x68, 0x65, 0x20, 0x6b, 0x65, 0x79, 0x0a, 0x0a, 0x0c, 0x0a, 
0x05, 0x04, 0x06, 0x02, + 0x00, 0x05, 0x12, 0x03, 0x53, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x00, 0x01, + 0x12, 0x03, 0x53, 0x09, 0x0c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x00, 0x03, 0x12, 0x03, + 0x53, 0x0f, 0x10, 0x0a, 0x1e, 0x0a, 0x04, 0x04, 0x06, 0x02, 0x01, 0x12, 0x03, 0x54, 0x02, 0x1a, + 0x22, 0x11, 0x20, 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x63, 0x61, 0x63, + 0x68, 0x65, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x01, 0x06, 0x12, 0x03, 0x54, 0x02, + 0x0e, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x01, 0x01, 0x12, 0x03, 0x54, 0x0f, 0x15, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x01, 0x03, 0x12, 0x03, 0x54, 0x18, 0x19, 0x0a, 0x27, 0x0a, + 0x04, 0x04, 0x06, 0x02, 0x02, 0x12, 0x03, 0x55, 0x02, 0x20, 0x22, 0x1a, 0x20, 0x4f, 0x76, 0x65, + 0x72, 0x77, 0x72, 0x69, 0x74, 0x65, 0x20, 0x66, 0x6c, 0x61, 0x67, 0x20, 0x69, 0x66, 0x20, 0x65, + 0x78, 0x69, 0x73, 0x74, 0x73, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x02, 0x06, 0x12, + 0x03, 0x55, 0x02, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x02, 0x01, 0x12, 0x03, 0x55, + 0x12, 0x1b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x02, 0x03, 0x12, 0x03, 0x55, 0x1e, 0x1f, + 0x0a, 0x69, 0x0a, 0x02, 0x04, 0x07, 0x12, 0x04, 0x5b, 0x00, 0x5d, 0x01, 0x1a, 0x34, 0x0a, 0x20, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x20, 0x6f, 0x66, 0x20, 0x63, 0x61, 0x63, 0x68, 0x65, 0x20, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, + 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x20, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x0a, 0x22, 0x27, 0x20, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x2c, 0x20, 0x73, 0x75, 0x63, 0x63, + 0x65, 0x73, 0x73, 0x20, 0x69, 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x20, 0x62, 0x79, + 0x20, 0x6e, 0x6f, 0x20, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, + 0x07, 0x01, 0x12, 0x03, 0x5b, 0x08, 0x18, 0x0a, 0x34, 0x0a, 0x02, 0x04, 0x08, 0x12, 0x04, 0x62, + 0x00, 0x64, 
0x01, 0x1a, 0x28, 0x0a, 0x20, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x20, 0x74, + 0x6f, 0x20, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x20, 0x63, 0x61, 0x63, 0x68, 0x65, 0x64, 0x20, + 0x64, 0x61, 0x74, 0x61, 0x20, 0x62, 0x79, 0x20, 0x6b, 0x65, 0x79, 0x2e, 0x0a, 0x0a, 0x0a, 0x0a, + 0x03, 0x04, 0x08, 0x01, 0x12, 0x03, 0x62, 0x08, 0x1a, 0x0a, 0x18, 0x0a, 0x04, 0x04, 0x08, 0x02, + 0x00, 0x12, 0x03, 0x63, 0x02, 0x11, 0x22, 0x0b, 0x20, 0x43, 0x61, 0x63, 0x68, 0x65, 0x20, 0x6b, + 0x65, 0x79, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x08, 0x02, 0x00, 0x05, 0x12, 0x03, 0x63, 0x02, + 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x08, 0x02, 0x00, 0x01, 0x12, 0x03, 0x63, 0x09, 0x0c, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x08, 0x02, 0x00, 0x03, 0x12, 0x03, 0x63, 0x0f, 0x10, 0x0a, 0x65, 0x0a, + 0x02, 0x04, 0x09, 0x12, 0x04, 0x69, 0x00, 0x6b, 0x01, 0x1a, 0x30, 0x0a, 0x20, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x6f, 0x66, + 0x20, 0x63, 0x61, 0x63, 0x68, 0x65, 0x20, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x20, + 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x0a, 0x22, 0x27, 0x20, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x2c, 0x20, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x20, 0x69, 0x6e, 0x64, + 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x6e, 0x6f, 0x20, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x73, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x09, 0x01, 0x12, 0x03, 0x69, 0x08, 0x1b, + 0x0a, 0x6c, 0x0a, 0x02, 0x04, 0x0a, 0x12, 0x04, 0x6e, 0x00, 0x73, 0x01, 0x1a, 0x60, 0x20, 0x41, + 0x20, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x69, 0x6e, 0x63, + 0x6c, 0x75, 0x64, 0x69, 0x6e, 0x67, 0x20, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x2c, 0x20, 0x68, 0x65, + 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x20, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, + 0x2c, 0x20, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x74, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2c, 0x20, 
0x61, 0x6e, 0x64, 0x20, 0x76, 0x61, 0x72, 0x69, + 0x6f, 0x75, 0x73, 0x20, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x0a, 0x0a, 0x0a, + 0x0a, 0x03, 0x04, 0x0a, 0x01, 0x12, 0x03, 0x6e, 0x08, 0x13, 0x0a, 0x48, 0x0a, 0x04, 0x04, 0x0a, + 0x02, 0x00, 0x12, 0x03, 0x6f, 0x02, 0x11, 0x22, 0x3b, 0x20, 0x54, 0x68, 0x65, 0x20, 0x75, 0x6e, + 0x69, 0x71, 0x75, 0x65, 0x20, 0x49, 0x44, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, + 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x2d, 0x20, 0x73, 0x61, + 0x6d, 0x65, 0x20, 0x61, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x61, 0x63, 0x68, 0x65, 0x20, + 0x6b, 0x65, 0x79, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x0a, 0x02, 0x00, 0x05, 0x12, 0x03, 0x6f, + 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x0a, 0x02, 0x00, 0x01, 0x12, 0x03, 0x6f, 0x09, 0x0c, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x0a, 0x02, 0x00, 0x03, 0x12, 0x03, 0x6f, 0x0f, 0x10, 0x0a, 0x3d, + 0x0a, 0x04, 0x04, 0x0a, 0x02, 0x01, 0x12, 0x03, 0x70, 0x02, 0x16, 0x22, 0x30, 0x20, 0x54, 0x68, + 0x65, 0x20, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x20, 0x49, 0x44, 0x20, 0x6f, 0x66, 0x20, 0x74, + 0x68, 0x65, 0x20, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, + 0x20, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x0a, 0x02, 0x01, 0x05, 0x12, 0x03, 0x70, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x0a, 0x02, 0x01, 0x01, 0x12, 0x03, 0x70, 0x09, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x0a, 0x02, + 0x01, 0x03, 0x12, 0x03, 0x70, 0x14, 0x15, 0x0a, 0x41, 0x0a, 0x04, 0x04, 0x0a, 0x02, 0x02, 0x12, + 0x03, 0x71, 0x02, 0x32, 0x22, 0x34, 0x20, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, + 0x20, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x20, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, + 0x20, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 
0x04, 0x0a, + 0x02, 0x02, 0x06, 0x12, 0x03, 0x71, 0x02, 0x1a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x0a, 0x02, 0x02, + 0x01, 0x12, 0x03, 0x71, 0x1b, 0x2d, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x0a, 0x02, 0x02, 0x03, 0x12, + 0x03, 0x71, 0x30, 0x31, 0x0a, 0x37, 0x0a, 0x04, 0x04, 0x0a, 0x02, 0x03, 0x12, 0x03, 0x72, 0x02, + 0x2b, 0x22, 0x2a, 0x20, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x69, 0x73, + 0x20, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x0a, 0x02, 0x03, 0x06, 0x12, 0x03, 0x72, 0x02, 0x1b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x0a, 0x02, 0x03, 0x01, 0x12, 0x03, 0x72, 0x1c, 0x26, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x0a, 0x02, + 0x03, 0x03, 0x12, 0x03, 0x72, 0x29, 0x2a, 0x0a, 0x45, 0x0a, 0x02, 0x04, 0x0b, 0x12, 0x04, 0x78, + 0x00, 0x7c, 0x01, 0x1a, 0x39, 0x0a, 0x20, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x20, 0x74, + 0x6f, 0x20, 0x67, 0x65, 0x74, 0x20, 0x6f, 0x72, 0x20, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x20, + 0x61, 0x20, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x66, 0x6f, + 0x72, 0x20, 0x61, 0x20, 0x63, 0x61, 0x63, 0x68, 0x65, 0x20, 0x6b, 0x65, 0x79, 0x0a, 0x0a, 0x0a, + 0x0a, 0x03, 0x04, 0x0b, 0x01, 0x12, 0x03, 0x78, 0x08, 0x25, 0x0a, 0x48, 0x0a, 0x04, 0x04, 0x0b, + 0x02, 0x00, 0x12, 0x03, 0x79, 0x02, 0x11, 0x22, 0x3b, 0x20, 0x54, 0x68, 0x65, 0x20, 0x75, 0x6e, + 0x69, 0x71, 0x75, 0x65, 0x20, 0x49, 0x44, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, + 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x2d, 0x20, 0x73, 0x61, + 0x6d, 0x65, 0x20, 0x61, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x61, 0x63, 0x68, 0x65, 0x20, + 0x6b, 0x65, 0x79, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x0b, 0x02, 0x00, 0x05, 0x12, 0x03, 0x79, + 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x0b, 0x02, 0x00, 0x01, 0x12, 0x03, 0x79, 0x09, 0x0c, + 0x0a, 0x0c, 0x0a, 0x05, 
0x04, 0x0b, 0x02, 0x00, 0x03, 0x12, 0x03, 0x79, 0x0f, 0x10, 0x0a, 0x3d, + 0x0a, 0x04, 0x04, 0x0b, 0x02, 0x01, 0x12, 0x03, 0x7a, 0x02, 0x16, 0x22, 0x30, 0x20, 0x54, 0x68, + 0x65, 0x20, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x20, 0x49, 0x44, 0x20, 0x6f, 0x66, 0x20, 0x74, + 0x68, 0x65, 0x20, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, + 0x20, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x0b, 0x02, 0x01, 0x05, 0x12, 0x03, 0x7a, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x0b, 0x02, 0x01, 0x01, 0x12, 0x03, 0x7a, 0x09, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x0b, 0x02, + 0x01, 0x03, 0x12, 0x03, 0x7a, 0x14, 0x15, 0x0a, 0x41, 0x0a, 0x04, 0x04, 0x0b, 0x02, 0x02, 0x12, + 0x03, 0x7b, 0x02, 0x32, 0x22, 0x34, 0x20, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, 0x64, + 0x20, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x20, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, + 0x20, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x0b, + 0x02, 0x02, 0x06, 0x12, 0x03, 0x7b, 0x02, 0x1a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x0b, 0x02, 0x02, + 0x01, 0x12, 0x03, 0x7b, 0x1b, 0x2d, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x0b, 0x02, 0x02, 0x03, 0x12, + 0x03, 0x7b, 0x30, 0x31, 0x0a, 0x47, 0x0a, 0x02, 0x04, 0x0c, 0x12, 0x06, 0x81, 0x01, 0x00, 0x83, + 0x01, 0x01, 0x1a, 0x39, 0x0a, 0x20, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x20, 0x74, 0x6f, + 0x20, 0x67, 0x65, 0x74, 0x20, 0x6f, 0x72, 0x20, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x20, 0x61, + 0x20, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x66, 0x6f, 0x72, + 0x20, 0x61, 0x20, 0x63, 0x61, 0x63, 0x68, 0x65, 0x20, 0x6b, 0x65, 0x79, 0x0a, 0x0a, 0x0b, 0x0a, + 0x03, 0x04, 0x0c, 0x01, 0x12, 0x04, 0x81, 0x01, 0x08, 0x26, 0x0a, 0x3c, 0x0a, 0x04, 0x04, 0x0c, + 0x02, 0x00, 0x12, 0x04, 0x82, 0x01, 0x02, 0x1e, 0x22, 0x2e, 
0x20, 0x54, 0x68, 0x65, 0x20, 0x72, + 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, + 0x77, 0x61, 0x73, 0x20, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x20, 0x6f, 0x72, 0x20, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0c, 0x02, 0x00, + 0x06, 0x12, 0x04, 0x82, 0x01, 0x02, 0x0d, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0c, 0x02, 0x00, 0x01, + 0x12, 0x04, 0x82, 0x01, 0x0e, 0x19, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0c, 0x02, 0x00, 0x03, 0x12, + 0x04, 0x82, 0x01, 0x1c, 0x1d, 0x0a, 0x43, 0x0a, 0x02, 0x04, 0x0d, 0x12, 0x06, 0x88, 0x01, 0x00, + 0x8b, 0x01, 0x01, 0x1a, 0x35, 0x0a, 0x20, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x20, 0x74, + 0x6f, 0x20, 0x72, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x72, 0x65, + 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x20, + 0x63, 0x61, 0x63, 0x68, 0x65, 0x20, 0x6b, 0x65, 0x79, 0x0a, 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x0d, + 0x01, 0x12, 0x04, 0x88, 0x01, 0x08, 0x21, 0x0a, 0x49, 0x0a, 0x04, 0x04, 0x0d, 0x02, 0x00, 0x12, + 0x04, 0x89, 0x01, 0x02, 0x11, 0x22, 0x3b, 0x20, 0x54, 0x68, 0x65, 0x20, 0x75, 0x6e, 0x69, 0x71, + 0x75, 0x65, 0x20, 0x49, 0x44, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x72, 0x65, + 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x2d, 0x20, 0x73, 0x61, 0x6d, 0x65, + 0x20, 0x61, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x61, 0x63, 0x68, 0x65, 0x20, 0x6b, 0x65, + 0x79, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0d, 0x02, 0x00, 0x05, 0x12, 0x04, 0x89, 0x01, 0x02, + 0x08, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0d, 0x02, 0x00, 0x01, 0x12, 0x04, 0x89, 0x01, 0x09, 0x0c, + 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0d, 0x02, 0x00, 0x03, 0x12, 0x04, 0x89, 0x01, 0x0f, 0x10, 0x0a, + 0x3e, 0x0a, 0x04, 0x04, 0x0d, 0x02, 0x01, 0x12, 0x04, 0x8a, 0x01, 0x02, 0x16, 0x22, 0x30, 0x20, + 0x54, 0x68, 0x65, 0x20, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x20, 0x49, 0x44, 0x20, 0x6f, 0x66, + 
0x20, 0x74, 0x68, 0x65, 0x20, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, + 0x68, 0x65, 0x20, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x0a, + 0x0d, 0x0a, 0x05, 0x04, 0x0d, 0x02, 0x01, 0x05, 0x12, 0x04, 0x8a, 0x01, 0x02, 0x08, 0x0a, 0x0d, + 0x0a, 0x05, 0x04, 0x0d, 0x02, 0x01, 0x01, 0x12, 0x04, 0x8a, 0x01, 0x09, 0x11, 0x0a, 0x0d, 0x0a, + 0x05, 0x04, 0x0d, 0x02, 0x01, 0x03, 0x12, 0x04, 0x8a, 0x01, 0x14, 0x15, 0x0a, 0x6c, 0x0a, 0x02, + 0x04, 0x0e, 0x12, 0x06, 0x90, 0x01, 0x00, 0x92, 0x01, 0x01, 0x1a, 0x35, 0x0a, 0x20, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x6f, + 0x66, 0x20, 0x72, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x20, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x0a, 0x22, 0x27, 0x20, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x2c, 0x20, 0x73, 0x75, 0x63, 0x63, 0x65, + 0x73, 0x73, 0x20, 0x69, 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, + 0x6e, 0x6f, 0x20, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x0a, 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x0e, + 0x01, 0x12, 0x04, 0x90, 0x01, 0x08, 0x22, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +]; +include!("flyteidl2.cacheservice.tonic.rs"); +// @@protoc_insertion_point(module) \ No newline at end of file diff --git a/gen/rust/src/flyteidl2.cacheservice.tonic.rs b/gen/rust/src/flyteidl2.cacheservice.tonic.rs new file mode 100644 index 0000000000..f076e8a7f9 --- /dev/null +++ b/gen/rust/src/flyteidl2.cacheservice.tonic.rs @@ -0,0 +1,606 @@ +// @generated +/// Generated client implementations. 
+pub mod cache_service_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + #[derive(Debug, Clone)] + pub struct CacheServiceClient { + inner: tonic::client::Grpc, + } + impl CacheServiceClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl CacheServiceClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> CacheServiceClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + CacheServiceClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. 
+ /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + pub async fn get( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl2.cacheservice.CacheService/Get", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("flyteidl2.cacheservice.CacheService", "Get")); + self.inner.unary(req, path, codec).await + } + pub async fn put( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl2.cacheservice.CacheService/Put", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("flyteidl2.cacheservice.CacheService", "Put")); + self.inner.unary(req, path, codec).await + } + pub async fn delete( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec 
= tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl2.cacheservice.CacheService/Delete", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl2.cacheservice.CacheService", "Delete"), + ); + self.inner.unary(req, path, codec).await + } + pub async fn get_or_extend_reservation( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl2.cacheservice.CacheService/GetOrExtendReservation", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl2.cacheservice.CacheService", + "GetOrExtendReservation", + ), + ); + self.inner.unary(req, path, codec).await + } + pub async fn release_reservation( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl2.cacheservice.CacheService/ReleaseReservation", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl2.cacheservice.CacheService", + "ReleaseReservation", + ), + ); + self.inner.unary(req, path, codec).await + } + } +} +/// Generated server implementations. 
+pub mod cache_service_server { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with CacheServiceServer. + #[async_trait] + pub trait CacheService: Send + Sync + 'static { + async fn get( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn put( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn delete( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn get_or_extend_reservation( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn release_reservation( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + } + #[derive(Debug)] + pub struct CacheServiceServer { + inner: Arc, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, + } + impl CacheServiceServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. 
+ #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } + } + impl tonic::codegen::Service> for CacheServiceServer + where + T: CacheService, + B: Body + Send + 'static, + B::Error: Into + Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + match req.uri().path() { + "/flyteidl2.cacheservice.CacheService/Get" => { + #[allow(non_camel_case_types)] + struct GetSvc(pub Arc); + impl< + T: CacheService, + > tonic::server::UnaryService for GetSvc { + type Response = super::GetCacheResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = 
self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl2.cacheservice.CacheService/Put" => { + #[allow(non_camel_case_types)] + struct PutSvc(pub Arc); + impl< + T: CacheService, + > tonic::server::UnaryService for PutSvc { + type Response = super::PutCacheResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::put(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = PutSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl2.cacheservice.CacheService/Delete" => { + #[allow(non_camel_case_types)] + struct DeleteSvc(pub Arc); + impl< + T: CacheService, + > tonic::server::UnaryService + for DeleteSvc { + type Response = super::DeleteCacheResponse; + type Future = BoxFuture< + tonic::Response, + 
tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::delete(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = DeleteSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl2.cacheservice.CacheService/GetOrExtendReservation" => { + #[allow(non_camel_case_types)] + struct GetOrExtendReservationSvc(pub Arc); + impl< + T: CacheService, + > tonic::server::UnaryService + for GetOrExtendReservationSvc { + type Response = super::GetOrExtendReservationResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_or_extend_reservation( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetOrExtendReservationSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = 
tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl2.cacheservice.CacheService/ReleaseReservation" => { + #[allow(non_camel_case_types)] + struct ReleaseReservationSvc(pub Arc); + impl< + T: CacheService, + > tonic::server::UnaryService + for ReleaseReservationSvc { + type Response = super::ReleaseReservationResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::release_reservation(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = ReleaseReservationSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + Ok( + http::Response::builder() + .status(200) + .header("grpc-status", tonic::Code::Unimplemented as i32) + .header( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ) + .body(empty_body()) + .unwrap(), + ) + }) + } + } + } + } + impl Clone for CacheServiceServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + 
Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, + } + } + } + impl tonic::server::NamedService for CacheServiceServer { + const NAME: &'static str = "flyteidl2.cacheservice.CacheService"; + } +} diff --git a/gen/rust/src/flyteidl2.cacheservice.v2.rs b/gen/rust/src/flyteidl2.cacheservice.v2.rs new file mode 100644 index 0000000000..03e736043f --- /dev/null +++ b/gen/rust/src/flyteidl2.cacheservice.v2.rs @@ -0,0 +1,365 @@ +// @generated +// This file is @generated by prost-build. +/// +/// Identifier for cache operations, including org, project, and domain. +/// This is used to scope cache operations to specific organizational contexts. +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Identifier { + /// Organization identifier + #[prost(string, tag="1")] + pub org: ::prost::alloc::string::String, + /// Project identifier + #[prost(string, tag="2")] + pub project: ::prost::alloc::string::String, + /// Domain identifier + #[prost(string, tag="3")] + pub domain: ::prost::alloc::string::String, +} +/// +/// Request to retrieve cached data by key. +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetCacheRequest { + #[prost(message, optional, tag="1")] + pub base_request: ::core::option::Option, + /// Identifier for the cache operation + #[prost(message, optional, tag="2")] + pub identifier: ::core::option::Option, +} +/// +/// Request to store/update cached data by key. 
+#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct PutCacheRequest { + #[prost(message, optional, tag="1")] + pub base_request: ::core::option::Option, + /// Identifier for the cache operation + #[prost(message, optional, tag="2")] + pub identifier: ::core::option::Option, +} +/// +/// Request to delete cached data by key. +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DeleteCacheRequest { + #[prost(message, optional, tag="1")] + pub base_request: ::core::option::Option, + /// Identifier for the cache operation + #[prost(message, optional, tag="2")] + pub identifier: ::core::option::Option, +} +/// +/// Request to get or extend a reservation for a cache key +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetOrExtendReservationRequest { + #[prost(message, optional, tag="1")] + pub base_request: ::core::option::Option, + /// Identifier for the cache operation + #[prost(message, optional, tag="2")] + pub identifier: ::core::option::Option, +} +/// +/// Request to release the reservation for a cache key +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ReleaseReservationRequest { + #[prost(message, optional, tag="1")] + pub base_request: ::core::option::Option, + /// Identifier for the cache operation + #[prost(message, optional, tag="2")] + pub identifier: ::core::option::Option, +} +/// Encoded file descriptor set for the `flyteidl2.cacheservice.v2` package +pub const FILE_DESCRIPTOR_SET: &[u8] = &[ + 0x0a, 0x88, 0x23, 0x0a, 0x2c, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, + 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x76, 
0x32, 0x2f, 0x63, + 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x19, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, + 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x1a, 0x1b, 0x62, 0x75, + 0x66, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x29, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2f, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6b, 0x0a, 0x0a, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x12, 0x19, 0x0a, 0x03, 0x6f, 0x72, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x03, 0x6f, 0x72, 0x67, 0x12, 0x21, 0x0a, + 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, + 0xba, 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x12, 0x1f, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, + 0x6e, 0x22, 0xac, 0x01, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4a, 0x0a, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x72, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x52, 0x0b, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x4d, 
0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, + 0x32, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x42, 0x06, 0xba, 0x48, + 0x03, 0xc8, 0x01, 0x01, 0x52, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, + 0x22, 0xac, 0x01, 0x0a, 0x0f, 0x50, 0x75, 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x4a, 0x0a, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x50, 0x75, 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x52, 0x0b, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x4d, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, + 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x42, 0x06, 0xba, 0x48, 0x03, + 0xc8, 0x01, 0x01, 0x52, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x22, + 0xb2, 0x01, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x44, 0x65, 0x6c, 
0x65, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0b, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, + 0x42, 0x06, 0xba, 0x48, 0x03, 0xc8, 0x01, 0x01, 0x52, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x66, 0x69, 0x65, 0x72, 0x22, 0xc8, 0x01, 0x0a, 0x1d, 0x47, 0x65, 0x74, 0x4f, 0x72, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x58, 0x0a, 0x0c, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, 0x2e, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x64, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x52, 0x0b, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x4d, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, + 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x32, + 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x42, 0x06, 0xba, 0x48, 0x03, + 0xc8, 0x01, 0x01, 0x52, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x22, + 0xc0, 0x01, 0x0a, 0x19, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x65, 
0x72, + 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x54, 0x0a, + 0x0c, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x31, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, + 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x6c, + 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x0b, 0x62, 0x61, 0x73, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, + 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x42, 0x06, + 0xba, 0x48, 0x03, 0xc8, 0x01, 0x01, 0x52, 0x0a, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x32, 0xbb, 0x04, 0x0a, 0x0c, 0x43, 0x61, 0x63, 0x68, 0x65, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x12, 0x5b, 0x0a, 0x03, 0x47, 0x65, 0x74, 0x12, 0x2a, 0x2e, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, + 0x47, 0x65, 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x5b, 0x0a, 0x03, 0x50, 0x75, 0x74, 0x12, 0x2a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x50, 
0x75, 0x74, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, + 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x50, 0x75, 0x74, + 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x64, 0x0a, + 0x06, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x12, 0x2d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x61, 0x63, 0x68, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x8a, 0x01, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x4f, 0x72, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x38, + 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x72, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x7e, 0x0a, 0x12, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x65, 0x72, + 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x34, 0x2e, 0x66, 0x6c, 
0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, + 0x76, 0x32, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x42, 0xf7, 0x01, 0x0a, 0x1d, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x32, 0x2e, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, + 0x76, 0x32, 0x42, 0x11, 0x43, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x2f, 0x76, 0x32, 0xa2, 0x02, 0x03, 0x46, 0x43, 0x58, 0xaa, 0x02, 0x19, 0x46, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x43, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x19, 0x46, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x43, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, + 0x65, 0x5c, 0x56, 0x32, 0xe2, 0x02, 0x25, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, + 0x5c, 0x43, 0x61, 0x63, 0x68, 0x65, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5c, 0x56, 0x32, + 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x1b, 0x46, + 0x6c, 
0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x3a, 0x3a, 0x43, 0x61, 0x63, 0x68, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x3a, 0x3a, 0x56, 0x32, 0x4a, 0xa6, 0x13, 0x0a, 0x06, 0x12, + 0x04, 0x00, 0x00, 0x4d, 0x01, 0x0a, 0x08, 0x0a, 0x01, 0x0c, 0x12, 0x03, 0x00, 0x00, 0x12, 0x0a, + 0x08, 0x0a, 0x01, 0x02, 0x12, 0x03, 0x02, 0x00, 0x22, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x00, 0x12, + 0x03, 0x04, 0x00, 0x25, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x01, 0x12, 0x03, 0x05, 0x00, 0x33, 0x0a, + 0x08, 0x0a, 0x01, 0x08, 0x12, 0x03, 0x07, 0x00, 0x54, 0x0a, 0x09, 0x0a, 0x02, 0x08, 0x0b, 0x12, + 0x03, 0x07, 0x00, 0x54, 0x0a, 0x90, 0x01, 0x0a, 0x02, 0x06, 0x00, 0x12, 0x04, 0x0c, 0x00, 0x1b, + 0x01, 0x1a, 0x83, 0x01, 0x0a, 0x20, 0x43, 0x61, 0x63, 0x68, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x20, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x73, 0x20, 0x6f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x63, 0x61, 0x63, 0x68, 0x65, 0x20, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x20, 0x69, 0x6e, 0x63, 0x6c, 0x75, + 0x64, 0x69, 0x6e, 0x67, 0x20, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x61, 0x6c, 0x2c, 0x20, + 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2c, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x64, 0x65, 0x6c, + 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x6f, 0x66, 0x20, 0x63, 0x61, 0x63, 0x68, 0x65, 0x64, 0x20, + 0x74, 0x61, 0x73, 0x6b, 0x2f, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x20, 0x6f, 0x75, + 0x74, 0x70, 0x75, 0x74, 0x73, 0x2e, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x06, 0x00, 0x01, 0x12, 0x03, + 0x0c, 0x08, 0x14, 0x0a, 0x2c, 0x0a, 0x04, 0x06, 0x00, 0x02, 0x00, 0x12, 0x03, 0x0e, 0x02, 0x4d, + 0x1a, 0x1f, 0x20, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x73, 0x20, 0x63, 0x61, 0x63, + 0x68, 0x65, 0x64, 0x20, 0x64, 0x61, 0x74, 0x61, 0x20, 0x62, 0x79, 0x20, 0x6b, 0x65, 0x79, 0x2e, + 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x00, 0x01, 0x12, 0x03, 0x0e, 0x06, 0x09, 0x0a, + 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x00, 
0x02, 0x12, 0x03, 0x0e, 0x0a, 0x19, 0x0a, 0x0c, 0x0a, + 0x05, 0x06, 0x00, 0x02, 0x00, 0x03, 0x12, 0x03, 0x0e, 0x24, 0x4b, 0x0a, 0x34, 0x0a, 0x04, 0x06, + 0x00, 0x02, 0x01, 0x12, 0x03, 0x11, 0x02, 0x4d, 0x1a, 0x27, 0x20, 0x53, 0x74, 0x6f, 0x72, 0x65, + 0x73, 0x20, 0x6f, 0x72, 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x73, 0x20, 0x63, 0x61, 0x63, + 0x68, 0x65, 0x64, 0x20, 0x64, 0x61, 0x74, 0x61, 0x20, 0x62, 0x79, 0x20, 0x6b, 0x65, 0x79, 0x2e, + 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x01, 0x01, 0x12, 0x03, 0x11, 0x06, 0x09, 0x0a, + 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x01, 0x02, 0x12, 0x03, 0x11, 0x0a, 0x19, 0x0a, 0x0c, 0x0a, + 0x05, 0x06, 0x00, 0x02, 0x01, 0x03, 0x12, 0x03, 0x11, 0x24, 0x4b, 0x0a, 0x2a, 0x0a, 0x04, 0x06, + 0x00, 0x02, 0x02, 0x12, 0x03, 0x14, 0x02, 0x56, 0x1a, 0x1d, 0x20, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x73, 0x20, 0x63, 0x61, 0x63, 0x68, 0x65, 0x64, 0x20, 0x64, 0x61, 0x74, 0x61, 0x20, 0x62, + 0x79, 0x20, 0x6b, 0x65, 0x79, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x02, 0x01, + 0x12, 0x03, 0x14, 0x06, 0x0c, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x02, 0x02, 0x12, 0x03, + 0x14, 0x0d, 0x1f, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x02, 0x03, 0x12, 0x03, 0x14, 0x2a, + 0x54, 0x0a, 0x3a, 0x0a, 0x04, 0x06, 0x00, 0x02, 0x03, 0x12, 0x03, 0x17, 0x02, 0x7c, 0x1a, 0x2d, + 0x20, 0x47, 0x65, 0x74, 0x20, 0x6f, 0x72, 0x20, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x20, 0x61, + 0x20, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x66, 0x6f, 0x72, + 0x20, 0x61, 0x20, 0x63, 0x61, 0x63, 0x68, 0x65, 0x20, 0x6b, 0x65, 0x79, 0x0a, 0x0a, 0x0c, 0x0a, + 0x05, 0x06, 0x00, 0x02, 0x03, 0x01, 0x12, 0x03, 0x17, 0x06, 0x1c, 0x0a, 0x0c, 0x0a, 0x05, 0x06, + 0x00, 0x02, 0x03, 0x02, 0x12, 0x03, 0x17, 0x1d, 0x3a, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, + 0x03, 0x03, 0x12, 0x03, 0x17, 0x45, 0x7a, 0x0a, 0x36, 0x0a, 0x04, 0x06, 0x00, 0x02, 0x04, 0x12, + 0x03, 0x1a, 0x02, 0x70, 0x1a, 0x29, 0x20, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 
0x65, 0x20, 0x74, + 0x68, 0x65, 0x20, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x66, + 0x6f, 0x72, 0x20, 0x61, 0x20, 0x63, 0x61, 0x63, 0x68, 0x65, 0x20, 0x6b, 0x65, 0x79, 0x0a, 0x0a, + 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x04, 0x01, 0x12, 0x03, 0x1a, 0x06, 0x18, 0x0a, 0x0c, 0x0a, + 0x05, 0x06, 0x00, 0x02, 0x04, 0x02, 0x12, 0x03, 0x1a, 0x19, 0x32, 0x0a, 0x0c, 0x0a, 0x05, 0x06, + 0x00, 0x02, 0x04, 0x03, 0x12, 0x03, 0x1a, 0x3d, 0x6e, 0x0a, 0xa1, 0x01, 0x0a, 0x02, 0x04, 0x00, + 0x12, 0x04, 0x21, 0x00, 0x25, 0x01, 0x1a, 0x94, 0x01, 0x0a, 0x20, 0x49, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x66, 0x69, 0x65, 0x72, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x63, 0x61, 0x63, 0x68, 0x65, 0x20, + 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2c, 0x20, 0x69, 0x6e, 0x63, 0x6c, + 0x75, 0x64, 0x69, 0x6e, 0x67, 0x20, 0x6f, 0x72, 0x67, 0x2c, 0x20, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x2c, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x2e, 0x0a, + 0x20, 0x54, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20, 0x75, 0x73, 0x65, 0x64, 0x20, 0x74, 0x6f, + 0x20, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x20, 0x63, 0x61, 0x63, 0x68, 0x65, 0x20, 0x6f, 0x70, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x73, 0x70, 0x65, 0x63, 0x69, + 0x66, 0x69, 0x63, 0x20, 0x6f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x73, 0x2e, 0x0a, 0x0a, 0x0a, 0x0a, + 0x03, 0x04, 0x00, 0x01, 0x12, 0x03, 0x21, 0x08, 0x12, 0x0a, 0x26, 0x0a, 0x04, 0x04, 0x00, 0x02, + 0x00, 0x12, 0x03, 0x22, 0x02, 0x3b, 0x22, 0x19, 0x20, 0x4f, 0x72, 0x67, 0x61, 0x6e, 0x69, 0x7a, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, + 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x05, 0x12, 0x03, 0x22, 0x02, 0x08, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x01, 0x12, 0x03, 0x22, 0x09, 0x0c, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x00, 
0x02, 0x00, 0x03, 0x12, 0x03, 0x22, 0x0f, 0x10, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x00, 0x02, 0x00, 0x08, 0x12, 0x03, 0x22, 0x11, 0x3a, 0x0a, 0x10, 0x0a, 0x09, 0x04, 0x00, 0x02, + 0x00, 0x08, 0x87, 0x09, 0x0e, 0x02, 0x12, 0x03, 0x22, 0x12, 0x39, 0x0a, 0x21, 0x0a, 0x04, 0x04, + 0x00, 0x02, 0x01, 0x12, 0x03, 0x23, 0x02, 0x3f, 0x22, 0x14, 0x20, 0x50, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x20, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x0a, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x05, 0x12, 0x03, 0x23, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x00, 0x02, 0x01, 0x01, 0x12, 0x03, 0x23, 0x09, 0x10, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, + 0x02, 0x01, 0x03, 0x12, 0x03, 0x23, 0x13, 0x14, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, + 0x08, 0x12, 0x03, 0x23, 0x15, 0x3e, 0x0a, 0x10, 0x0a, 0x09, 0x04, 0x00, 0x02, 0x01, 0x08, 0x87, + 0x09, 0x0e, 0x02, 0x12, 0x03, 0x23, 0x16, 0x3d, 0x0a, 0x20, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x02, + 0x12, 0x03, 0x24, 0x02, 0x3e, 0x22, 0x13, 0x20, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x20, 0x69, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, + 0x02, 0x02, 0x05, 0x12, 0x03, 0x24, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x02, + 0x01, 0x12, 0x03, 0x24, 0x09, 0x0f, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x02, 0x03, 0x12, + 0x03, 0x24, 0x12, 0x13, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x02, 0x08, 0x12, 0x03, 0x24, + 0x14, 0x3d, 0x0a, 0x10, 0x0a, 0x09, 0x04, 0x00, 0x02, 0x02, 0x08, 0x87, 0x09, 0x0e, 0x02, 0x12, + 0x03, 0x24, 0x15, 0x3c, 0x0a, 0x36, 0x0a, 0x02, 0x04, 0x01, 0x12, 0x04, 0x2a, 0x00, 0x2d, 0x01, + 0x1a, 0x2a, 0x0a, 0x20, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x72, + 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x20, 0x63, 0x61, 0x63, 0x68, 0x65, 0x64, 0x20, 0x64, + 0x61, 0x74, 0x61, 0x20, 0x62, 0x79, 0x20, 0x6b, 0x65, 0x79, 0x2e, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, + 0x04, 0x01, 0x01, 0x12, 0x03, 0x2a, 0x08, 0x17, 0x0a, 
0x0b, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x00, + 0x12, 0x03, 0x2b, 0x02, 0x3a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x06, 0x12, 0x03, + 0x2b, 0x02, 0x28, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x01, 0x12, 0x03, 0x2b, 0x29, + 0x35, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x03, 0x12, 0x03, 0x2b, 0x38, 0x39, 0x0a, + 0x31, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x01, 0x12, 0x03, 0x2c, 0x02, 0x43, 0x22, 0x24, 0x20, 0x49, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, + 0x65, 0x20, 0x63, 0x61, 0x63, 0x68, 0x65, 0x20, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x01, 0x06, 0x12, 0x03, 0x2c, 0x02, 0x0c, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x01, 0x01, 0x12, 0x03, 0x2c, 0x0d, 0x17, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x01, 0x02, 0x01, 0x03, 0x12, 0x03, 0x2c, 0x1a, 0x1b, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x01, 0x02, 0x01, 0x08, 0x12, 0x03, 0x2c, 0x1c, 0x42, 0x0a, 0x0f, 0x0a, 0x08, 0x04, 0x01, + 0x02, 0x01, 0x08, 0x87, 0x09, 0x19, 0x12, 0x03, 0x2c, 0x1d, 0x41, 0x0a, 0x3a, 0x0a, 0x02, 0x04, + 0x02, 0x12, 0x04, 0x32, 0x00, 0x35, 0x01, 0x1a, 0x2e, 0x0a, 0x20, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2f, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x20, 0x63, 0x61, 0x63, 0x68, 0x65, 0x64, 0x20, 0x64, 0x61, 0x74, 0x61, 0x20, 0x62, + 0x79, 0x20, 0x6b, 0x65, 0x79, 0x2e, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x02, 0x01, 0x12, 0x03, + 0x32, 0x08, 0x17, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x02, 0x02, 0x00, 0x12, 0x03, 0x33, 0x02, 0x3a, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, 0x06, 0x12, 0x03, 0x33, 0x02, 0x28, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, 0x01, 0x12, 0x03, 0x33, 0x29, 0x35, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x02, 0x02, 0x00, 0x03, 0x12, 0x03, 0x33, 0x38, 0x39, 0x0a, 0x31, 0x0a, 0x04, 0x04, 0x02, + 0x02, 0x01, 0x12, 0x03, 0x34, 0x02, 0x43, 0x22, 0x24, 0x20, 0x49, 0x64, 0x65, 0x6e, 0x74, 
0x69, + 0x66, 0x69, 0x65, 0x72, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x61, 0x63, + 0x68, 0x65, 0x20, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x02, 0x02, 0x01, 0x06, 0x12, 0x03, 0x34, 0x02, 0x0c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x02, 0x02, 0x01, 0x01, 0x12, 0x03, 0x34, 0x0d, 0x17, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, + 0x01, 0x03, 0x12, 0x03, 0x34, 0x1a, 0x1b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x01, 0x08, + 0x12, 0x03, 0x34, 0x1c, 0x42, 0x0a, 0x0f, 0x0a, 0x08, 0x04, 0x02, 0x02, 0x01, 0x08, 0x87, 0x09, + 0x19, 0x12, 0x03, 0x34, 0x1d, 0x41, 0x0a, 0x34, 0x0a, 0x02, 0x04, 0x03, 0x12, 0x04, 0x3a, 0x00, + 0x3d, 0x01, 0x1a, 0x28, 0x0a, 0x20, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x20, 0x74, 0x6f, + 0x20, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x20, 0x63, 0x61, 0x63, 0x68, 0x65, 0x64, 0x20, 0x64, + 0x61, 0x74, 0x61, 0x20, 0x62, 0x79, 0x20, 0x6b, 0x65, 0x79, 0x2e, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, + 0x04, 0x03, 0x01, 0x12, 0x03, 0x3a, 0x08, 0x1a, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x03, 0x02, 0x00, + 0x12, 0x03, 0x3b, 0x02, 0x3d, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x00, 0x06, 0x12, 0x03, + 0x3b, 0x02, 0x2b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x00, 0x01, 0x12, 0x03, 0x3b, 0x2c, + 0x38, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x00, 0x03, 0x12, 0x03, 0x3b, 0x3b, 0x3c, 0x0a, + 0x31, 0x0a, 0x04, 0x04, 0x03, 0x02, 0x01, 0x12, 0x03, 0x3c, 0x02, 0x43, 0x22, 0x24, 0x20, 0x49, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, + 0x65, 0x20, 0x63, 0x61, 0x63, 0x68, 0x65, 0x20, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x01, 0x06, 0x12, 0x03, 0x3c, 0x02, 0x0c, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x01, 0x01, 0x12, 0x03, 0x3c, 0x0d, 0x17, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x03, 0x02, 0x01, 0x03, 0x12, 0x03, 0x3c, 0x1a, 0x1b, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x03, 0x02, 0x01, 0x08, 
0x12, 0x03, 0x3c, 0x1c, 0x42, 0x0a, 0x0f, 0x0a, 0x08, 0x04, 0x03, + 0x02, 0x01, 0x08, 0x87, 0x09, 0x19, 0x12, 0x03, 0x3c, 0x1d, 0x41, 0x0a, 0x45, 0x0a, 0x02, 0x04, + 0x04, 0x12, 0x04, 0x42, 0x00, 0x45, 0x01, 0x1a, 0x39, 0x0a, 0x20, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x67, 0x65, 0x74, 0x20, 0x6f, 0x72, 0x20, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x64, 0x20, 0x61, 0x20, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x20, 0x63, 0x61, 0x63, 0x68, 0x65, 0x20, 0x6b, 0x65, + 0x79, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x04, 0x01, 0x12, 0x03, 0x42, 0x08, 0x25, 0x0a, 0x0b, + 0x0a, 0x04, 0x04, 0x04, 0x02, 0x00, 0x12, 0x03, 0x43, 0x02, 0x48, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x04, 0x02, 0x00, 0x06, 0x12, 0x03, 0x43, 0x02, 0x36, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, + 0x00, 0x01, 0x12, 0x03, 0x43, 0x37, 0x43, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x00, 0x03, + 0x12, 0x03, 0x43, 0x46, 0x47, 0x0a, 0x31, 0x0a, 0x04, 0x04, 0x04, 0x02, 0x01, 0x12, 0x03, 0x44, + 0x02, 0x43, 0x22, 0x24, 0x20, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x20, + 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x61, 0x63, 0x68, 0x65, 0x20, 0x6f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x01, + 0x06, 0x12, 0x03, 0x44, 0x02, 0x0c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x01, 0x01, 0x12, + 0x03, 0x44, 0x0d, 0x17, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x01, 0x03, 0x12, 0x03, 0x44, + 0x1a, 0x1b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x01, 0x08, 0x12, 0x03, 0x44, 0x1c, 0x42, + 0x0a, 0x0f, 0x0a, 0x08, 0x04, 0x04, 0x02, 0x01, 0x08, 0x87, 0x09, 0x19, 0x12, 0x03, 0x44, 0x1d, + 0x41, 0x0a, 0x41, 0x0a, 0x02, 0x04, 0x05, 0x12, 0x04, 0x4a, 0x00, 0x4d, 0x01, 0x1a, 0x35, 0x0a, + 0x20, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x72, 0x65, 0x6c, 0x65, + 0x61, 0x73, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x72, 0x65, 0x73, 
0x65, 0x72, 0x76, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x20, 0x63, 0x61, 0x63, 0x68, 0x65, 0x20, + 0x6b, 0x65, 0x79, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x05, 0x01, 0x12, 0x03, 0x4a, 0x08, 0x21, + 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x05, 0x02, 0x00, 0x12, 0x03, 0x4b, 0x02, 0x44, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x05, 0x02, 0x00, 0x06, 0x12, 0x03, 0x4b, 0x02, 0x32, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x05, 0x02, 0x00, 0x01, 0x12, 0x03, 0x4b, 0x33, 0x3f, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, + 0x00, 0x03, 0x12, 0x03, 0x4b, 0x42, 0x43, 0x0a, 0x31, 0x0a, 0x04, 0x04, 0x05, 0x02, 0x01, 0x12, + 0x03, 0x4c, 0x02, 0x43, 0x22, 0x24, 0x20, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, + 0x72, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x61, 0x63, 0x68, 0x65, 0x20, + 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, + 0x02, 0x01, 0x06, 0x12, 0x03, 0x4c, 0x02, 0x0c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x01, + 0x01, 0x12, 0x03, 0x4c, 0x0d, 0x17, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x01, 0x03, 0x12, + 0x03, 0x4c, 0x1a, 0x1b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x01, 0x08, 0x12, 0x03, 0x4c, + 0x1c, 0x42, 0x0a, 0x0f, 0x0a, 0x08, 0x04, 0x05, 0x02, 0x01, 0x08, 0x87, 0x09, 0x19, 0x12, 0x03, + 0x4c, 0x1d, 0x41, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +]; +include!("flyteidl2.cacheservice.v2.tonic.rs"); +// @@protoc_insertion_point(module) \ No newline at end of file diff --git a/gen/rust/src/flyteidl2.cacheservice.v2.tonic.rs b/gen/rust/src/flyteidl2.cacheservice.v2.tonic.rs new file mode 100644 index 0000000000..b301115cc1 --- /dev/null +++ b/gen/rust/src/flyteidl2.cacheservice.v2.tonic.rs @@ -0,0 +1,610 @@ +// @generated +/// Generated client implementations. 
+pub mod cache_service_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + #[derive(Debug, Clone)] + pub struct CacheServiceClient { + inner: tonic::client::Grpc, + } + impl CacheServiceClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl CacheServiceClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> CacheServiceClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + CacheServiceClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. 
+ /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + pub async fn get( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl2.cacheservice.v2.CacheService/Get", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl2.cacheservice.v2.CacheService", "Get"), + ); + self.inner.unary(req, path, codec).await + } + pub async fn put( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl2.cacheservice.v2.CacheService/Put", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl2.cacheservice.v2.CacheService", "Put"), + ); + self.inner.unary(req, path, codec).await + } + pub async fn delete( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", 
e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl2.cacheservice.v2.CacheService/Delete", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl2.cacheservice.v2.CacheService", "Delete"), + ); + self.inner.unary(req, path, codec).await + } + pub async fn get_or_extend_reservation( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl2.cacheservice.v2.CacheService/GetOrExtendReservation", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl2.cacheservice.v2.CacheService", + "GetOrExtendReservation", + ), + ); + self.inner.unary(req, path, codec).await + } + pub async fn release_reservation( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl2.cacheservice.v2.CacheService/ReleaseReservation", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl2.cacheservice.v2.CacheService", + "ReleaseReservation", + ), + ); + self.inner.unary(req, path, codec).await + } + } +} +/// Generated server implementations. 
+pub mod cache_service_server { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with CacheServiceServer. + #[async_trait] + pub trait CacheService: Send + Sync + 'static { + async fn get( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn put( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn delete( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn get_or_extend_reservation( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn release_reservation( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + } + #[derive(Debug)] + pub struct CacheServiceServer { + inner: Arc, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, + } + impl CacheServiceServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. 
+ #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } + } + impl tonic::codegen::Service> for CacheServiceServer + where + T: CacheService, + B: Body + Send + 'static, + B::Error: Into + Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + match req.uri().path() { + "/flyteidl2.cacheservice.v2.CacheService/Get" => { + #[allow(non_camel_case_types)] + struct GetSvc(pub Arc); + impl< + T: CacheService, + > tonic::server::UnaryService for GetSvc { + type Response = super::super::GetCacheResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let 
max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl2.cacheservice.v2.CacheService/Put" => { + #[allow(non_camel_case_types)] + struct PutSvc(pub Arc); + impl< + T: CacheService, + > tonic::server::UnaryService for PutSvc { + type Response = super::super::PutCacheResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::put(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = PutSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl2.cacheservice.v2.CacheService/Delete" => { + #[allow(non_camel_case_types)] + struct DeleteSvc(pub Arc); + impl< + T: CacheService, + > tonic::server::UnaryService + for DeleteSvc { + type Response = super::super::DeleteCacheResponse; + type 
Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::delete(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = DeleteSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl2.cacheservice.v2.CacheService/GetOrExtendReservation" => { + #[allow(non_camel_case_types)] + struct GetOrExtendReservationSvc(pub Arc); + impl< + T: CacheService, + > tonic::server::UnaryService + for GetOrExtendReservationSvc { + type Response = super::super::GetOrExtendReservationResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_or_extend_reservation( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetOrExtendReservationSvc(inner); + let codec = 
tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl2.cacheservice.v2.CacheService/ReleaseReservation" => { + #[allow(non_camel_case_types)] + struct ReleaseReservationSvc(pub Arc); + impl< + T: CacheService, + > tonic::server::UnaryService + for ReleaseReservationSvc { + type Response = super::super::ReleaseReservationResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::release_reservation(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = ReleaseReservationSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + Ok( + http::Response::builder() + .status(200) + .header("grpc-status", tonic::Code::Unimplemented as i32) + .header( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ) + .body(empty_body()) + .unwrap(), + ) + }) + } + } + } + } + impl Clone for CacheServiceServer { + 
fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, + } + } + } + impl tonic::server::NamedService for CacheServiceServer { + const NAME: &'static str = "flyteidl2.cacheservice.v2.CacheService"; + } +} diff --git a/gen/rust/src/flyteidl2.common.rs b/gen/rust/src/flyteidl2.common.rs index 0b013f041e..e4b2c50400 100644 --- a/gen/rust/src/flyteidl2.common.rs +++ b/gen/rust/src/flyteidl2.common.rs @@ -740,6 +740,52 @@ pub mod filter { } } } +/// The source of an attribute. We may have other sources in the future. +#[pyo3::pyclass(dict, get_all, set_all)] +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum AttributesSource { + /// The source is unspecified. + SourceUnspecified = 0, + /// The configuration is a global configuration. + Global = 1, + /// The configuration is a domain configuration. + Domain = 2, + /// The configuration is a project configuration. + Project = 3, + /// The configuration is a project-domain configuration. + ProjectDomain = 4, + /// The configuration is a org configuration. + Org = 5, +} +impl AttributesSource { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. 
+ pub fn as_str_name(&self) -> &'static str { + match self { + AttributesSource::SourceUnspecified => "SOURCE_UNSPECIFIED", + AttributesSource::Global => "GLOBAL", + AttributesSource::Domain => "DOMAIN", + AttributesSource::Project => "PROJECT", + AttributesSource::ProjectDomain => "PROJECT_DOMAIN", + AttributesSource::Org => "ORG", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "SOURCE_UNSPECIFIED" => Some(Self::SourceUnspecified), + "GLOBAL" => Some(Self::Global), + "DOMAIN" => Some(Self::Domain), + "PROJECT" => Some(Self::Project), + "PROJECT_DOMAIN" => Some(Self::ProjectDomain), + "ORG" => Some(Self::Org), + _ => None, + } + } +} /// Encoded file descriptor set for the `flyteidl2.common` package pub const FILE_DESCRIPTOR_SET: &[u8] = &[ 0x0a, 0xf0, 0x32, 0x0a, 0x21, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, @@ -2210,6 +2256,71 @@ pub const FILE_DESCRIPTOR_SET: &[u8] = &[ 0x04, 0x12, 0x03, 0x4f, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x02, 0x05, 0x12, 0x03, 0x4f, 0x0b, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x02, 0x01, 0x12, 0x03, 0x4f, 0x12, 0x18, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x02, 0x03, 0x12, 0x03, 0x4f, 0x1b, 0x1c, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x0a, 0x90, 0x08, 0x0a, 0x24, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x10, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x2a, 0x6c, 0x0a, 0x10, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x73, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x4f, 0x55, 0x52, 0x43, + 0x45, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 
0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, + 0x0a, 0x0a, 0x06, 0x47, 0x4c, 0x4f, 0x42, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x44, + 0x4f, 0x4d, 0x41, 0x49, 0x4e, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x52, 0x4f, 0x4a, 0x45, + 0x43, 0x54, 0x10, 0x03, 0x12, 0x12, 0x0a, 0x0e, 0x50, 0x52, 0x4f, 0x4a, 0x45, 0x43, 0x54, 0x5f, + 0x44, 0x4f, 0x4d, 0x41, 0x49, 0x4e, 0x10, 0x04, 0x12, 0x07, 0x0a, 0x03, 0x4f, 0x52, 0x47, 0x10, + 0x05, 0x42, 0xc1, 0x01, 0x0a, 0x14, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x42, 0x12, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x34, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, + 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0xa2, 0x02, 0x03, 0x46, 0x43, 0x58, 0xaa, 0x02, 0x10, 0x46, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0xca, + 0x02, 0x10, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x43, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0xe2, 0x02, 0x1c, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x43, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0xea, 0x02, 0x11, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x3a, 0x3a, 0x43, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x4a, 0x9b, 0x05, 0x0a, 0x06, 0x12, 0x04, 0x00, 0x00, 0x19, 0x01, + 0x0a, 0x08, 0x0a, 0x01, 0x0c, 0x12, 0x03, 0x00, 0x00, 0x12, 0x0a, 0x08, 0x0a, 0x01, 0x02, 0x12, + 0x03, 0x02, 0x00, 0x19, 0x0a, 0x08, 0x0a, 0x01, 0x08, 0x12, 0x03, 0x04, 0x00, 0x4b, 0x0a, 0x09, + 0x0a, 0x02, 0x08, 0x0b, 0x12, 0x03, 0x04, 0x00, 0x4b, 0x0a, 0x52, 0x0a, 0x02, 
0x05, 0x00, 0x12, + 0x04, 0x07, 0x00, 0x19, 0x01, 0x1a, 0x46, 0x20, 0x54, 0x68, 0x65, 0x20, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x20, 0x6f, 0x66, 0x20, 0x61, 0x6e, 0x20, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x65, 0x2e, 0x20, 0x57, 0x65, 0x20, 0x6d, 0x61, 0x79, 0x20, 0x68, 0x61, 0x76, 0x65, 0x20, + 0x6f, 0x74, 0x68, 0x65, 0x72, 0x20, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x20, 0x69, 0x6e, + 0x20, 0x74, 0x68, 0x65, 0x20, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x2e, 0x0a, 0x0a, 0x0a, 0x0a, + 0x03, 0x05, 0x00, 0x01, 0x12, 0x03, 0x07, 0x05, 0x15, 0x0a, 0x29, 0x0a, 0x04, 0x05, 0x00, 0x02, + 0x00, 0x12, 0x03, 0x09, 0x02, 0x19, 0x1a, 0x1c, 0x20, 0x54, 0x68, 0x65, 0x20, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x20, 0x69, 0x73, 0x20, 0x75, 0x6e, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, + 0x65, 0x64, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x05, 0x00, 0x02, 0x00, 0x01, 0x12, 0x03, 0x09, + 0x02, 0x14, 0x0a, 0x0c, 0x0a, 0x05, 0x05, 0x00, 0x02, 0x00, 0x02, 0x12, 0x03, 0x09, 0x17, 0x18, + 0x0a, 0x3b, 0x0a, 0x04, 0x05, 0x00, 0x02, 0x01, 0x12, 0x03, 0x0c, 0x02, 0x0d, 0x1a, 0x2e, 0x20, + 0x54, 0x68, 0x65, 0x20, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x20, 0x69, 0x73, 0x20, 0x61, 0x20, 0x67, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x20, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, + 0x05, 0x05, 0x00, 0x02, 0x01, 0x01, 0x12, 0x03, 0x0c, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x05, + 0x00, 0x02, 0x01, 0x02, 0x12, 0x03, 0x0c, 0x0b, 0x0c, 0x0a, 0x3b, 0x0a, 0x04, 0x05, 0x00, 0x02, + 0x02, 0x12, 0x03, 0x0f, 0x02, 0x0d, 0x1a, 0x2e, 0x20, 0x54, 0x68, 0x65, 0x20, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x69, 0x73, 0x20, 0x61, 0x20, + 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x20, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x05, 0x00, 0x02, 0x02, 0x01, 0x12, + 0x03, 0x0f, 0x02, 
0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x05, 0x00, 0x02, 0x02, 0x02, 0x12, 0x03, 0x0f, + 0x0b, 0x0c, 0x0a, 0x3c, 0x0a, 0x04, 0x05, 0x00, 0x02, 0x03, 0x12, 0x03, 0x12, 0x02, 0x0e, 0x1a, + 0x2f, 0x20, 0x54, 0x68, 0x65, 0x20, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x20, 0x69, 0x73, 0x20, 0x61, 0x20, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x20, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x0a, + 0x0a, 0x0c, 0x0a, 0x05, 0x05, 0x00, 0x02, 0x03, 0x01, 0x12, 0x03, 0x12, 0x02, 0x09, 0x0a, 0x0c, + 0x0a, 0x05, 0x05, 0x00, 0x02, 0x03, 0x02, 0x12, 0x03, 0x12, 0x0c, 0x0d, 0x0a, 0x43, 0x0a, 0x04, + 0x05, 0x00, 0x02, 0x04, 0x12, 0x03, 0x15, 0x02, 0x15, 0x1a, 0x36, 0x20, 0x54, 0x68, 0x65, 0x20, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x69, 0x73, + 0x20, 0x61, 0x20, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x2d, 0x64, 0x6f, 0x6d, 0x61, 0x69, + 0x6e, 0x20, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x05, 0x00, 0x02, 0x04, 0x01, 0x12, 0x03, 0x15, 0x02, 0x10, 0x0a, + 0x0c, 0x0a, 0x05, 0x05, 0x00, 0x02, 0x04, 0x02, 0x12, 0x03, 0x15, 0x13, 0x14, 0x0a, 0x38, 0x0a, + 0x04, 0x05, 0x00, 0x02, 0x05, 0x12, 0x03, 0x18, 0x02, 0x0a, 0x1a, 0x2b, 0x20, 0x54, 0x68, 0x65, + 0x20, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x69, + 0x73, 0x20, 0x61, 0x20, 0x6f, 0x72, 0x67, 0x20, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x05, 0x00, 0x02, 0x05, 0x01, + 0x12, 0x03, 0x18, 0x02, 0x05, 0x0a, 0x0c, 0x0a, 0x05, 0x05, 0x00, 0x02, 0x05, 0x02, 0x12, 0x03, + 0x18, 0x08, 0x09, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, ]; // @@protoc_insertion_point(module) \ No newline at end of file diff --git a/gen/rust/src/flyteidl2.core.rs b/gen/rust/src/flyteidl2.core.rs index a945cf43d2..31c5150a70 100644 --- 
a/gen/rust/src/flyteidl2.core.rs +++ b/gen/rust/src/flyteidl2.core.rs @@ -2602,6 +2602,66 @@ pub mod quality_of_service { Spec(super::QualityOfServiceSpec), } } +/// Error message to propagate detailed errors from container executions to the execution +/// engine. +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ContainerError { + /// A simplified code for errors, so that we can provide a glossary of all possible errors. + #[prost(string, tag="1")] + pub code: ::prost::alloc::string::String, + /// A detailed error message. + #[prost(string, tag="2")] + pub message: ::prost::alloc::string::String, + /// An abstract error kind for this error. Defaults to Non_Recoverable if not specified. + #[prost(enumeration="container_error::Kind", tag="3")] + pub kind: i32, + /// Defines the origin of the error (system, user, unknown). + #[prost(enumeration="execution_error::ErrorKind", tag="4")] + pub origin: i32, +} +/// Nested message and enum types in `ContainerError`. +pub mod container_error { + /// Defines a generic error type that dictates the behavior of the retry strategy. + #[pyo3::pyclass(dict, get_all, set_all)] + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[repr(i32)] + pub enum Kind { + NonRecoverable = 0, + Recoverable = 1, + } + impl Kind { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Kind::NonRecoverable => "NON_RECOVERABLE", + Kind::Recoverable => "RECOVERABLE", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "NON_RECOVERABLE" => Some(Self::NonRecoverable), + "RECOVERABLE" => Some(Self::Recoverable), + _ => None, + } + } + } +} +/// Defines the errors.pb file format the container can produce to communicate +/// failure reasons to the execution engine. +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ErrorDocument { + /// The error raised during execution. + #[prost(message, optional, tag="1")] + pub error: ::core::option::Option, +} /// ExecutionMetrics is a collection of metrics that are collected during the execution of a Flyte task. #[pyo3::pyclass(dict, get_all, set_all)] #[allow(clippy::derive_partial_eq_without_eq)] @@ -7665,76 +7725,181 @@ pub const FILE_DESCRIPTOR_SET: &[u8] = &[ 0x01, 0x04, 0x22, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x09, 0x02, 0x01, 0x06, 0x12, 0x04, 0xa2, 0x01, 0x04, 0x18, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x09, 0x02, 0x01, 0x01, 0x12, 0x04, 0xa2, 0x01, 0x19, 0x1d, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x09, 0x02, 0x01, 0x03, 0x12, 0x04, 0xa2, 0x01, 0x20, 0x21, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x0a, 0xdd, 0x08, 0x0a, 0x1c, 0x66, 0x6c, 0x79, - 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x6d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x66, 0x6c, 0x79, 0x74, 0x65, - 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, - 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x5c, 0x0a, 0x15, 0x45, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 
0x12, 0x2b, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, - 0x04, 0x64, 0x61, 0x74, 0x61, 0x42, 0xaf, 0x01, 0x0a, 0x12, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, - 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x42, 0x0c, 0x4d, 0x65, - 0x74, 0x72, 0x69, 0x63, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x32, 0x67, 0x69, - 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, - 0x67, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, - 0x6f, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, - 0xa2, 0x02, 0x03, 0x46, 0x43, 0x58, 0xaa, 0x02, 0x0e, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, - 0x6c, 0x32, 0x2e, 0x43, 0x6f, 0x72, 0x65, 0xca, 0x02, 0x0e, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, - 0x64, 0x6c, 0x32, 0x5c, 0x43, 0x6f, 0x72, 0x65, 0xe2, 0x02, 0x1a, 0x46, 0x6c, 0x79, 0x74, 0x65, - 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x43, 0x6f, 0x72, 0x65, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, - 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0f, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, - 0x32, 0x3a, 0x3a, 0x43, 0x6f, 0x72, 0x65, 0x4a, 0xf6, 0x05, 0x0a, 0x06, 0x12, 0x04, 0x00, 0x00, - 0x13, 0x01, 0x0a, 0x08, 0x0a, 0x01, 0x0c, 0x12, 0x03, 0x00, 0x00, 0x12, 0x0a, 0x08, 0x0a, 0x01, - 0x02, 0x12, 0x03, 0x02, 0x00, 0x17, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x00, 0x12, 0x03, 0x04, 0x00, - 0x26, 0x0a, 0x08, 0x0a, 0x01, 0x08, 0x12, 0x03, 0x06, 0x00, 0x49, 0x0a, 0x09, 0x0a, 0x02, 0x08, - 0x0b, 0x12, 0x03, 0x06, 0x00, 0x49, 0x0a, 0x72, 0x0a, 0x02, 0x04, 0x00, 0x12, 0x04, 0x09, 0x00, - 0x13, 0x01, 0x1a, 0x66, 0x20, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, - 0x74, 0x72, 0x69, 0x63, 0x73, 0x20, 0x69, 0x73, 0x20, 0x61, 0x20, 0x63, 0x6f, 0x6c, 
0x6c, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x6f, 0x66, 0x20, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, - 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x61, 0x72, 0x65, 0x20, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, - 0x74, 0x65, 0x64, 0x20, 0x64, 0x75, 0x72, 0x69, 0x6e, 0x67, 0x20, 0x74, 0x68, 0x65, 0x20, 0x65, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x6f, 0x66, 0x20, 0x61, 0x20, 0x46, 0x6c, - 0x79, 0x74, 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x00, - 0x01, 0x12, 0x03, 0x09, 0x08, 0x1d, 0x0a, 0x7d, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x00, 0x12, 0x03, - 0x0b, 0x02, 0x14, 0x1a, 0x70, 0x20, 0x54, 0x68, 0x65, 0x20, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, - 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x64, 0x61, 0x74, 0x61, 0x20, 0x72, 0x65, 0x70, 0x72, 0x65, - 0x73, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x20, 0x65, 0x2e, 0x67, 0x2e, 0x20, 0x45, 0x58, 0x45, 0x43, - 0x55, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x45, 0x54, 0x52, 0x49, 0x43, 0x5f, 0x55, 0x53, 0x45, - 0x44, 0x5f, 0x43, 0x50, 0x55, 0x5f, 0x41, 0x56, 0x47, 0x20, 0x6f, 0x72, 0x20, 0x45, 0x58, 0x45, - 0x43, 0x55, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x45, 0x54, 0x52, 0x49, 0x43, 0x5f, 0x55, 0x53, - 0x45, 0x44, 0x5f, 0x4d, 0x45, 0x4d, 0x4f, 0x52, 0x59, 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x5f, - 0x41, 0x56, 0x47, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x05, 0x12, 0x03, - 0x0b, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x01, 0x12, 0x03, 0x0b, 0x09, - 0x0f, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x03, 0x12, 0x03, 0x0b, 0x12, 0x13, 0x0a, - 0xe4, 0x02, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x01, 0x12, 0x03, 0x12, 0x02, 0x22, 0x1a, 0xd6, 0x02, - 0x20, 0x54, 0x68, 0x65, 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x20, 0x64, 0x61, 0x74, 0x61, - 0x20, 0x69, 0x6e, 0x20, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x20, 0x72, - 0x61, 0x6e, 0x67, 0x65, 0x20, 0x71, 0x75, 0x65, 0x72, 0x79, 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x20, 0x66, 0x6f, 
0x72, 0x6d, 0x61, 0x74, 0x0a, 0x20, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, - 0x2f, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x69, 0x6f, 0x2f, - 0x64, 0x6f, 0x63, 0x73, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f, - 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x2f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x69, 0x6e, 0x67, 0x2f, - 0x61, 0x70, 0x69, 0x2f, 0x23, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x2d, - 0x71, 0x75, 0x65, 0x72, 0x79, 0x2d, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x2d, 0x66, 0x6f, 0x72, - 0x6d, 0x61, 0x74, 0x73, 0x2e, 0x0a, 0x20, 0x54, 0x68, 0x69, 0x73, 0x20, 0x6d, 0x61, 0x79, 0x20, - 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x20, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, - 0x20, 0x74, 0x69, 0x6d, 0x65, 0x20, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x2c, 0x20, 0x64, 0x69, - 0x66, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x74, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, - 0x74, 0x68, 0x65, 0x69, 0x72, 0x20, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x20, 0x6c, 0x61, 0x62, - 0x65, 0x6c, 0x73, 0x2e, 0x0a, 0x20, 0x53, 0x74, 0x61, 0x72, 0x74, 0x20, 0x74, 0x69, 0x6d, 0x65, - 0x20, 0x69, 0x73, 0x20, 0x67, 0x72, 0x65, 0x61, 0x74, 0x65, 0x72, 0x20, 0x6f, 0x66, 0x20, 0x28, - 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, - 0x74, 0x20, 0x73, 0x74, 0x61, 0x72, 0x74, 0x2c, 0x20, 0x34, 0x38, 0x68, 0x20, 0x61, 0x67, 0x6f, - 0x29, 0x0a, 0x20, 0x45, 0x6e, 0x64, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x20, 0x69, 0x73, 0x20, 0x6c, - 0x65, 0x73, 0x73, 0x65, 0x72, 0x20, 0x6f, 0x66, 0x20, 0x28, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x69, 0x6f, 0x6e, 0x20, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x20, 0x65, 0x6e, 0x64, 0x2c, - 0x20, 0x6e, 0x6f, 0x77, 0x29, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x06, 0x12, - 0x03, 0x12, 0x02, 0x18, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x01, 0x12, 0x03, 0x12, - 0x19, 0x1d, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 
0x03, 0x12, 0x03, 0x12, 0x20, 0x21, - 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x0a, 0x92, 0x0d, 0x0a, 0x1b, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x1a, 0x1e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xe7, 0x01, 0x0a, 0x0e, 0x43, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x63, + 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x12, + 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x04, 0x6b, 0x69, 0x6e, + 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x04, 0x6b, 0x69, + 0x6e, 0x64, 0x12, 0x40, 0x0a, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, + 0x6f, 0x72, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x06, 0x6f, 0x72, + 0x69, 0x67, 0x69, 0x6e, 0x22, 0x2c, 0x0a, 0x04, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x13, 0x0a, 0x0f, + 0x4e, 0x4f, 0x4e, 0x5f, 0x52, 0x45, 0x43, 0x4f, 0x56, 0x45, 0x52, 0x41, 0x42, 0x4c, 0x45, 0x10, + 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45, 0x43, 
0x4f, 0x56, 0x45, 0x52, 0x41, 0x42, 0x4c, 0x45, + 0x10, 0x01, 0x22, 0x45, 0x0a, 0x0d, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x44, 0x6f, 0x63, 0x75, 0x6d, + 0x65, 0x6e, 0x74, 0x12, 0x34, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x45, 0x72, 0x72, + 0x6f, 0x72, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x42, 0xae, 0x01, 0x0a, 0x12, 0x63, 0x6f, + 0x6d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x42, 0x0b, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x32, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, + 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0xa2, 0x02, 0x03, 0x46, 0x43, 0x58, 0xaa, 0x02, 0x0e, 0x46, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x43, 0x6f, 0x72, 0x65, 0xca, 0x02, 0x0e, 0x46, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x43, 0x6f, 0x72, 0x65, 0xe2, 0x02, 0x1a, 0x46, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x43, 0x6f, 0x72, 0x65, 0x5c, 0x47, 0x50, 0x42, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0f, 0x46, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x3a, 0x3a, 0x43, 0x6f, 0x72, 0x65, 0x4a, 0xd8, 0x08, 0x0a, 0x06, 0x12, + 0x04, 0x00, 0x00, 0x22, 0x01, 0x0a, 0x08, 0x0a, 0x01, 0x0c, 0x12, 0x03, 0x00, 0x00, 0x12, 0x0a, + 0x08, 0x0a, 0x01, 0x02, 0x12, 0x03, 0x02, 0x00, 0x17, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x00, 0x12, + 0x03, 0x04, 0x00, 0x28, 0x0a, 0x08, 0x0a, 0x01, 0x08, 0x12, 0x03, 0x06, 0x00, 0x49, 0x0a, 0x09, + 0x0a, 0x02, 0x08, 0x0b, 0x12, 0x03, 0x06, 0x00, 0x49, 0x0a, 0x6c, 0x0a, 0x02, 0x04, 
0x00, 0x12, + 0x04, 0x0a, 0x00, 0x1b, 0x01, 0x1a, 0x60, 0x20, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x20, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x74, 0x6f, 0x20, 0x70, 0x72, 0x6f, 0x70, 0x61, 0x67, 0x61, + 0x74, 0x65, 0x20, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x20, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x73, 0x20, 0x66, 0x72, 0x6f, 0x6d, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, + 0x72, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x20, 0x74, 0x6f, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x20, 0x65, + 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2e, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x00, 0x01, 0x12, 0x03, + 0x0a, 0x08, 0x16, 0x0a, 0x66, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x00, 0x12, 0x03, 0x0c, 0x02, 0x12, + 0x1a, 0x59, 0x20, 0x41, 0x20, 0x73, 0x69, 0x6d, 0x70, 0x6c, 0x69, 0x66, 0x69, 0x65, 0x64, 0x20, + 0x63, 0x6f, 0x64, 0x65, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x2c, + 0x20, 0x73, 0x6f, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x77, 0x65, 0x20, 0x63, 0x61, 0x6e, 0x20, + 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x20, 0x61, 0x20, 0x67, 0x6c, 0x6f, 0x73, 0x73, 0x61, + 0x72, 0x79, 0x20, 0x6f, 0x66, 0x20, 0x61, 0x6c, 0x6c, 0x20, 0x70, 0x6f, 0x73, 0x73, 0x69, 0x62, + 0x6c, 0x65, 0x20, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x00, 0x02, 0x00, 0x05, 0x12, 0x03, 0x0c, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, + 0x00, 0x01, 0x12, 0x03, 0x0c, 0x09, 0x0d, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x03, + 0x12, 0x03, 0x0c, 0x10, 0x11, 0x0a, 0x28, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x01, 0x12, 0x03, 0x0e, + 0x02, 0x15, 0x1a, 0x1b, 0x20, 0x41, 0x20, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x20, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x0a, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x05, 0x12, 0x03, 0x0e, 0x02, 0x08, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x00, 0x02, 
0x01, 0x01, 0x12, 0x03, 0x0e, 0x09, 0x10, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x00, 0x02, 0x01, 0x03, 0x12, 0x03, 0x0e, 0x13, 0x14, 0x0a, 0x5e, 0x0a, 0x04, 0x04, 0x00, 0x04, + 0x00, 0x12, 0x04, 0x11, 0x02, 0x14, 0x03, 0x1a, 0x50, 0x20, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x65, + 0x73, 0x20, 0x61, 0x20, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x69, 0x63, 0x20, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x20, 0x74, 0x79, 0x70, 0x65, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x64, 0x69, 0x63, 0x74, + 0x61, 0x74, 0x65, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, + 0x72, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x72, 0x65, 0x74, 0x72, 0x79, 0x20, 0x73, + 0x74, 0x72, 0x61, 0x74, 0x65, 0x67, 0x79, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x04, + 0x00, 0x01, 0x12, 0x03, 0x11, 0x07, 0x0b, 0x0a, 0x0d, 0x0a, 0x06, 0x04, 0x00, 0x04, 0x00, 0x02, + 0x00, 0x12, 0x03, 0x12, 0x04, 0x18, 0x0a, 0x0e, 0x0a, 0x07, 0x04, 0x00, 0x04, 0x00, 0x02, 0x00, + 0x01, 0x12, 0x03, 0x12, 0x04, 0x13, 0x0a, 0x0e, 0x0a, 0x07, 0x04, 0x00, 0x04, 0x00, 0x02, 0x00, + 0x02, 0x12, 0x03, 0x12, 0x16, 0x17, 0x0a, 0x0d, 0x0a, 0x06, 0x04, 0x00, 0x04, 0x00, 0x02, 0x01, + 0x12, 0x03, 0x13, 0x04, 0x14, 0x0a, 0x0e, 0x0a, 0x07, 0x04, 0x00, 0x04, 0x00, 0x02, 0x01, 0x01, + 0x12, 0x03, 0x13, 0x04, 0x0f, 0x0a, 0x0e, 0x0a, 0x07, 0x04, 0x00, 0x04, 0x00, 0x02, 0x01, 0x02, + 0x12, 0x03, 0x13, 0x12, 0x13, 0x0a, 0x63, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x02, 0x12, 0x03, 0x17, + 0x02, 0x10, 0x1a, 0x56, 0x20, 0x41, 0x6e, 0x20, 0x61, 0x62, 0x73, 0x74, 0x72, 0x61, 0x63, 0x74, + 0x20, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x20, 0x6b, 0x69, 0x6e, 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, + 0x74, 0x68, 0x69, 0x73, 0x20, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x2e, 0x20, 0x44, 0x65, 0x66, 0x61, + 0x75, 0x6c, 0x74, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x4e, 0x6f, 0x6e, 0x5f, 0x52, 0x65, 0x63, 0x6f, + 0x76, 0x65, 0x72, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x69, 0x66, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x73, + 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x64, 0x2e, 0x0a, 
0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, + 0x02, 0x02, 0x06, 0x12, 0x03, 0x17, 0x02, 0x06, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x02, + 0x01, 0x12, 0x03, 0x17, 0x07, 0x0b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x02, 0x03, 0x12, + 0x03, 0x17, 0x0e, 0x0f, 0x0a, 0x47, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x03, 0x12, 0x03, 0x1a, 0x02, + 0x26, 0x1a, 0x3a, 0x20, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, + 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x65, 0x72, + 0x72, 0x6f, 0x72, 0x20, 0x28, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2c, 0x20, 0x75, 0x73, 0x65, + 0x72, 0x2c, 0x20, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x29, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x00, 0x02, 0x03, 0x06, 0x12, 0x03, 0x1a, 0x02, 0x1a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x00, 0x02, 0x03, 0x01, 0x12, 0x03, 0x1a, 0x1b, 0x21, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, + 0x03, 0x03, 0x12, 0x03, 0x1a, 0x24, 0x25, 0x0a, 0x82, 0x01, 0x0a, 0x02, 0x04, 0x01, 0x12, 0x04, + 0x1f, 0x00, 0x22, 0x01, 0x1a, 0x76, 0x20, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x73, 0x20, 0x74, + 0x68, 0x65, 0x20, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x2e, 0x70, 0x62, 0x20, 0x66, 0x69, 0x6c, + 0x65, 0x20, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x6f, 0x6e, + 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x20, 0x63, 0x61, 0x6e, 0x20, 0x70, 0x72, 0x6f, 0x64, 0x75, + 0x63, 0x65, 0x20, 0x74, 0x6f, 0x20, 0x63, 0x6f, 0x6d, 0x6d, 0x75, 0x6e, 0x69, 0x63, 0x61, 0x74, + 0x65, 0x0a, 0x20, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x20, 0x72, 0x65, 0x61, 0x73, 0x6f, + 0x6e, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x69, 0x6f, 0x6e, 0x20, 0x65, 0x6e, 0x67, 0x69, 0x6e, 0x65, 0x2e, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, + 0x04, 0x01, 0x01, 0x12, 0x03, 0x1f, 0x08, 0x15, 0x0a, 0x31, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x00, + 0x12, 0x03, 0x21, 0x02, 0x1b, 0x1a, 0x24, 0x20, 0x54, 0x68, 0x65, 0x20, 0x65, 0x72, 0x72, 0x6f, + 
0x72, 0x20, 0x72, 0x61, 0x69, 0x73, 0x65, 0x64, 0x20, 0x64, 0x75, 0x72, 0x69, 0x6e, 0x67, 0x20, + 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x01, 0x02, 0x00, 0x06, 0x12, 0x03, 0x21, 0x02, 0x10, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, + 0x00, 0x01, 0x12, 0x03, 0x21, 0x11, 0x16, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x03, + 0x12, 0x03, 0x21, 0x19, 0x1a, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x0a, 0xdd, 0x08, + 0x0a, 0x1c, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, + 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x1a, 0x1c, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x5c, 0x0a, 0x15, + 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x2b, 0x0a, + 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, + 0x72, 0x75, 0x63, 0x74, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x42, 0xaf, 0x01, 0x0a, 0x12, 0x63, + 0x6f, 0x6d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x42, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x32, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x76, 0x32, 0x2f, + 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 
0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, + 0x2f, 0x63, 0x6f, 0x72, 0x65, 0xa2, 0x02, 0x03, 0x46, 0x43, 0x58, 0xaa, 0x02, 0x0e, 0x46, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x43, 0x6f, 0x72, 0x65, 0xca, 0x02, 0x0e, 0x46, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x43, 0x6f, 0x72, 0x65, 0xe2, 0x02, 0x1a, + 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x43, 0x6f, 0x72, 0x65, 0x5c, 0x47, + 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x0f, 0x46, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x3a, 0x3a, 0x43, 0x6f, 0x72, 0x65, 0x4a, 0xf6, 0x05, 0x0a, + 0x06, 0x12, 0x04, 0x00, 0x00, 0x13, 0x01, 0x0a, 0x08, 0x0a, 0x01, 0x0c, 0x12, 0x03, 0x00, 0x00, + 0x12, 0x0a, 0x08, 0x0a, 0x01, 0x02, 0x12, 0x03, 0x02, 0x00, 0x17, 0x0a, 0x09, 0x0a, 0x02, 0x03, + 0x00, 0x12, 0x03, 0x04, 0x00, 0x26, 0x0a, 0x08, 0x0a, 0x01, 0x08, 0x12, 0x03, 0x06, 0x00, 0x49, + 0x0a, 0x09, 0x0a, 0x02, 0x08, 0x0b, 0x12, 0x03, 0x06, 0x00, 0x49, 0x0a, 0x72, 0x0a, 0x02, 0x04, + 0x00, 0x12, 0x04, 0x09, 0x00, 0x13, 0x01, 0x1a, 0x66, 0x20, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x20, 0x69, 0x73, 0x20, 0x61, 0x20, + 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x6f, 0x66, 0x20, 0x6d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x61, 0x72, 0x65, 0x20, 0x63, + 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x65, 0x64, 0x20, 0x64, 0x75, 0x72, 0x69, 0x6e, 0x67, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x6f, 0x66, + 0x20, 0x61, 0x20, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x0a, 0x0a, + 0x0a, 0x0a, 0x03, 0x04, 0x00, 0x01, 0x12, 0x03, 0x09, 0x08, 0x1d, 0x0a, 0x7d, 0x0a, 0x04, 0x04, + 0x00, 0x02, 0x00, 0x12, 0x03, 0x0b, 0x02, 0x14, 0x1a, 0x70, 0x20, 0x54, 0x68, 0x65, 0x20, 0x6d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x64, 
0x61, 0x74, 0x61, 0x20, + 0x72, 0x65, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x20, 0x65, 0x2e, 0x67, 0x2e, + 0x20, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x45, 0x54, 0x52, 0x49, + 0x43, 0x5f, 0x55, 0x53, 0x45, 0x44, 0x5f, 0x43, 0x50, 0x55, 0x5f, 0x41, 0x56, 0x47, 0x20, 0x6f, + 0x72, 0x20, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x45, 0x54, 0x52, + 0x49, 0x43, 0x5f, 0x55, 0x53, 0x45, 0x44, 0x5f, 0x4d, 0x45, 0x4d, 0x4f, 0x52, 0x59, 0x5f, 0x42, + 0x59, 0x54, 0x45, 0x53, 0x5f, 0x41, 0x56, 0x47, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, + 0x02, 0x00, 0x05, 0x12, 0x03, 0x0b, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, + 0x01, 0x12, 0x03, 0x0b, 0x09, 0x0f, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x03, 0x12, + 0x03, 0x0b, 0x12, 0x13, 0x0a, 0xe4, 0x02, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x01, 0x12, 0x03, 0x12, + 0x02, 0x22, 0x1a, 0xd6, 0x02, 0x20, 0x54, 0x68, 0x65, 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x20, 0x64, 0x61, 0x74, 0x61, 0x20, 0x69, 0x6e, 0x20, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, + 0x65, 0x75, 0x73, 0x20, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x20, 0x71, 0x75, 0x65, 0x72, 0x79, 0x20, + 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x20, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x0a, 0x20, 0x68, + 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, + 0x73, 0x2e, 0x69, 0x6f, 0x2f, 0x64, 0x6f, 0x63, 0x73, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, + 0x68, 0x65, 0x75, 0x73, 0x2f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x2f, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x69, 0x6e, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x23, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, + 0x73, 0x69, 0x6f, 0x6e, 0x2d, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2d, 0x72, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x2d, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x73, 0x2e, 0x0a, 0x20, 0x54, 0x68, 0x69, 0x73, + 0x20, 0x6d, 0x61, 0x79, 0x20, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x20, 0x6d, 0x75, 0x6c, + 0x74, 0x69, 
0x70, 0x6c, 0x65, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x20, 0x73, 0x65, 0x72, 0x69, 0x65, + 0x73, 0x2c, 0x20, 0x64, 0x69, 0x66, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x74, 0x69, 0x61, 0x74, 0x65, + 0x64, 0x20, 0x62, 0x79, 0x20, 0x74, 0x68, 0x65, 0x69, 0x72, 0x20, 0x6d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x20, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x2e, 0x0a, 0x20, 0x53, 0x74, 0x61, 0x72, 0x74, + 0x20, 0x74, 0x69, 0x6d, 0x65, 0x20, 0x69, 0x73, 0x20, 0x67, 0x72, 0x65, 0x61, 0x74, 0x65, 0x72, + 0x20, 0x6f, 0x66, 0x20, 0x28, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x61, + 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x20, 0x73, 0x74, 0x61, 0x72, 0x74, 0x2c, 0x20, 0x34, 0x38, + 0x68, 0x20, 0x61, 0x67, 0x6f, 0x29, 0x0a, 0x20, 0x45, 0x6e, 0x64, 0x20, 0x74, 0x69, 0x6d, 0x65, + 0x20, 0x69, 0x73, 0x20, 0x6c, 0x65, 0x73, 0x73, 0x65, 0x72, 0x20, 0x6f, 0x66, 0x20, 0x28, 0x65, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, + 0x20, 0x65, 0x6e, 0x64, 0x2c, 0x20, 0x6e, 0x6f, 0x77, 0x29, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x00, 0x02, 0x01, 0x06, 0x12, 0x03, 0x12, 0x02, 0x18, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, + 0x01, 0x01, 0x12, 0x03, 0x12, 0x19, 0x1d, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x03, + 0x12, 0x03, 0x12, 0x20, 0x21, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, ]; // @@protoc_insertion_point(module) \ No newline at end of file diff --git a/gen/rust/src/flyteidl2.datacatalog.rs b/gen/rust/src/flyteidl2.datacatalog.rs new file mode 100644 index 0000000000..6e8c174337 --- /dev/null +++ b/gen/rust/src/flyteidl2.datacatalog.rs @@ -0,0 +1,1907 @@ +// @generated +// This file is @generated by prost-build. +/// +/// Request message for creating a Dataset. 
+#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CreateDatasetRequest { + #[prost(message, optional, tag="1")] + pub dataset: ::core::option::Option, +} +/// +/// Response message for creating a Dataset +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct CreateDatasetResponse { +} +/// +/// Request message for retrieving a Dataset. The Dataset is retrieved by it's unique identifier +/// which is a combination of several fields. +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetDatasetRequest { + #[prost(message, optional, tag="1")] + pub dataset: ::core::option::Option, +} +/// +/// Response message for retrieving a Dataset. The response will include the metadata for the +/// Dataset. +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetDatasetResponse { + #[prost(message, optional, tag="1")] + pub dataset: ::core::option::Option, +} +/// +/// Request message for retrieving an Artifact. Retrieve an artifact based on a query handle that +/// can be one of artifact_id or tag. The result returned will include the artifact data and metadata +/// associated with the artifact. +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetArtifactRequest { + #[prost(message, optional, tag="1")] + pub dataset: ::core::option::Option, + #[prost(oneof="get_artifact_request::QueryHandle", tags="2, 3")] + pub query_handle: ::core::option::Option, +} +/// Nested message and enum types in `GetArtifactRequest`. 
+pub mod get_artifact_request { + #[pyo3::pyclass(dict, get_all, set_all)] + #[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum QueryHandle { + #[prost(string, tag="2")] + ArtifactId(::prost::alloc::string::String), + #[prost(string, tag="3")] + TagName(::prost::alloc::string::String), + } +} +/// +/// Response message for retrieving an Artifact. The result returned will include the artifact data +/// and metadata associated with the artifact. +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetArtifactResponse { + #[prost(message, optional, tag="1")] + pub artifact: ::core::option::Option, +} +/// +/// Request message for creating an Artifact and its associated artifact Data. +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CreateArtifactRequest { + #[prost(message, optional, tag="1")] + pub artifact: ::core::option::Option, +} +/// +/// Response message for creating an Artifact. +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct CreateArtifactResponse { +} +/// +/// Request message for tagging an Artifact. +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct AddTagRequest { + #[prost(message, optional, tag="1")] + pub tag: ::core::option::Option, +} +/// +/// Response message for tagging an Artifact. +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct AddTagResponse { +} +/// List the artifacts that belong to the Dataset, optionally filtered using filtered expression. 
+#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListArtifactsRequest { + /// Use a datasetID for which you want to retrieve the artifacts + #[prost(message, optional, tag="1")] + pub dataset: ::core::option::Option, + /// Apply the filter expression to this query + #[prost(message, optional, tag="2")] + pub filter: ::core::option::Option, + /// Pagination options to get a page of artifacts + #[prost(message, optional, tag="3")] + pub pagination: ::core::option::Option, +} +/// Response to list artifacts +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListArtifactsResponse { + /// The list of artifacts + #[prost(message, repeated, tag="1")] + pub artifacts: ::prost::alloc::vec::Vec, + /// Token to use to request the next page, pass this into the next requests PaginationOptions + #[prost(string, tag="2")] + pub next_token: ::prost::alloc::string::String, +} +/// List the datasets for the given query +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListDatasetsRequest { + /// Apply the filter expression to this query + #[prost(message, optional, tag="1")] + pub filter: ::core::option::Option, + /// Pagination options to get a page of datasets + #[prost(message, optional, tag="2")] + pub pagination: ::core::option::Option, +} +/// List the datasets response with token for next pagination +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ListDatasetsResponse { + /// The list of datasets + #[prost(message, repeated, tag="1")] + pub datasets: ::prost::alloc::vec::Vec, + /// Token to use to request the next page, pass this into the next requests PaginationOptions + 
#[prost(string, tag="2")] + pub next_token: ::prost::alloc::string::String, +} +/// +/// Request message for updating an Artifact and overwriting its associated ArtifactData. +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct UpdateArtifactRequest { + /// ID of dataset the artifact is associated with + #[prost(message, optional, tag="1")] + pub dataset: ::core::option::Option, + /// List of data to overwrite stored artifact data with. Must contain ALL data for updated Artifact as any missing + /// ArtifactData entries will be removed from the underlying blob storage and database. + #[prost(message, repeated, tag="4")] + pub data: ::prost::alloc::vec::Vec, + /// Update execution metadata(including execution domain, name, node, project data) when overwriting cache + #[prost(message, optional, tag="5")] + pub metadata: ::core::option::Option, + /// Either ID of artifact or name of tag to retrieve existing artifact from + #[prost(oneof="update_artifact_request::QueryHandle", tags="2, 3")] + pub query_handle: ::core::option::Option, +} +/// Nested message and enum types in `UpdateArtifactRequest`. +pub mod update_artifact_request { + /// Either ID of artifact or name of tag to retrieve existing artifact from + #[pyo3::pyclass(dict, get_all, set_all)] + #[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum QueryHandle { + #[prost(string, tag="2")] + ArtifactId(::prost::alloc::string::String), + #[prost(string, tag="3")] + TagName(::prost::alloc::string::String), + } +} +/// +/// Response message for updating an Artifact. 
+#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct UpdateArtifactResponse { + /// The unique ID of the artifact updated + #[prost(string, tag="1")] + pub artifact_id: ::prost::alloc::string::String, +} +/// +/// ReservationID message that is composed of several string fields. +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ReservationId { + /// The unique ID for the reserved dataset + #[prost(message, optional, tag="1")] + pub dataset_id: ::core::option::Option, + /// The specific artifact tag for the reservation + #[prost(string, tag="2")] + pub tag_name: ::prost::alloc::string::String, +} +/// Try to acquire or extend an artifact reservation. If an active reservation exists, retrieve that instance. +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetOrExtendReservationRequest { + /// The unique ID for the reservation + #[prost(message, optional, tag="1")] + pub reservation_id: ::core::option::Option, + /// The unique ID of the owner for the reservation + #[prost(string, tag="2")] + pub owner_id: ::prost::alloc::string::String, + /// Requested reservation extension heartbeat interval + #[prost(message, optional, tag="3")] + pub heartbeat_interval: ::core::option::Option, +} +/// A reservation including owner, heartbeat interval, expiration timestamp, and various metadata. 
+#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Reservation { + /// The unique ID for the reservation + #[prost(message, optional, tag="1")] + pub reservation_id: ::core::option::Option, + /// The unique ID of the owner for the reservation + #[prost(string, tag="2")] + pub owner_id: ::prost::alloc::string::String, + /// Recommended heartbeat interval to extend reservation + #[prost(message, optional, tag="3")] + pub heartbeat_interval: ::core::option::Option, + /// Expiration timestamp of this reservation + #[prost(message, optional, tag="4")] + pub expires_at: ::core::option::Option, + /// Free-form metadata associated with the artifact + #[prost(message, optional, tag="6")] + pub metadata: ::core::option::Option, +} +/// Response including either a newly minted reservation or the existing reservation +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct GetOrExtendReservationResponse { + /// The reservation to be acquired or extended + #[prost(message, optional, tag="1")] + pub reservation: ::core::option::Option, +} +/// Request to release reservation +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ReleaseReservationRequest { + /// The unique ID for the reservation + #[prost(message, optional, tag="1")] + pub reservation_id: ::core::option::Option, + /// The unique ID of the owner for the reservation + #[prost(string, tag="2")] + pub owner_id: ::prost::alloc::string::String, +} +/// Response to release reservation +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct ReleaseReservationResponse { +} +/// +/// Dataset message. It is uniquely identified by DatasetID. 
+#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Dataset { + #[prost(message, optional, tag="1")] + pub id: ::core::option::Option, + #[prost(message, optional, tag="2")] + pub metadata: ::core::option::Option, + #[prost(string, repeated, tag="3")] + pub partition_keys: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, +} +/// +/// An artifact could have multiple partitions and each partition can have an arbitrary string key/value pair +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Partition { + #[prost(string, tag="1")] + pub key: ::prost::alloc::string::String, + #[prost(string, tag="2")] + pub value: ::prost::alloc::string::String, +} +/// +/// DatasetID message that is composed of several string fields. +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DatasetId { + /// The name of the project + #[prost(string, tag="1")] + pub project: ::prost::alloc::string::String, + /// The name of the dataset + #[prost(string, tag="2")] + pub name: ::prost::alloc::string::String, + /// The domain (eg. environment) + #[prost(string, tag="3")] + pub domain: ::prost::alloc::string::String, + /// Version of the data schema + #[prost(string, tag="4")] + pub version: ::prost::alloc::string::String, + /// UUID for the dataset (if set the above fields are optional) + #[prost(string, tag="5")] + pub uuid: ::prost::alloc::string::String, + /// Optional, org key applied to the resource. + #[prost(string, tag="6")] + pub org: ::prost::alloc::string::String, +} +/// +/// Artifact message. It is composed of several string fields. 
+#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Artifact { + /// The unique ID of the artifact + #[prost(string, tag="1")] + pub id: ::prost::alloc::string::String, + /// The Dataset that the artifact belongs to + #[prost(message, optional, tag="2")] + pub dataset: ::core::option::Option, + /// A list of data that is associated with the artifact + #[prost(message, repeated, tag="3")] + pub data: ::prost::alloc::vec::Vec, + /// Free-form metadata associated with the artifact + #[prost(message, optional, tag="4")] + pub metadata: ::core::option::Option, + #[prost(message, repeated, tag="5")] + pub partitions: ::prost::alloc::vec::Vec, + #[prost(message, repeated, tag="6")] + pub tags: ::prost::alloc::vec::Vec, + /// creation timestamp of artifact, autogenerated by service + #[prost(message, optional, tag="7")] + pub created_at: ::core::option::Option, +} +/// +/// ArtifactData that belongs to an artifact +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ArtifactData { + #[prost(string, tag="1")] + pub name: ::prost::alloc::string::String, + #[prost(message, optional, tag="2")] + pub value: ::core::option::Option, +} +/// +/// Tag message that is unique to a Dataset. It is associated to a single artifact and +/// can be retrieved by name later. 
+#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Tag { + /// Name of tag + #[prost(string, tag="1")] + pub name: ::prost::alloc::string::String, + /// The tagged artifact + #[prost(string, tag="2")] + pub artifact_id: ::prost::alloc::string::String, + /// The Dataset that this tag belongs to + #[prost(message, optional, tag="3")] + pub dataset: ::core::option::Option, +} +/// +/// Metadata representation for artifacts and datasets +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct Metadata { + /// key map is a dictionary of key/val strings that represent metadata + #[prost(map="string, string", tag="1")] + pub key_map: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, +} +/// Filter expression that is composed of a combination of single filters +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct FilterExpression { + #[prost(message, repeated, tag="1")] + pub filters: ::prost::alloc::vec::Vec, +} +/// A single property to filter on. +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct SinglePropertyFilter { + /// field 10 in case we add more entities to query + #[prost(enumeration="single_property_filter::ComparisonOperator", tag="10")] + pub operator: i32, + #[prost(oneof="single_property_filter::PropertyFilter", tags="1, 2, 3, 4")] + pub property_filter: ::core::option::Option, +} +/// Nested message and enum types in `SinglePropertyFilter`. +pub mod single_property_filter { + /// as use-cases come up we can add more operators, ex: gte, like, not eq etc. 
+ #[pyo3::pyclass(dict, get_all, set_all)] + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[repr(i32)] + pub enum ComparisonOperator { + Equals = 0, + } + impl ComparisonOperator { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + ComparisonOperator::Equals => "EQUALS", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "EQUALS" => Some(Self::Equals), + _ => None, + } + } + } + #[pyo3::pyclass(dict, get_all, set_all)] + #[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum PropertyFilter { + #[prost(message, tag="1")] + TagFilter(super::TagPropertyFilter), + #[prost(message, tag="2")] + PartitionFilter(super::PartitionPropertyFilter), + #[prost(message, tag="3")] + ArtifactFilter(super::ArtifactPropertyFilter), + #[prost(message, tag="4")] + DatasetFilter(super::DatasetPropertyFilter), + } +} +/// Artifact properties we can filter by +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ArtifactPropertyFilter { + /// oneof because we can add more properties in the future + #[prost(oneof="artifact_property_filter::Property", tags="1")] + pub property: ::core::option::Option, +} +/// Nested message and enum types in `ArtifactPropertyFilter`. 
+pub mod artifact_property_filter { + /// oneof because we can add more properties in the future + #[pyo3::pyclass(dict, get_all, set_all)] + #[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Property { + #[prost(string, tag="1")] + ArtifactId(::prost::alloc::string::String), + } +} +/// Tag properties we can filter by +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TagPropertyFilter { + #[prost(oneof="tag_property_filter::Property", tags="1")] + pub property: ::core::option::Option, +} +/// Nested message and enum types in `TagPropertyFilter`. +pub mod tag_property_filter { + #[pyo3::pyclass(dict, get_all, set_all)] + #[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Property { + #[prost(string, tag="1")] + TagName(::prost::alloc::string::String), + } +} +/// Partition properties we can filter by +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct PartitionPropertyFilter { + #[prost(oneof="partition_property_filter::Property", tags="1")] + pub property: ::core::option::Option, +} +/// Nested message and enum types in `PartitionPropertyFilter`. 
+pub mod partition_property_filter { + #[pyo3::pyclass(dict, get_all, set_all)] + #[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Property { + #[prost(message, tag="1")] + KeyVal(super::KeyValuePair), + } +} +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct KeyValuePair { + #[prost(string, tag="1")] + pub key: ::prost::alloc::string::String, + #[prost(string, tag="2")] + pub value: ::prost::alloc::string::String, +} +/// Dataset properties we can filter by +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DatasetPropertyFilter { + #[prost(oneof="dataset_property_filter::Property", tags="1, 2, 3, 4, 5")] + pub property: ::core::option::Option, +} +/// Nested message and enum types in `DatasetPropertyFilter`. +pub mod dataset_property_filter { + #[pyo3::pyclass(dict, get_all, set_all)] + #[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Property { + #[prost(string, tag="1")] + Project(::prost::alloc::string::String), + #[prost(string, tag="2")] + Name(::prost::alloc::string::String), + #[prost(string, tag="3")] + Domain(::prost::alloc::string::String), + #[prost(string, tag="4")] + Version(::prost::alloc::string::String), + /// Optional, org key applied to the dataset. 
+ #[prost(string, tag="5")] + Org(::prost::alloc::string::String), + } +} +/// Pagination options for making list requests +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct PaginationOptions { + /// the max number of results to return + #[prost(uint32, tag="1")] + pub limit: u32, + /// the token to pass to fetch the next page + #[prost(string, tag="2")] + pub token: ::prost::alloc::string::String, + /// the property that we want to sort the results by + #[prost(enumeration="pagination_options::SortKey", tag="3")] + pub sort_key: i32, + /// the sort order of the results + #[prost(enumeration="pagination_options::SortOrder", tag="4")] + pub sort_order: i32, +} +/// Nested message and enum types in `PaginationOptions`. +pub mod pagination_options { + #[pyo3::pyclass(dict, get_all, set_all)] + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[repr(i32)] + pub enum SortOrder { + Descending = 0, + Ascending = 1, + } + impl SortOrder { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + SortOrder::Descending => "DESCENDING", + SortOrder::Ascending => "ASCENDING", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "DESCENDING" => Some(Self::Descending), + "ASCENDING" => Some(Self::Ascending), + _ => None, + } + } + } + #[pyo3::pyclass(dict, get_all, set_all)] + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[repr(i32)] + pub enum SortKey { + CreationTime = 0, + } + impl SortKey { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + SortKey::CreationTime => "CREATION_TIME", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "CREATION_TIME" => Some(Self::CreationTime), + _ => None, + } + } + } +} +/// Encoded file descriptor set for the `flyteidl2.datacatalog` package +pub const FILE_DESCRIPTOR_SET: &[u8] = &[ + 0x0a, 0x98, 0xa1, 0x01, 0x0a, 0x27, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, + 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2f, 0x64, 0x61, 0x74, 0x61, + 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x15, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, + 0x61, 0x6c, 0x6f, 0x67, 0x1a, 0x1d, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, + 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x6c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x73, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 
0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x50, 0x0a, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x44, 0x61, + 0x74, 0x61, 0x73, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x38, 0x0a, 0x07, + 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, + 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x52, 0x07, 0x64, + 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x22, 0x17, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, + 0x4f, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x3a, 0x0a, 0x07, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x44, 0x61, + 0x74, 0x61, 0x73, 0x65, 0x74, 0x49, 0x44, 0x52, 0x07, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, + 0x22, 0x4e, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, + 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x52, 0x07, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, + 0x22, 0xa0, 0x01, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3a, 0x0a, 0x07, 0x64, 0x61, 0x74, 0x61, 0x73, + 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 
0x20, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, + 0x2e, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x49, 0x44, 0x52, 0x07, 0x64, 0x61, 0x74, 0x61, + 0x73, 0x65, 0x74, 0x12, 0x21, 0x0a, 0x0b, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x5f, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0a, 0x61, 0x72, 0x74, 0x69, + 0x66, 0x61, 0x63, 0x74, 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x08, 0x74, 0x61, 0x67, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x07, 0x74, 0x61, 0x67, 0x4e, + 0x61, 0x6d, 0x65, 0x42, 0x0e, 0x0a, 0x0c, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x68, 0x61, 0x6e, + 0x64, 0x6c, 0x65, 0x22, 0x52, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, + 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x08, 0x61, 0x72, + 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, + 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x52, 0x08, 0x61, + 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x22, 0x54, 0x0a, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x3b, 0x0a, 0x08, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, + 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x41, 0x72, 0x74, 0x69, 0x66, + 0x61, 0x63, 0x74, 0x52, 0x08, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x22, 0x18, 0x0a, + 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3d, 0x0a, 0x0d, 0x41, 0x64, 0x64, 0x54, 
0x61, + 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2c, 0x0a, 0x03, 0x74, 0x61, 0x67, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x54, 0x61, + 0x67, 0x52, 0x03, 0x74, 0x61, 0x67, 0x22, 0x10, 0x0a, 0x0e, 0x41, 0x64, 0x64, 0x54, 0x61, 0x67, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xdd, 0x01, 0x0a, 0x14, 0x4c, 0x69, 0x73, + 0x74, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x3a, 0x0a, 0x07, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, + 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x73, + 0x65, 0x74, 0x49, 0x44, 0x52, 0x07, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x12, 0x3f, 0x0a, + 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, + 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x45, 0x78, 0x70, 0x72, + 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x48, + 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, + 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x50, 0x61, 0x67, 0x69, 0x6e, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0a, 0x70, 0x61, + 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x75, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, + 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x3d, 0x0a, 0x09, 
0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, + 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x41, 0x72, 0x74, + 0x69, 0x66, 0x61, 0x63, 0x74, 0x52, 0x09, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, + 0x12, 0x1d, 0x0a, 0x0a, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x65, 0x78, 0x74, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, + 0xa0, 0x01, 0x0a, 0x13, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x73, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3f, 0x0a, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x45, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x06, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x48, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x69, + 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, + 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x50, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0a, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x22, 0x71, 0x0a, 0x14, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, + 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x64, 0x61, + 0x74, 0x61, 0x73, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, + 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 
0x74, 0x52, 0x08, 0x64, 0x61, + 0x74, 0x61, 0x73, 0x65, 0x74, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x65, 0x78, 0x74, + 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x99, 0x02, 0x0a, 0x15, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x3a, 0x0a, 0x07, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x20, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, + 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, + 0x49, 0x44, 0x52, 0x07, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x12, 0x21, 0x0a, 0x0b, 0x61, + 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x48, 0x00, 0x52, 0x0a, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x49, 0x64, 0x12, 0x1b, + 0x0a, 0x08, 0x74, 0x61, 0x67, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x48, 0x00, 0x52, 0x07, 0x74, 0x61, 0x67, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x37, 0x0a, 0x04, 0x64, + 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, + 0x67, 0x2e, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x44, 0x61, 0x74, 0x61, 0x52, 0x04, + 0x64, 0x61, 0x74, 0x61, 0x12, 0x3b, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x42, 0x0e, 0x0a, 0x0c, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x68, 0x61, 0x6e, 0x64, 0x6c, + 0x65, 
0x22, 0x39, 0x0a, 0x16, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x72, 0x74, 0x69, 0x66, + 0x61, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x61, + 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x49, 0x64, 0x22, 0x6b, 0x0a, 0x0d, + 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x12, 0x3f, 0x0a, + 0x0a, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x20, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, + 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, + 0x74, 0x49, 0x44, 0x52, 0x09, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x49, 0x64, 0x12, 0x19, + 0x0a, 0x08, 0x74, 0x61, 0x67, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x74, 0x61, 0x67, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0xd1, 0x01, 0x0a, 0x1d, 0x47, 0x65, + 0x74, 0x4f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a, 0x0e, 0x72, + 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, + 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x65, + 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x65, 0x72, + 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x77, 0x6e, 0x65, + 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x77, 0x6e, 0x65, + 0x72, 0x49, 0x64, 0x12, 0x48, 0x0a, 0x12, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, + 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 
0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x68, 0x65, 0x61, 0x72, + 0x74, 0x62, 0x65, 0x61, 0x74, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x22, 0xb7, 0x02, + 0x0a, 0x0b, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4b, 0x0a, + 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x52, 0x65, + 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x52, 0x0d, 0x72, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x77, + 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x77, + 0x6e, 0x65, 0x72, 0x49, 0x64, 0x12, 0x48, 0x0a, 0x12, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, + 0x61, 0x74, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x11, 0x68, 0x65, + 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x12, + 0x39, 0x0a, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, + 0x09, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x41, 0x74, 0x12, 0x3b, 0x0a, 0x08, 0x6d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 
0x1f, 0x2e, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, + 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x66, 0x0a, 0x1e, 0x47, 0x65, 0x74, 0x4f, 0x72, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x44, 0x0a, 0x0b, 0x72, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, + 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, + 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x0b, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, + 0x83, 0x01, 0x0a, 0x19, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x65, 0x72, + 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4b, 0x0a, + 0x0e, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x52, 0x65, + 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x44, 0x52, 0x0d, 0x72, 0x65, 0x73, + 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x6f, 0x77, + 0x6e, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6f, 0x77, + 0x6e, 0x65, 0x72, 0x49, 0x64, 0x22, 0x1c, 0x0a, 0x1a, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, + 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x9e, 0x01, 0x0a, 0x07, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x12, + 0x30, 0x0a, 0x02, 
0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, + 0x6c, 0x6f, 0x67, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x49, 0x44, 0x52, 0x02, 0x69, + 0x64, 0x12, 0x3b, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, + 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x24, + 0x0a, 0x0d, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x73, 0x18, + 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x4b, 0x65, 0x79, 0x73, 0x22, 0x33, 0x0a, 0x09, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x91, 0x01, 0x0a, 0x09, 0x44, 0x61, + 0x74, 0x61, 0x73, 0x65, 0x74, 0x49, 0x44, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x18, 0x0a, + 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x55, 0x55, 0x49, 0x44, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x55, 0x55, 
0x49, 0x44, 0x12, 0x10, 0x0a, 0x03, 0x6f, + 0x72, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6f, 0x72, 0x67, 0x22, 0xf9, 0x02, + 0x0a, 0x08, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x3a, 0x0a, 0x07, 0x64, 0x61, + 0x74, 0x61, 0x73, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, + 0x6c, 0x6f, 0x67, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x49, 0x44, 0x52, 0x07, 0x64, + 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x12, 0x37, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, + 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x41, 0x72, 0x74, + 0x69, 0x66, 0x61, 0x63, 0x74, 0x44, 0x61, 0x74, 0x61, 0x52, 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, + 0x3b, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1f, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, + 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x40, 0x0a, 0x0a, + 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x20, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, + 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2e, + 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 
0x74, + 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x54, 0x61, 0x67, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x39, + 0x0a, 0x0a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, + 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74, 0x22, 0x51, 0x0a, 0x0c, 0x41, 0x72, 0x74, + 0x69, 0x66, 0x61, 0x63, 0x74, 0x44, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2d, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4c, 0x69, + 0x74, 0x65, 0x72, 0x61, 0x6c, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x76, 0x0a, 0x03, + 0x54, 0x61, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x61, 0x72, 0x74, 0x69, 0x66, + 0x61, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x61, 0x72, + 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x49, 0x64, 0x12, 0x3a, 0x0a, 0x07, 0x64, 0x61, 0x74, 0x61, + 0x73, 0x65, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, + 0x67, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x49, 0x44, 0x52, 0x07, 0x64, 0x61, 0x74, + 0x61, 0x73, 0x65, 0x74, 0x22, 0x8b, 0x01, 0x0a, 0x08, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x12, 0x44, 0x0a, 0x07, 0x6b, 0x65, 0x79, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, + 0x61, 0x74, 0x61, 0x63, 0x61, 
0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x06, 0x6b, 0x65, 0x79, 0x4d, 0x61, 0x70, 0x1a, 0x39, 0x0a, 0x0b, 0x4b, 0x65, 0x79, 0x4d, 0x61, + 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x22, 0x59, 0x0a, 0x10, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x45, 0x78, 0x70, 0x72, + 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x45, 0x0a, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, + 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x46, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x22, 0x80, 0x04, + 0x0a, 0x14, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x49, 0x0a, 0x0a, 0x74, 0x61, 0x67, 0x5f, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, + 0x6f, 0x67, 0x2e, 0x54, 0x61, 0x67, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x46, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x09, 0x74, 0x61, 0x67, 0x46, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x12, 0x5b, 0x0a, 0x10, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 
0x61, 0x63, 0x61, 0x74, 0x61, + 0x6c, 0x6f, 0x67, 0x2e, 0x50, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, + 0x70, 0x65, 0x72, 0x74, 0x79, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0f, 0x70, + 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x58, + 0x0a, 0x0f, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, + 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, + 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0e, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, + 0x63, 0x74, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x55, 0x0a, 0x0e, 0x64, 0x61, 0x74, 0x61, + 0x73, 0x65, 0x74, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2c, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, + 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, + 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x48, 0x00, + 0x52, 0x0d, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, + 0x5a, 0x0a, 0x08, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x0e, 0x32, 0x3e, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, + 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x53, 0x69, 0x6e, 0x67, 0x6c, 0x65, + 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x43, + 0x6f, 0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, + 0x72, 0x52, 0x08, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x22, 0x20, 0x0a, 0x12, 0x43, + 0x6f, 
0x6d, 0x70, 0x61, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, + 0x72, 0x12, 0x0a, 0x0a, 0x06, 0x45, 0x51, 0x55, 0x41, 0x4c, 0x53, 0x10, 0x00, 0x42, 0x11, 0x0a, + 0x0f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x5f, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x22, 0x47, 0x0a, 0x16, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x50, 0x72, 0x6f, 0x70, + 0x65, 0x72, 0x74, 0x79, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0b, 0x61, 0x72, + 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, + 0x00, 0x52, 0x0a, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x49, 0x64, 0x42, 0x0a, 0x0a, + 0x08, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x22, 0x3c, 0x0a, 0x11, 0x54, 0x61, 0x67, + 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x12, 0x1b, + 0x0a, 0x08, 0x74, 0x61, 0x67, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x48, 0x00, 0x52, 0x07, 0x74, 0x61, 0x67, 0x4e, 0x61, 0x6d, 0x65, 0x42, 0x0a, 0x0a, 0x08, 0x70, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x22, 0x65, 0x0a, 0x17, 0x50, 0x61, 0x72, 0x74, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x46, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x12, 0x3e, 0x0a, 0x07, 0x6b, 0x65, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, + 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x4b, 0x65, 0x79, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x50, 0x61, 0x69, 0x72, 0x48, 0x00, 0x52, 0x06, 0x6b, 0x65, 0x79, 0x56, + 0x61, 0x6c, 0x42, 0x0a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x22, 0x36, + 0x0a, 0x0c, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x50, 0x61, 0x69, 0x72, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 
0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x9f, 0x01, 0x0a, 0x15, 0x44, 0x61, 0x74, 0x61, 0x73, + 0x65, 0x74, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x12, 0x1a, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x48, 0x00, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x14, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x48, 0x00, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x1a, 0x0a, 0x07, + 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, + 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x03, 0x6f, 0x72, 0x67, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x03, 0x6f, 0x72, 0x67, 0x42, 0x0a, 0x0a, 0x08, + 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x22, 0xa7, 0x02, 0x0a, 0x11, 0x50, 0x61, 0x67, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x14, + 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x6c, + 0x69, 0x6d, 0x69, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x4a, 0x0a, 0x07, 0x73, 0x6f, + 0x72, 0x74, 0x4b, 0x65, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x30, 0x2e, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, + 0x6c, 0x6f, 0x67, 0x2e, 0x50, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x6f, 0x72, 0x74, 0x4b, 0x65, 0x79, 0x52, 0x07, 0x73, + 0x6f, 0x72, 0x74, 0x4b, 0x65, 0x79, 0x12, 0x50, 0x0a, 0x09, 0x73, 0x6f, 0x72, 
0x74, 0x4f, 0x72, + 0x64, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x32, 0x2e, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, + 0x67, 0x2e, 0x50, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x2e, 0x53, 0x6f, 0x72, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x52, 0x09, 0x73, + 0x6f, 0x72, 0x74, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x22, 0x2a, 0x0a, 0x09, 0x53, 0x6f, 0x72, 0x74, + 0x4f, 0x72, 0x64, 0x65, 0x72, 0x12, 0x0e, 0x0a, 0x0a, 0x44, 0x45, 0x53, 0x43, 0x45, 0x4e, 0x44, + 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x41, 0x53, 0x43, 0x45, 0x4e, 0x44, 0x49, + 0x4e, 0x47, 0x10, 0x01, 0x22, 0x1c, 0x0a, 0x07, 0x53, 0x6f, 0x72, 0x74, 0x4b, 0x65, 0x79, 0x12, + 0x11, 0x0a, 0x0d, 0x43, 0x52, 0x45, 0x41, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x54, 0x49, 0x4d, 0x45, + 0x10, 0x00, 0x32, 0xcf, 0x08, 0x0a, 0x0b, 0x44, 0x61, 0x74, 0x61, 0x43, 0x61, 0x74, 0x61, 0x6c, + 0x6f, 0x67, 0x12, 0x6a, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, + 0x73, 0x65, 0x74, 0x12, 0x2b, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, + 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x2c, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, + 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x44, + 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x61, + 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x12, 0x28, 0x2e, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, + 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x52, + 0x65, 0x71, 0x75, 
0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x47, + 0x65, 0x74, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x6d, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x72, 0x74, 0x69, 0x66, + 0x61, 0x63, 0x74, 0x12, 0x2c, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, + 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x43, 0x72, 0x65, 0x61, + 0x74, 0x65, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x2d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, + 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x64, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x12, + 0x29, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, + 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x72, 0x74, 0x69, 0x66, + 0x61, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2a, 0x2e, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, + 0x6f, 0x67, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x55, 0x0a, 0x06, 0x41, 0x64, 0x64, 0x54, 0x61, 0x67, + 0x12, 0x24, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, + 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x41, 0x64, 0x64, 0x54, 0x61, 0x67, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x25, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 
0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x41, + 0x64, 0x64, 0x54, 0x61, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x6a, 0x0a, + 0x0d, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x12, 0x2b, + 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, + 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x72, 0x74, 0x69, 0x66, + 0x61, 0x63, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, + 0x6c, 0x6f, 0x67, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x67, 0x0a, 0x0c, 0x4c, 0x69, 0x73, + 0x74, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x73, 0x12, 0x2a, 0x2e, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, + 0x67, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x6d, 0x0a, 0x0e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x72, 0x74, 0x69, + 0x66, 0x61, 0x63, 0x74, 0x12, 0x2c, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, + 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, + 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x55, 0x70, 0x64, 0x61, 
0x74, + 0x65, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x85, 0x01, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x4f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x64, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x34, 0x2e, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, + 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, + 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, + 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x72, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x79, 0x0a, 0x12, 0x52, 0x65, 0x6c, + 0x65, 0x61, 0x73, 0x65, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x30, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, + 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x52, + 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x31, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, + 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, + 0x65, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x42, 0xdd, 0x01, 0x0a, 0x19, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, + 0x6f, 0x67, 0x42, 0x10, 0x44, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 
0x01, 0x5a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x64, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, + 0x67, 0xa2, 0x02, 0x03, 0x46, 0x44, 0x58, 0xaa, 0x02, 0x15, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x2e, 0x44, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0xca, + 0x02, 0x15, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x44, 0x61, 0x74, 0x61, + 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0xe2, 0x02, 0x21, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x5c, 0x44, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x5c, + 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x16, 0x46, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x3a, 0x3a, 0x44, 0x61, 0x74, 0x61, 0x63, 0x61, 0x74, + 0x61, 0x6c, 0x6f, 0x67, 0x4a, 0x9d, 0x6e, 0x0a, 0x07, 0x12, 0x05, 0x00, 0x00, 0x9c, 0x03, 0x01, + 0x0a, 0x08, 0x0a, 0x01, 0x0c, 0x12, 0x03, 0x00, 0x00, 0x12, 0x0a, 0x08, 0x0a, 0x01, 0x02, 0x12, + 0x03, 0x02, 0x00, 0x1e, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x00, 0x12, 0x03, 0x04, 0x00, 0x27, 0x0a, + 0x09, 0x0a, 0x02, 0x03, 0x01, 0x12, 0x03, 0x05, 0x00, 0x28, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x02, + 0x12, 0x03, 0x06, 0x00, 0x29, 0x0a, 0x08, 0x0a, 0x01, 0x08, 0x12, 0x03, 0x08, 0x00, 0x50, 0x0a, + 0x09, 0x0a, 0x02, 0x08, 0x0b, 0x12, 0x03, 0x08, 0x00, 0x50, 0x0a, 0xff, 0x01, 0x0a, 0x02, 0x06, + 0x00, 0x12, 0x04, 0x11, 0x00, 0x3c, 0x01, 0x1a, 0xd4, 0x01, 0x0a, 0x20, 0x44, 0x61, 0x74, 0x61, + 0x20, 0x43, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x20, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x20, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x20, 0x44, 0x61, 0x74, + 0x61, 0x20, 0x43, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x20, 0x69, 
0x73, 0x20, 0x61, 0x20, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x69, 0x6e, 0x64, 0x65, 0x78, + 0x69, 0x6e, 0x67, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x69, 0x7a, 0x65, + 0x64, 0x2c, 0x20, 0x73, 0x74, 0x72, 0x6f, 0x6e, 0x67, 0x6c, 0x79, 0x2d, 0x74, 0x79, 0x70, 0x65, + 0x64, 0x20, 0x64, 0x61, 0x74, 0x61, 0x20, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, + 0x20, 0x61, 0x63, 0x72, 0x6f, 0x73, 0x73, 0x20, 0x72, 0x65, 0x76, 0x69, 0x73, 0x69, 0x6f, 0x6e, + 0x73, 0x2e, 0x0a, 0x20, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x20, 0x61, 0x72, + 0x65, 0x20, 0x61, 0x73, 0x73, 0x6f, 0x63, 0x69, 0x61, 0x74, 0x65, 0x64, 0x20, 0x77, 0x69, 0x74, + 0x68, 0x20, 0x61, 0x20, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x2c, 0x20, 0x61, 0x6e, 0x64, + 0x20, 0x63, 0x61, 0x6e, 0x20, 0x62, 0x65, 0x20, 0x74, 0x61, 0x67, 0x67, 0x65, 0x64, 0x20, 0x66, + 0x6f, 0x72, 0x20, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x61, 0x6c, 0x2e, 0x0a, 0x32, 0x1c, + 0x20, 0x54, 0x4f, 0x44, 0x4f, 0x20, 0x40, 0x70, 0x76, 0x64, 0x69, 0x74, 0x74, 0x20, 0x63, 0x6c, + 0x65, 0x61, 0x6e, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x75, 0x70, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, + 0x06, 0x00, 0x01, 0x12, 0x03, 0x11, 0x08, 0x13, 0x0a, 0xab, 0x01, 0x0a, 0x04, 0x06, 0x00, 0x02, + 0x00, 0x12, 0x03, 0x14, 0x02, 0x4a, 0x1a, 0x9d, 0x01, 0x20, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x20, 0x61, 0x20, 0x6e, 0x65, 0x77, 0x20, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x2e, 0x20, + 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x73, 0x20, 0x61, 0x72, 0x65, 0x20, 0x75, 0x6e, 0x69, + 0x71, 0x75, 0x65, 0x20, 0x62, 0x61, 0x73, 0x65, 0x64, 0x20, 0x6f, 0x6e, 0x20, 0x74, 0x68, 0x65, + 0x20, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x49, 0x44, 0x2e, 0x20, 0x44, 0x61, 0x74, 0x61, + 0x73, 0x65, 0x74, 0x73, 0x20, 0x61, 0x72, 0x65, 0x20, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, + 0x20, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x69, 0x6e, 0x67, 0x73, 0x20, 0x6f, 0x66, 0x20, 0x61, 0x72, + 0x74, 
0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x2e, 0x0a, 0x20, 0x45, 0x61, 0x63, 0x68, 0x20, 0x64, + 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x20, 0x63, 0x61, 0x6e, 0x20, 0x68, 0x61, 0x76, 0x65, 0x20, + 0x6f, 0x6e, 0x65, 0x20, 0x6f, 0x72, 0x20, 0x6d, 0x6f, 0x72, 0x65, 0x20, 0x61, 0x72, 0x74, 0x69, + 0x66, 0x61, 0x63, 0x74, 0x73, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x00, 0x01, 0x12, + 0x03, 0x14, 0x06, 0x13, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x00, 0x02, 0x12, 0x03, 0x14, + 0x14, 0x28, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x00, 0x03, 0x12, 0x03, 0x14, 0x33, 0x48, + 0x0a, 0x65, 0x0a, 0x04, 0x06, 0x00, 0x02, 0x01, 0x12, 0x03, 0x17, 0x02, 0x41, 0x1a, 0x58, 0x20, + 0x47, 0x65, 0x74, 0x20, 0x61, 0x20, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x20, 0x62, 0x79, + 0x20, 0x74, 0x68, 0x65, 0x20, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x49, 0x44, 0x2e, 0x20, + 0x54, 0x68, 0x69, 0x73, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x73, 0x20, 0x74, 0x68, 0x65, + 0x20, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x20, 0x77, 0x69, 0x74, 0x68, 0x20, 0x74, 0x68, + 0x65, 0x20, 0x61, 0x73, 0x73, 0x6f, 0x63, 0x69, 0x61, 0x74, 0x65, 0x64, 0x20, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x01, 0x01, + 0x12, 0x03, 0x17, 0x06, 0x10, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x01, 0x02, 0x12, 0x03, + 0x17, 0x11, 0x22, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x01, 0x03, 0x12, 0x03, 0x17, 0x2d, + 0x3f, 0x0a, 0x93, 0x01, 0x0a, 0x04, 0x06, 0x00, 0x02, 0x02, 0x12, 0x03, 0x1b, 0x02, 0x4d, 0x1a, + 0x85, 0x01, 0x20, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x20, 0x61, 0x6e, 0x20, 0x61, 0x72, 0x74, + 0x69, 0x66, 0x61, 0x63, 0x74, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x74, 0x68, 0x65, 0x20, 0x61, 0x72, + 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x20, 0x64, 0x61, 0x74, 0x61, 0x20, 0x61, 0x73, 0x73, 0x6f, + 0x63, 0x69, 0x61, 0x74, 0x65, 0x64, 0x20, 0x77, 0x69, 0x74, 0x68, 0x20, 0x69, 0x74, 0x2e, 0x20, + 0x41, 0x6e, 0x20, 0x61, 0x72, 0x74, 0x69, 
0x66, 0x61, 0x63, 0x74, 0x20, 0x63, 0x61, 0x6e, 0x20, + 0x62, 0x65, 0x20, 0x61, 0x20, 0x68, 0x69, 0x76, 0x65, 0x20, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x20, 0x6f, 0x72, 0x20, 0x61, 0x72, 0x62, 0x69, 0x74, 0x72, 0x61, 0x72, 0x79, + 0x0a, 0x20, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x20, 0x6f, 0x72, 0x20, 0x64, 0x61, 0x74, 0x61, 0x20, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x02, 0x01, + 0x12, 0x03, 0x1b, 0x06, 0x14, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x02, 0x02, 0x12, 0x03, + 0x1b, 0x15, 0x2a, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x02, 0x03, 0x12, 0x03, 0x1b, 0x35, + 0x4b, 0x0a, 0x74, 0x0a, 0x04, 0x06, 0x00, 0x02, 0x03, 0x12, 0x03, 0x1e, 0x02, 0x44, 0x1a, 0x67, + 0x20, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x20, 0x61, 0x6e, 0x20, 0x61, 0x72, 0x74, + 0x69, 0x66, 0x61, 0x63, 0x74, 0x20, 0x62, 0x79, 0x20, 0x61, 0x6e, 0x20, 0x69, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x66, 0x79, 0x69, 0x6e, 0x67, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x2e, 0x20, + 0x54, 0x68, 0x69, 0x73, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x73, 0x20, 0x61, 0x6e, 0x20, + 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x20, 0x61, 0x6c, 0x6f, 0x6e, 0x67, 0x20, 0x77, + 0x69, 0x74, 0x68, 0x20, 0x74, 0x68, 0x65, 0x20, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, + 0x20, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x03, 0x01, + 0x12, 0x03, 0x1e, 0x06, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x03, 0x02, 0x12, 0x03, + 0x1e, 0x12, 0x24, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x03, 0x03, 0x12, 0x03, 0x1e, 0x2f, + 0x42, 0x0a, 0x52, 0x0a, 0x04, 0x06, 0x00, 0x02, 0x04, 0x12, 0x03, 0x21, 0x02, 0x35, 0x1a, 0x45, + 0x20, 0x41, 0x73, 0x73, 0x6f, 0x63, 0x69, 0x61, 0x74, 0x65, 0x20, 0x61, 0x20, 0x74, 0x61, 0x67, + 0x20, 0x77, 0x69, 0x74, 0x68, 0x20, 0x61, 0x6e, 0x20, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, + 0x74, 0x2e, 0x20, 0x54, 0x61, 0x67, 0x73, 0x20, 0x61, 0x72, 0x65, 0x20, 0x75, 
0x6e, 0x69, 0x71, + 0x75, 0x65, 0x20, 0x77, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x20, 0x61, 0x20, 0x44, 0x61, 0x74, 0x61, + 0x73, 0x65, 0x74, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x04, 0x01, 0x12, 0x03, + 0x21, 0x06, 0x0c, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x04, 0x02, 0x12, 0x03, 0x21, 0x0d, + 0x1a, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x04, 0x03, 0x12, 0x03, 0x21, 0x25, 0x33, 0x0a, + 0x33, 0x0a, 0x04, 0x06, 0x00, 0x02, 0x05, 0x12, 0x03, 0x24, 0x02, 0x4a, 0x1a, 0x26, 0x20, 0x52, + 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x61, 0x20, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x65, + 0x64, 0x20, 0x6c, 0x69, 0x73, 0x74, 0x20, 0x6f, 0x66, 0x20, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, + 0x63, 0x74, 0x73, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x05, 0x01, 0x12, 0x03, 0x24, + 0x06, 0x13, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x05, 0x02, 0x12, 0x03, 0x24, 0x14, 0x28, + 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x05, 0x03, 0x12, 0x03, 0x24, 0x33, 0x48, 0x0a, 0x32, + 0x0a, 0x04, 0x06, 0x00, 0x02, 0x06, 0x12, 0x03, 0x27, 0x02, 0x47, 0x1a, 0x25, 0x20, 0x52, 0x65, + 0x74, 0x75, 0x72, 0x6e, 0x20, 0x61, 0x20, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x64, + 0x20, 0x6c, 0x69, 0x73, 0x74, 0x20, 0x6f, 0x66, 0x20, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, + 0x73, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x06, 0x01, 0x12, 0x03, 0x27, 0x06, 0x12, + 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x06, 0x02, 0x12, 0x03, 0x27, 0x13, 0x26, 0x0a, 0x0c, + 0x0a, 0x05, 0x06, 0x00, 0x02, 0x06, 0x03, 0x12, 0x03, 0x27, 0x31, 0x45, 0x0a, 0x71, 0x0a, 0x04, + 0x06, 0x00, 0x02, 0x07, 0x12, 0x03, 0x2a, 0x02, 0x4d, 0x1a, 0x64, 0x20, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x73, 0x20, 0x61, 0x6e, 0x20, 0x65, 0x78, 0x69, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x20, + 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x2c, 0x20, 0x6f, 0x76, 0x65, 0x72, 0x77, 0x72, + 0x69, 0x74, 0x69, 0x6e, 0x67, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x64, + 0x20, 0x61, 0x72, 
0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x20, 0x64, 0x61, 0x74, 0x61, 0x20, 0x69, + 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x75, 0x6e, 0x64, 0x65, 0x72, 0x6c, 0x79, 0x69, 0x6e, 0x67, + 0x20, 0x62, 0x6c, 0x6f, 0x62, 0x20, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x2e, 0x0a, 0x0a, + 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x07, 0x01, 0x12, 0x03, 0x2a, 0x06, 0x14, 0x0a, 0x0c, 0x0a, + 0x05, 0x06, 0x00, 0x02, 0x07, 0x02, 0x12, 0x03, 0x2a, 0x15, 0x2a, 0x0a, 0x0c, 0x0a, 0x05, 0x06, + 0x00, 0x02, 0x07, 0x03, 0x12, 0x03, 0x2a, 0x35, 0x4b, 0x0a, 0xfd, 0x06, 0x0a, 0x04, 0x06, 0x00, + 0x02, 0x08, 0x12, 0x03, 0x37, 0x02, 0x65, 0x1a, 0xef, 0x06, 0x20, 0x41, 0x74, 0x74, 0x65, 0x6d, + 0x70, 0x74, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x67, 0x65, 0x74, 0x20, 0x6f, 0x72, 0x20, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x64, 0x20, 0x61, 0x20, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x6f, 0x72, 0x72, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x20, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, + 0x74, 0x2e, 0x20, 0x49, 0x66, 0x20, 0x6f, 0x6e, 0x65, 0x20, 0x61, 0x6c, 0x72, 0x65, 0x61, 0x64, + 0x79, 0x20, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x0a, 0x20, 0x28, 0x69, 0x65, 0x2e, 0x20, 0x61, + 0x6e, 0x6f, 0x74, 0x68, 0x65, 0x72, 0x20, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x20, 0x6f, 0x77, + 0x6e, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x29, 0x20, 0x74, 0x68, 0x65, 0x6e, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x72, 0x65, + 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x69, 0x73, 0x20, 0x72, 0x65, 0x74, + 0x72, 0x69, 0x65, 0x76, 0x65, 0x64, 0x2e, 0x0a, 0x20, 0x4f, 0x6e, 0x63, 0x65, 0x20, 0x79, 0x6f, + 0x75, 0x20, 0x61, 0x63, 0x71, 0x75, 0x69, 0x72, 0x65, 0x20, 0x61, 0x20, 0x72, 0x65, 0x73, 0x65, + 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x20, 0x79, 0x6f, 0x75, 0x20, 0x6e, 0x65, 0x65, + 0x64, 0x20, 0x74, 0x6f, 0x20, 0x20, 0x70, 0x65, 0x72, 
0x69, 0x6f, 0x64, 0x69, 0x63, 0x61, 0x6c, + 0x6c, 0x79, 0x20, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x20, 0x74, 0x68, 0x65, 0x20, 0x72, 0x65, + 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x77, 0x69, 0x74, 0x68, 0x20, 0x61, + 0x6e, 0x0a, 0x20, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x6c, 0x20, 0x63, 0x61, 0x6c, + 0x6c, 0x2e, 0x20, 0x49, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x69, 0x73, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x64, 0x65, 0x64, 0x20, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x20, 0x74, 0x68, 0x65, + 0x20, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x20, 0x65, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x2c, 0x20, 0x69, 0x74, 0x20, 0x6d, 0x61, 0x79, 0x20, 0x62, 0x65, 0x0a, 0x20, + 0x61, 0x63, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x61, 0x6e, 0x6f, 0x74, + 0x68, 0x65, 0x72, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x0a, 0x20, 0x4e, 0x6f, 0x74, 0x65, 0x3a, + 0x20, 0x57, 0x65, 0x20, 0x6d, 0x61, 0x79, 0x20, 0x68, 0x61, 0x76, 0x65, 0x20, 0x6d, 0x75, 0x6c, + 0x74, 0x69, 0x70, 0x6c, 0x65, 0x20, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, + 0x20, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x20, 0x77, 0x69, 0x74, 0x68, 0x20, 0x74, 0x68, 0x65, 0x20, + 0x73, 0x61, 0x6d, 0x65, 0x20, 0x73, 0x69, 0x67, 0x6e, 0x61, 0x74, 0x75, 0x72, 0x65, 0x20, 0x61, + 0x6e, 0x64, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x61, 0x6d, 0x65, 0x20, 0x69, 0x6e, 0x70, 0x75, + 0x74, 0x20, 0x74, 0x68, 0x61, 0x74, 0x0a, 0x20, 0x74, 0x72, 0x79, 0x20, 0x74, 0x6f, 0x20, 0x70, + 0x6f, 0x70, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x61, 0x6d, 0x65, + 0x20, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x20, 0x61, 0x74, 0x20, 0x74, 0x68, 0x65, + 0x20, 0x73, 0x61, 0x6d, 0x65, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x20, 0x54, 0x68, 0x75, 0x73, + 0x20, 0x77, 0x69, 0x74, 0x68, 0x20, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 
0x6f, + 0x6e, 0x2c, 0x20, 0x6f, 0x6e, 0x6c, 0x79, 0x20, 0x6f, 0x6e, 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, + 0x20, 0x63, 0x61, 0x6e, 0x0a, 0x20, 0x72, 0x75, 0x6e, 0x20, 0x61, 0x74, 0x20, 0x61, 0x20, 0x74, + 0x69, 0x6d, 0x65, 0x2c, 0x20, 0x75, 0x6e, 0x74, 0x69, 0x6c, 0x20, 0x74, 0x68, 0x65, 0x20, 0x72, + 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x65, 0x78, 0x70, 0x69, 0x72, + 0x65, 0x73, 0x2e, 0x0a, 0x20, 0x4e, 0x6f, 0x74, 0x65, 0x3a, 0x20, 0x49, 0x66, 0x20, 0x74, 0x61, + 0x73, 0x6b, 0x20, 0x41, 0x20, 0x64, 0x6f, 0x65, 0x73, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x64, 0x20, 0x74, 0x68, 0x65, 0x20, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x69, 0x6e, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x20, 0x61, 0x6e, 0x64, + 0x20, 0x74, 0x68, 0x65, 0x20, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x20, 0x65, 0x78, 0x70, 0x69, 0x72, 0x65, 0x73, 0x2c, 0x20, 0x61, 0x6e, 0x6f, 0x74, 0x68, 0x65, + 0x72, 0x0a, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x42, 0x20, 0x6d, 0x61, 0x79, 0x20, 0x74, 0x61, + 0x6b, 0x65, 0x20, 0x6f, 0x76, 0x65, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x72, 0x65, 0x73, 0x65, + 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x69, + 0x6e, 0x67, 0x20, 0x69, 0x6e, 0x20, 0x74, 0x77, 0x6f, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x20, + 0x41, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x42, 0x20, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x20, + 0x69, 0x6e, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6c, 0x6c, 0x65, 0x6c, 0x2e, 0x20, 0x53, 0x6f, 0x0a, + 0x20, 0x61, 0x20, 0x74, 0x68, 0x69, 0x72, 0x64, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x43, 0x20, + 0x6d, 0x61, 0x79, 0x20, 0x67, 0x65, 0x74, 0x20, 0x74, 0x68, 0x65, 0x20, 0x41, 0x72, 0x74, 0x69, + 0x66, 0x61, 0x63, 0x74, 0x20, 0x66, 0x72, 0x6f, 0x6d, 0x20, 0x41, 0x20, 0x6f, 0x72, 0x20, 0x42, + 0x2c, 0x20, 0x77, 0x68, 0x69, 0x63, 0x68, 0x65, 0x76, 0x65, 0x72, 0x20, 0x77, 0x72, 0x69, 0x74, + 0x65, 0x73, 0x20, 0x6c, 0x61, 
0x73, 0x74, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, + 0x08, 0x01, 0x12, 0x03, 0x37, 0x06, 0x1c, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x08, 0x02, + 0x12, 0x03, 0x37, 0x1d, 0x3a, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x08, 0x03, 0x12, 0x03, + 0x37, 0x45, 0x63, 0x0a, 0x77, 0x0a, 0x04, 0x06, 0x00, 0x02, 0x09, 0x12, 0x03, 0x3b, 0x02, 0x59, + 0x1a, 0x6a, 0x20, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x72, + 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x77, 0x68, 0x65, 0x6e, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x68, 0x6f, 0x6c, 0x64, 0x69, 0x6e, 0x67, + 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x70, 0x6f, 0x74, 0x20, 0x66, 0x61, 0x69, 0x6c, 0x73, 0x20, + 0x73, 0x6f, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6f, 0x74, 0x68, 0x65, + 0x72, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x0a, 0x20, 0x63, 0x61, 0x6e, 0x20, 0x67, 0x72, 0x61, + 0x62, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x70, 0x6f, 0x74, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, + 0x06, 0x00, 0x02, 0x09, 0x01, 0x12, 0x03, 0x3b, 0x06, 0x18, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, + 0x02, 0x09, 0x02, 0x12, 0x03, 0x3b, 0x19, 0x32, 0x0a, 0x0c, 0x0a, 0x05, 0x06, 0x00, 0x02, 0x09, + 0x03, 0x12, 0x03, 0x3b, 0x3d, 0x57, 0x0a, 0x36, 0x0a, 0x02, 0x04, 0x00, 0x12, 0x04, 0x41, 0x00, + 0x43, 0x01, 0x1a, 0x2a, 0x0a, 0x20, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x20, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, + 0x6e, 0x67, 0x20, 0x61, 0x20, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x2e, 0x0a, 0x0a, 0x0a, + 0x0a, 0x03, 0x04, 0x00, 0x01, 0x12, 0x03, 0x41, 0x08, 0x1c, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x00, + 0x02, 0x00, 0x12, 0x03, 0x42, 0x02, 0x16, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x06, + 0x12, 0x03, 0x42, 0x02, 0x09, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x01, 0x12, 0x03, + 0x42, 0x0a, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 
0x03, 0x12, 0x03, 0x42, 0x14, + 0x15, 0x0a, 0x35, 0x0a, 0x02, 0x04, 0x01, 0x12, 0x03, 0x48, 0x00, 0x20, 0x1a, 0x2a, 0x0a, 0x20, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x20, 0x66, 0x6f, 0x72, 0x20, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6e, 0x67, 0x20, 0x61, 0x20, + 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x01, 0x01, 0x12, + 0x03, 0x48, 0x08, 0x1d, 0x0a, 0x97, 0x01, 0x0a, 0x02, 0x04, 0x02, 0x12, 0x04, 0x4e, 0x00, 0x50, + 0x01, 0x1a, 0x8a, 0x01, 0x0a, 0x20, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x20, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, + 0x76, 0x69, 0x6e, 0x67, 0x20, 0x61, 0x20, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x2e, 0x20, + 0x54, 0x68, 0x65, 0x20, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x20, 0x69, 0x73, 0x20, 0x72, + 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x69, 0x74, 0x27, 0x73, + 0x20, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x20, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x0a, 0x20, 0x77, 0x68, 0x69, 0x63, 0x68, 0x20, 0x69, 0x73, 0x20, 0x61, 0x20, 0x63, + 0x6f, 0x6d, 0x62, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x6f, 0x66, 0x20, 0x73, 0x65, + 0x76, 0x65, 0x72, 0x61, 0x6c, 0x20, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x2e, 0x0a, 0x0a, 0x0a, + 0x0a, 0x03, 0x04, 0x02, 0x01, 0x12, 0x03, 0x4e, 0x08, 0x19, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x02, + 0x02, 0x00, 0x12, 0x03, 0x4f, 0x02, 0x18, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, 0x06, + 0x12, 0x03, 0x4f, 0x02, 0x0b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, 0x01, 0x12, 0x03, + 0x4f, 0x0c, 0x13, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, 0x03, 0x12, 0x03, 0x4f, 0x16, + 0x17, 0x0a, 0x72, 0x0a, 0x02, 0x04, 0x03, 0x12, 0x04, 0x56, 0x00, 0x58, 0x01, 0x1a, 0x66, 0x0a, + 0x20, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 
0x20, 0x66, 0x6f, 0x72, 0x20, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x69, 0x6e, 0x67, + 0x20, 0x61, 0x20, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x2e, 0x20, 0x54, 0x68, 0x65, 0x20, + 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x69, 0x6e, + 0x63, 0x6c, 0x75, 0x64, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x0a, 0x20, 0x44, 0x61, 0x74, 0x61, + 0x73, 0x65, 0x74, 0x2e, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x03, 0x01, 0x12, 0x03, 0x56, 0x08, + 0x1a, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x03, 0x02, 0x00, 0x12, 0x03, 0x57, 0x02, 0x16, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x03, 0x02, 0x00, 0x06, 0x12, 0x03, 0x57, 0x02, 0x09, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x03, 0x02, 0x00, 0x01, 0x12, 0x03, 0x57, 0x0a, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, + 0x02, 0x00, 0x03, 0x12, 0x03, 0x57, 0x14, 0x15, 0x0a, 0xef, 0x01, 0x0a, 0x02, 0x04, 0x04, 0x12, + 0x04, 0x5f, 0x00, 0x66, 0x01, 0x1a, 0xe2, 0x01, 0x0a, 0x20, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x72, 0x65, + 0x74, 0x72, 0x69, 0x65, 0x76, 0x69, 0x6e, 0x67, 0x20, 0x61, 0x6e, 0x20, 0x41, 0x72, 0x74, 0x69, + 0x66, 0x61, 0x63, 0x74, 0x2e, 0x20, 0x52, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x20, 0x61, + 0x6e, 0x20, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x20, 0x62, 0x61, 0x73, 0x65, 0x64, + 0x20, 0x6f, 0x6e, 0x20, 0x61, 0x20, 0x71, 0x75, 0x65, 0x72, 0x79, 0x20, 0x68, 0x61, 0x6e, 0x64, + 0x6c, 0x65, 0x20, 0x74, 0x68, 0x61, 0x74, 0x0a, 0x20, 0x63, 0x61, 0x6e, 0x20, 0x62, 0x65, 0x20, + 0x6f, 0x6e, 0x65, 0x20, 0x6f, 0x66, 0x20, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x5f, + 0x69, 0x64, 0x20, 0x6f, 0x72, 0x20, 0x74, 0x61, 0x67, 0x2e, 0x20, 0x54, 0x68, 0x65, 0x20, 0x72, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x65, 0x64, 0x20, 0x77, + 0x69, 0x6c, 0x6c, 0x20, 0x69, 0x6e, 0x63, 
0x6c, 0x75, 0x64, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, + 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x20, 0x64, 0x61, 0x74, 0x61, 0x20, 0x61, 0x6e, + 0x64, 0x20, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x0a, 0x20, 0x61, 0x73, 0x73, 0x6f, + 0x63, 0x69, 0x61, 0x74, 0x65, 0x64, 0x20, 0x77, 0x69, 0x74, 0x68, 0x20, 0x74, 0x68, 0x65, 0x20, + 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x2e, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x04, + 0x01, 0x12, 0x03, 0x5f, 0x08, 0x1a, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x04, 0x02, 0x00, 0x12, 0x03, + 0x60, 0x02, 0x18, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x00, 0x06, 0x12, 0x03, 0x60, 0x02, + 0x0b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x00, 0x01, 0x12, 0x03, 0x60, 0x0c, 0x13, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x00, 0x03, 0x12, 0x03, 0x60, 0x16, 0x17, 0x0a, 0x0c, 0x0a, + 0x04, 0x04, 0x04, 0x08, 0x00, 0x12, 0x04, 0x62, 0x02, 0x65, 0x03, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x04, 0x08, 0x00, 0x01, 0x12, 0x03, 0x62, 0x08, 0x14, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x04, 0x02, + 0x01, 0x12, 0x03, 0x63, 0x04, 0x1b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x01, 0x05, 0x12, + 0x03, 0x63, 0x04, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x01, 0x01, 0x12, 0x03, 0x63, + 0x0b, 0x16, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x01, 0x03, 0x12, 0x03, 0x63, 0x19, 0x1a, + 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x04, 0x02, 0x02, 0x12, 0x03, 0x64, 0x04, 0x18, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x04, 0x02, 0x02, 0x05, 0x12, 0x03, 0x64, 0x04, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x04, 0x02, 0x02, 0x01, 0x12, 0x03, 0x64, 0x0b, 0x13, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, + 0x02, 0x03, 0x12, 0x03, 0x64, 0x16, 0x17, 0x0a, 0x9b, 0x01, 0x0a, 0x02, 0x04, 0x05, 0x12, 0x04, + 0x6c, 0x00, 0x6e, 0x01, 0x1a, 0x8e, 0x01, 0x0a, 0x20, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x72, 0x65, + 0x74, 0x72, 0x69, 0x65, 0x76, 0x69, 0x6e, 0x67, 0x20, 0x61, 0x6e, 0x20, 0x41, 
0x72, 0x74, 0x69, + 0x66, 0x61, 0x63, 0x74, 0x2e, 0x20, 0x54, 0x68, 0x65, 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x65, 0x64, 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x69, + 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x61, 0x72, 0x74, 0x69, 0x66, + 0x61, 0x63, 0x74, 0x20, 0x64, 0x61, 0x74, 0x61, 0x0a, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x6d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x20, 0x61, 0x73, 0x73, 0x6f, 0x63, 0x69, 0x61, 0x74, 0x65, + 0x64, 0x20, 0x77, 0x69, 0x74, 0x68, 0x20, 0x74, 0x68, 0x65, 0x20, 0x61, 0x72, 0x74, 0x69, 0x66, + 0x61, 0x63, 0x74, 0x2e, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x05, 0x01, 0x12, 0x03, 0x6c, 0x08, + 0x1b, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x05, 0x02, 0x00, 0x12, 0x03, 0x6d, 0x02, 0x18, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x05, 0x02, 0x00, 0x06, 0x12, 0x03, 0x6d, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x05, 0x02, 0x00, 0x01, 0x12, 0x03, 0x6d, 0x0b, 0x13, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, + 0x02, 0x00, 0x03, 0x12, 0x03, 0x6d, 0x16, 0x17, 0x0a, 0x59, 0x0a, 0x02, 0x04, 0x06, 0x12, 0x04, + 0x73, 0x00, 0x75, 0x01, 0x1a, 0x4d, 0x0a, 0x20, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x20, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x63, 0x72, 0x65, 0x61, + 0x74, 0x69, 0x6e, 0x67, 0x20, 0x61, 0x6e, 0x20, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, + 0x20, 0x61, 0x6e, 0x64, 0x20, 0x69, 0x74, 0x73, 0x20, 0x61, 0x73, 0x73, 0x6f, 0x63, 0x69, 0x61, + 0x74, 0x65, 0x64, 0x20, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x20, 0x44, 0x61, 0x74, + 0x61, 0x2e, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x06, 0x01, 0x12, 0x03, 0x73, 0x08, 0x1d, 0x0a, + 0x0b, 0x0a, 0x04, 0x04, 0x06, 0x02, 0x00, 0x12, 0x03, 0x74, 0x02, 0x18, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x06, 0x02, 0x00, 0x06, 0x12, 0x03, 0x74, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, + 0x02, 0x00, 0x01, 0x12, 0x03, 0x74, 0x0b, 0x13, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x00, + 0x03, 0x12, 0x03, 
0x74, 0x16, 0x17, 0x0a, 0x38, 0x0a, 0x02, 0x04, 0x07, 0x12, 0x03, 0x7a, 0x00, + 0x21, 0x1a, 0x2d, 0x0a, 0x20, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x20, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, + 0x6e, 0x67, 0x20, 0x61, 0x6e, 0x20, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x2e, 0x0a, + 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x07, 0x01, 0x12, 0x03, 0x7a, 0x08, 0x1e, 0x0a, 0x38, 0x0a, 0x02, + 0x04, 0x08, 0x12, 0x05, 0x7f, 0x00, 0x81, 0x01, 0x01, 0x1a, 0x2b, 0x0a, 0x20, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x66, 0x6f, 0x72, + 0x20, 0x74, 0x61, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x20, 0x61, 0x6e, 0x20, 0x41, 0x72, 0x74, 0x69, + 0x66, 0x61, 0x63, 0x74, 0x2e, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x08, 0x01, 0x12, 0x03, 0x7f, + 0x08, 0x15, 0x0a, 0x0c, 0x0a, 0x04, 0x04, 0x08, 0x02, 0x00, 0x12, 0x04, 0x80, 0x01, 0x02, 0x0e, + 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x08, 0x02, 0x00, 0x06, 0x12, 0x04, 0x80, 0x01, 0x02, 0x05, 0x0a, + 0x0d, 0x0a, 0x05, 0x04, 0x08, 0x02, 0x00, 0x01, 0x12, 0x04, 0x80, 0x01, 0x06, 0x09, 0x0a, 0x0d, + 0x0a, 0x05, 0x04, 0x08, 0x02, 0x00, 0x03, 0x12, 0x04, 0x80, 0x01, 0x0c, 0x0d, 0x0a, 0x38, 0x0a, + 0x02, 0x04, 0x09, 0x12, 0x04, 0x86, 0x01, 0x00, 0x19, 0x1a, 0x2c, 0x0a, 0x20, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x66, 0x6f, + 0x72, 0x20, 0x74, 0x61, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x20, 0x61, 0x6e, 0x20, 0x41, 0x72, 0x74, + 0x69, 0x66, 0x61, 0x63, 0x74, 0x2e, 0x0a, 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x09, 0x01, 0x12, 0x04, + 0x86, 0x01, 0x08, 0x16, 0x0a, 0x6d, 0x0a, 0x02, 0x04, 0x0a, 0x12, 0x06, 0x89, 0x01, 0x00, 0x91, + 0x01, 0x01, 0x1a, 0x5f, 0x20, 0x4c, 0x69, 0x73, 0x74, 0x20, 0x74, 0x68, 0x65, 0x20, 0x61, 0x72, + 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x62, 0x65, 0x6c, + 0x6f, 0x6e, 0x67, 0x20, 0x74, 0x6f, 0x20, 0x74, 0x68, 
0x65, 0x20, 0x44, 0x61, 0x74, 0x61, 0x73, + 0x65, 0x74, 0x2c, 0x20, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x20, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x65, 0x64, 0x20, 0x75, 0x73, 0x69, 0x6e, 0x67, 0x20, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x65, 0x64, 0x20, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x2e, 0x0a, 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x0a, 0x01, 0x12, 0x04, 0x89, 0x01, 0x08, 0x1c, + 0x0a, 0x4c, 0x0a, 0x04, 0x04, 0x0a, 0x02, 0x00, 0x12, 0x04, 0x8b, 0x01, 0x02, 0x18, 0x1a, 0x3e, + 0x20, 0x55, 0x73, 0x65, 0x20, 0x61, 0x20, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x49, 0x44, + 0x20, 0x66, 0x6f, 0x72, 0x20, 0x77, 0x68, 0x69, 0x63, 0x68, 0x20, 0x79, 0x6f, 0x75, 0x20, 0x77, + 0x61, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x0a, 0x0a, 0x0d, + 0x0a, 0x05, 0x04, 0x0a, 0x02, 0x00, 0x06, 0x12, 0x04, 0x8b, 0x01, 0x02, 0x0b, 0x0a, 0x0d, 0x0a, + 0x05, 0x04, 0x0a, 0x02, 0x00, 0x01, 0x12, 0x04, 0x8b, 0x01, 0x0c, 0x13, 0x0a, 0x0d, 0x0a, 0x05, + 0x04, 0x0a, 0x02, 0x00, 0x03, 0x12, 0x04, 0x8b, 0x01, 0x16, 0x17, 0x0a, 0x39, 0x0a, 0x04, 0x04, + 0x0a, 0x02, 0x01, 0x12, 0x04, 0x8e, 0x01, 0x02, 0x1e, 0x1a, 0x2b, 0x20, 0x41, 0x70, 0x70, 0x6c, + 0x79, 0x20, 0x74, 0x68, 0x65, 0x20, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x20, 0x65, 0x78, 0x70, + 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x20, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, + 0x71, 0x75, 0x65, 0x72, 0x79, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0a, 0x02, 0x01, 0x06, 0x12, + 0x04, 0x8e, 0x01, 0x02, 0x12, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0a, 0x02, 0x01, 0x01, 0x12, 0x04, + 0x8e, 0x01, 0x13, 0x19, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0a, 0x02, 0x01, 0x03, 0x12, 0x04, 0x8e, + 0x01, 0x1c, 0x1d, 0x0a, 0x3d, 0x0a, 0x04, 0x04, 0x0a, 0x02, 0x02, 0x12, 0x04, 0x90, 0x01, 0x02, + 0x23, 0x1a, 0x2f, 0x20, 0x50, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 
0x6f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x67, 0x65, 0x74, 0x20, 0x61, 0x20, + 0x70, 0x61, 0x67, 0x65, 0x20, 0x6f, 0x66, 0x20, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, + 0x73, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0a, 0x02, 0x02, 0x06, 0x12, 0x04, 0x90, 0x01, 0x02, + 0x13, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0a, 0x02, 0x02, 0x01, 0x12, 0x04, 0x90, 0x01, 0x14, 0x1e, + 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0a, 0x02, 0x02, 0x03, 0x12, 0x04, 0x90, 0x01, 0x21, 0x22, 0x0a, + 0x2a, 0x0a, 0x02, 0x04, 0x0b, 0x12, 0x06, 0x94, 0x01, 0x00, 0x99, 0x01, 0x01, 0x1a, 0x1c, 0x20, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x20, 0x74, 0x6f, 0x20, 0x6c, 0x69, 0x73, 0x74, + 0x20, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x0a, 0x0a, 0x0b, 0x0a, 0x03, 0x04, + 0x0b, 0x01, 0x12, 0x04, 0x94, 0x01, 0x08, 0x1d, 0x0a, 0x25, 0x0a, 0x04, 0x04, 0x0b, 0x02, 0x00, + 0x12, 0x04, 0x96, 0x01, 0x02, 0x22, 0x1a, 0x17, 0x20, 0x54, 0x68, 0x65, 0x20, 0x6c, 0x69, 0x73, + 0x74, 0x20, 0x6f, 0x66, 0x20, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, 0x0a, 0x0a, + 0x0d, 0x0a, 0x05, 0x04, 0x0b, 0x02, 0x00, 0x04, 0x12, 0x04, 0x96, 0x01, 0x02, 0x0a, 0x0a, 0x0d, + 0x0a, 0x05, 0x04, 0x0b, 0x02, 0x00, 0x06, 0x12, 0x04, 0x96, 0x01, 0x0b, 0x13, 0x0a, 0x0d, 0x0a, + 0x05, 0x04, 0x0b, 0x02, 0x00, 0x01, 0x12, 0x04, 0x96, 0x01, 0x14, 0x1d, 0x0a, 0x0d, 0x0a, 0x05, + 0x04, 0x0b, 0x02, 0x00, 0x03, 0x12, 0x04, 0x96, 0x01, 0x20, 0x21, 0x0a, 0x69, 0x0a, 0x04, 0x04, + 0x0b, 0x02, 0x01, 0x12, 0x04, 0x98, 0x01, 0x02, 0x18, 0x1a, 0x5b, 0x20, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x20, 0x74, 0x6f, 0x20, 0x75, 0x73, 0x65, 0x20, 0x74, 0x6f, 0x20, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6e, 0x65, 0x78, 0x74, 0x20, 0x70, 0x61, 0x67, + 0x65, 0x2c, 0x20, 0x70, 0x61, 0x73, 0x73, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x69, 0x6e, 0x74, + 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6e, 0x65, 0x78, 0x74, 0x20, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x73, 0x20, 0x50, 
0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0b, 0x02, 0x01, 0x05, 0x12, + 0x04, 0x98, 0x01, 0x02, 0x08, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0b, 0x02, 0x01, 0x01, 0x12, 0x04, + 0x98, 0x01, 0x09, 0x13, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0b, 0x02, 0x01, 0x03, 0x12, 0x04, 0x98, + 0x01, 0x16, 0x17, 0x0a, 0x35, 0x0a, 0x02, 0x04, 0x0c, 0x12, 0x06, 0x9c, 0x01, 0x00, 0xa1, 0x01, + 0x01, 0x1a, 0x27, 0x20, 0x4c, 0x69, 0x73, 0x74, 0x20, 0x74, 0x68, 0x65, 0x20, 0x64, 0x61, 0x74, + 0x61, 0x73, 0x65, 0x74, 0x73, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x67, 0x69, + 0x76, 0x65, 0x6e, 0x20, 0x71, 0x75, 0x65, 0x72, 0x79, 0x0a, 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x0c, + 0x01, 0x12, 0x04, 0x9c, 0x01, 0x08, 0x1b, 0x0a, 0x39, 0x0a, 0x04, 0x04, 0x0c, 0x02, 0x00, 0x12, + 0x04, 0x9e, 0x01, 0x02, 0x1e, 0x1a, 0x2b, 0x20, 0x41, 0x70, 0x70, 0x6c, 0x79, 0x20, 0x74, 0x68, + 0x65, 0x20, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x20, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, + 0x69, 0x6f, 0x6e, 0x20, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0c, 0x02, 0x00, 0x06, 0x12, 0x04, 0x9e, 0x01, 0x02, + 0x12, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0c, 0x02, 0x00, 0x01, 0x12, 0x04, 0x9e, 0x01, 0x13, 0x19, + 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0c, 0x02, 0x00, 0x03, 0x12, 0x04, 0x9e, 0x01, 0x1c, 0x1d, 0x0a, + 0x3c, 0x0a, 0x04, 0x04, 0x0c, 0x02, 0x01, 0x12, 0x04, 0xa0, 0x01, 0x02, 0x23, 0x1a, 0x2e, 0x20, + 0x50, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x67, 0x65, 0x74, 0x20, 0x61, 0x20, 0x70, 0x61, 0x67, 0x65, + 0x20, 0x6f, 0x66, 0x20, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x73, 0x0a, 0x0a, 0x0d, 0x0a, + 0x05, 0x04, 0x0c, 0x02, 0x01, 0x06, 0x12, 0x04, 0xa0, 0x01, 0x02, 0x13, 0x0a, 0x0d, 0x0a, 0x05, + 0x04, 0x0c, 0x02, 0x01, 0x01, 0x12, 0x04, 0xa0, 0x01, 0x14, 0x1e, 
0x0a, 0x0d, 0x0a, 0x05, 0x04, + 0x0c, 0x02, 0x01, 0x03, 0x12, 0x04, 0xa0, 0x01, 0x21, 0x22, 0x0a, 0x49, 0x0a, 0x02, 0x04, 0x0d, + 0x12, 0x06, 0xa4, 0x01, 0x00, 0xa9, 0x01, 0x01, 0x1a, 0x3b, 0x20, 0x4c, 0x69, 0x73, 0x74, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x73, 0x20, 0x72, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x20, 0x77, 0x69, 0x74, 0x68, 0x20, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x20, 0x66, 0x6f, 0x72, 0x20, 0x6e, 0x65, 0x78, 0x74, 0x20, 0x70, 0x61, 0x67, 0x69, 0x6e, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x0d, 0x01, 0x12, 0x04, 0xa4, 0x01, + 0x08, 0x1c, 0x0a, 0x24, 0x0a, 0x04, 0x04, 0x0d, 0x02, 0x00, 0x12, 0x04, 0xa6, 0x01, 0x02, 0x20, + 0x1a, 0x16, 0x20, 0x54, 0x68, 0x65, 0x20, 0x6c, 0x69, 0x73, 0x74, 0x20, 0x6f, 0x66, 0x20, 0x64, + 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x73, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0d, 0x02, 0x00, + 0x04, 0x12, 0x04, 0xa6, 0x01, 0x02, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0d, 0x02, 0x00, 0x06, + 0x12, 0x04, 0xa6, 0x01, 0x0b, 0x12, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0d, 0x02, 0x00, 0x01, 0x12, + 0x04, 0xa6, 0x01, 0x13, 0x1b, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0d, 0x02, 0x00, 0x03, 0x12, 0x04, + 0xa6, 0x01, 0x1e, 0x1f, 0x0a, 0x69, 0x0a, 0x04, 0x04, 0x0d, 0x02, 0x01, 0x12, 0x04, 0xa8, 0x01, + 0x02, 0x18, 0x1a, 0x5b, 0x20, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x20, 0x74, 0x6f, 0x20, 0x75, 0x73, + 0x65, 0x20, 0x74, 0x6f, 0x20, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x20, 0x74, 0x68, 0x65, + 0x20, 0x6e, 0x65, 0x78, 0x74, 0x20, 0x70, 0x61, 0x67, 0x65, 0x2c, 0x20, 0x70, 0x61, 0x73, 0x73, + 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x69, 0x6e, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6e, + 0x65, 0x78, 0x74, 0x20, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x20, 0x50, 0x61, 0x67, + 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x0a, 0x0a, + 0x0d, 0x0a, 0x05, 0x04, 0x0d, 0x02, 0x01, 0x05, 0x12, 0x04, 0xa8, 0x01, 0x02, 0x08, 0x0a, 0x0d, + 0x0a, 
0x05, 0x04, 0x0d, 0x02, 0x01, 0x01, 0x12, 0x04, 0xa8, 0x01, 0x09, 0x13, 0x0a, 0x0d, 0x0a, + 0x05, 0x04, 0x0d, 0x02, 0x01, 0x03, 0x12, 0x04, 0xa8, 0x01, 0x16, 0x17, 0x0a, 0x66, 0x0a, 0x02, + 0x04, 0x0e, 0x12, 0x06, 0xae, 0x01, 0x00, 0xbe, 0x01, 0x01, 0x1a, 0x58, 0x0a, 0x20, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x66, 0x6f, + 0x72, 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x69, 0x6e, 0x67, 0x20, 0x61, 0x6e, 0x20, 0x41, 0x72, + 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x6f, 0x76, 0x65, 0x72, 0x77, + 0x72, 0x69, 0x74, 0x69, 0x6e, 0x67, 0x20, 0x69, 0x74, 0x73, 0x20, 0x61, 0x73, 0x73, 0x6f, 0x63, + 0x69, 0x61, 0x74, 0x65, 0x64, 0x20, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x44, 0x61, + 0x74, 0x61, 0x2e, 0x0a, 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x0e, 0x01, 0x12, 0x04, 0xae, 0x01, 0x08, + 0x1d, 0x0a, 0x3d, 0x0a, 0x04, 0x04, 0x0e, 0x02, 0x00, 0x12, 0x04, 0xb0, 0x01, 0x02, 0x18, 0x1a, + 0x2f, 0x20, 0x49, 0x44, 0x20, 0x6f, 0x66, 0x20, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x20, 0x69, 0x73, 0x20, + 0x61, 0x73, 0x73, 0x6f, 0x63, 0x69, 0x61, 0x74, 0x65, 0x64, 0x20, 0x77, 0x69, 0x74, 0x68, 0x0a, + 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0e, 0x02, 0x00, 0x06, 0x12, 0x04, 0xb0, 0x01, 0x02, 0x0b, 0x0a, + 0x0d, 0x0a, 0x05, 0x04, 0x0e, 0x02, 0x00, 0x01, 0x12, 0x04, 0xb0, 0x01, 0x0c, 0x13, 0x0a, 0x0d, + 0x0a, 0x05, 0x04, 0x0e, 0x02, 0x00, 0x03, 0x12, 0x04, 0xb0, 0x01, 0x16, 0x17, 0x0a, 0x59, 0x0a, + 0x04, 0x04, 0x0e, 0x08, 0x00, 0x12, 0x06, 0xb3, 0x01, 0x02, 0xb6, 0x01, 0x03, 0x1a, 0x49, 0x20, + 0x45, 0x69, 0x74, 0x68, 0x65, 0x72, 0x20, 0x49, 0x44, 0x20, 0x6f, 0x66, 0x20, 0x61, 0x72, 0x74, + 0x69, 0x66, 0x61, 0x63, 0x74, 0x20, 0x6f, 0x72, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x20, 0x6f, 0x66, + 0x20, 0x74, 0x61, 0x67, 0x20, 0x74, 0x6f, 0x20, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, + 0x20, 0x65, 0x78, 0x69, 0x73, 0x74, 0x69, 
0x6e, 0x67, 0x20, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, + 0x63, 0x74, 0x20, 0x66, 0x72, 0x6f, 0x6d, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0e, 0x08, 0x00, + 0x01, 0x12, 0x04, 0xb3, 0x01, 0x08, 0x14, 0x0a, 0x0c, 0x0a, 0x04, 0x04, 0x0e, 0x02, 0x01, 0x12, + 0x04, 0xb4, 0x01, 0x04, 0x1b, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0e, 0x02, 0x01, 0x05, 0x12, 0x04, + 0xb4, 0x01, 0x04, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0e, 0x02, 0x01, 0x01, 0x12, 0x04, 0xb4, + 0x01, 0x0b, 0x16, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0e, 0x02, 0x01, 0x03, 0x12, 0x04, 0xb4, 0x01, + 0x19, 0x1a, 0x0a, 0x0c, 0x0a, 0x04, 0x04, 0x0e, 0x02, 0x02, 0x12, 0x04, 0xb5, 0x01, 0x04, 0x18, + 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0e, 0x02, 0x02, 0x05, 0x12, 0x04, 0xb5, 0x01, 0x04, 0x0a, 0x0a, + 0x0d, 0x0a, 0x05, 0x04, 0x0e, 0x02, 0x02, 0x01, 0x12, 0x04, 0xb5, 0x01, 0x0b, 0x13, 0x0a, 0x0d, + 0x0a, 0x05, 0x04, 0x0e, 0x02, 0x02, 0x03, 0x12, 0x04, 0xb5, 0x01, 0x16, 0x17, 0x0a, 0xd4, 0x01, + 0x0a, 0x04, 0x04, 0x0e, 0x02, 0x03, 0x12, 0x04, 0xba, 0x01, 0x02, 0x21, 0x1a, 0xc5, 0x01, 0x20, + 0x4c, 0x69, 0x73, 0x74, 0x20, 0x6f, 0x66, 0x20, 0x64, 0x61, 0x74, 0x61, 0x20, 0x74, 0x6f, 0x20, + 0x6f, 0x76, 0x65, 0x72, 0x77, 0x72, 0x69, 0x74, 0x65, 0x20, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x64, + 0x20, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x20, 0x64, 0x61, 0x74, 0x61, 0x20, 0x77, + 0x69, 0x74, 0x68, 0x2e, 0x20, 0x4d, 0x75, 0x73, 0x74, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x20, 0x41, 0x4c, 0x4c, 0x20, 0x64, 0x61, 0x74, 0x61, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x20, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x20, + 0x61, 0x73, 0x20, 0x61, 0x6e, 0x79, 0x20, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x0a, 0x20, + 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x44, 0x61, 0x74, 0x61, 0x20, 0x65, 0x6e, 0x74, + 0x72, 0x69, 0x65, 0x73, 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x6d, + 0x6f, 0x76, 0x65, 0x64, 0x20, 0x66, 0x72, 0x6f, 0x6d, 0x20, 0x74, 0x68, 0x65, 
0x20, 0x75, 0x6e, + 0x64, 0x65, 0x72, 0x6c, 0x79, 0x69, 0x6e, 0x67, 0x20, 0x62, 0x6c, 0x6f, 0x62, 0x20, 0x73, 0x74, + 0x6f, 0x72, 0x61, 0x67, 0x65, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, + 0x73, 0x65, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0e, 0x02, 0x03, 0x04, 0x12, 0x04, 0xba, + 0x01, 0x02, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0e, 0x02, 0x03, 0x06, 0x12, 0x04, 0xba, 0x01, + 0x0b, 0x17, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0e, 0x02, 0x03, 0x01, 0x12, 0x04, 0xba, 0x01, 0x18, + 0x1c, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0e, 0x02, 0x03, 0x03, 0x12, 0x04, 0xba, 0x01, 0x1f, 0x20, + 0x0a, 0x76, 0x0a, 0x04, 0x04, 0x0e, 0x02, 0x04, 0x12, 0x04, 0xbd, 0x01, 0x02, 0x18, 0x1a, 0x68, + 0x20, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, + 0x6e, 0x20, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x28, 0x69, 0x6e, 0x63, 0x6c, 0x75, + 0x64, 0x69, 0x6e, 0x67, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x64, + 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x2c, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x6e, 0x6f, 0x64, + 0x65, 0x2c, 0x20, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x20, 0x64, 0x61, 0x74, 0x61, 0x29, + 0x20, 0x77, 0x68, 0x65, 0x6e, 0x20, 0x6f, 0x76, 0x65, 0x72, 0x77, 0x72, 0x69, 0x74, 0x69, 0x6e, + 0x67, 0x20, 0x63, 0x61, 0x63, 0x68, 0x65, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0e, 0x02, 0x04, + 0x06, 0x12, 0x04, 0xbd, 0x01, 0x02, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0e, 0x02, 0x04, 0x01, + 0x12, 0x04, 0xbd, 0x01, 0x0b, 0x13, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0e, 0x02, 0x04, 0x03, 0x12, + 0x04, 0xbd, 0x01, 0x16, 0x17, 0x0a, 0x3b, 0x0a, 0x02, 0x04, 0x0f, 0x12, 0x06, 0xc3, 0x01, 0x00, + 0xc6, 0x01, 0x01, 0x1a, 0x2d, 0x0a, 0x20, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x20, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x69, 0x6e, 0x67, 0x20, 0x61, 0x6e, 0x20, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, + 0x2e, 0x0a, 0x0a, 
0x0b, 0x0a, 0x03, 0x04, 0x0f, 0x01, 0x12, 0x04, 0xc3, 0x01, 0x08, 0x1e, 0x0a, + 0x35, 0x0a, 0x04, 0x04, 0x0f, 0x02, 0x00, 0x12, 0x04, 0xc5, 0x01, 0x02, 0x19, 0x1a, 0x27, 0x20, + 0x54, 0x68, 0x65, 0x20, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x20, 0x49, 0x44, 0x20, 0x6f, 0x66, + 0x20, 0x74, 0x68, 0x65, 0x20, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x20, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x64, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0f, 0x02, 0x00, 0x05, 0x12, + 0x04, 0xc5, 0x01, 0x02, 0x08, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0f, 0x02, 0x00, 0x01, 0x12, 0x04, + 0xc5, 0x01, 0x09, 0x14, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0f, 0x02, 0x00, 0x03, 0x12, 0x04, 0xc5, + 0x01, 0x17, 0x18, 0x0a, 0x51, 0x0a, 0x02, 0x04, 0x10, 0x12, 0x06, 0xcb, 0x01, 0x00, 0xd1, 0x01, + 0x01, 0x1a, 0x43, 0x0a, 0x20, 0x52, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x49, 0x44, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, + 0x69, 0x73, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x64, 0x20, 0x6f, 0x66, 0x20, 0x73, + 0x65, 0x76, 0x65, 0x72, 0x61, 0x6c, 0x20, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x20, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x73, 0x2e, 0x0a, 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x10, 0x01, 0x12, 0x04, 0xcb, + 0x01, 0x08, 0x15, 0x0a, 0x36, 0x0a, 0x04, 0x04, 0x10, 0x02, 0x00, 0x12, 0x04, 0xcd, 0x01, 0x02, + 0x1b, 0x1a, 0x28, 0x20, 0x54, 0x68, 0x65, 0x20, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x20, 0x49, + 0x44, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x65, 0x64, 0x20, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, + 0x10, 0x02, 0x00, 0x06, 0x12, 0x04, 0xcd, 0x01, 0x02, 0x0b, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x10, + 0x02, 0x00, 0x01, 0x12, 0x04, 0xcd, 0x01, 0x0c, 0x16, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x10, 0x02, + 0x00, 0x03, 0x12, 0x04, 0xcd, 0x01, 0x19, 0x1a, 0x0a, 0x3d, 0x0a, 0x04, 0x04, 0x10, 0x02, 0x01, + 0x12, 0x04, 0xd0, 0x01, 0x02, 0x16, 0x1a, 0x2f, 0x20, 
0x54, 0x68, 0x65, 0x20, 0x73, 0x70, 0x65, + 0x63, 0x69, 0x66, 0x69, 0x63, 0x20, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x20, 0x74, + 0x61, 0x67, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x72, 0x65, 0x73, 0x65, 0x72, + 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x10, 0x02, 0x01, 0x05, + 0x12, 0x04, 0xd0, 0x01, 0x02, 0x08, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x10, 0x02, 0x01, 0x01, 0x12, + 0x04, 0xd0, 0x01, 0x09, 0x11, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x10, 0x02, 0x01, 0x03, 0x12, 0x04, + 0xd0, 0x01, 0x14, 0x15, 0x0a, 0x7a, 0x0a, 0x02, 0x04, 0x11, 0x12, 0x06, 0xd4, 0x01, 0x00, 0xdd, + 0x01, 0x01, 0x1a, 0x6c, 0x20, 0x54, 0x72, 0x79, 0x20, 0x74, 0x6f, 0x20, 0x61, 0x63, 0x71, 0x75, + 0x69, 0x72, 0x65, 0x20, 0x6f, 0x72, 0x20, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x20, 0x61, 0x6e, + 0x20, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x20, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x20, 0x49, 0x66, 0x20, 0x61, 0x6e, 0x20, 0x61, 0x63, 0x74, + 0x69, 0x76, 0x65, 0x20, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, + 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x2c, 0x20, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, + 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x0a, + 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x11, 0x01, 0x12, 0x04, 0xd4, 0x01, 0x08, 0x25, 0x0a, 0x31, 0x0a, + 0x04, 0x04, 0x11, 0x02, 0x00, 0x12, 0x04, 0xd6, 0x01, 0x02, 0x23, 0x1a, 0x23, 0x20, 0x54, 0x68, + 0x65, 0x20, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x20, 0x49, 0x44, 0x20, 0x66, 0x6f, 0x72, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x0a, + 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x11, 0x02, 0x00, 0x06, 0x12, 0x04, 0xd6, 0x01, 0x02, 0x0f, 0x0a, + 0x0d, 0x0a, 0x05, 0x04, 0x11, 0x02, 0x00, 0x01, 0x12, 0x04, 0xd6, 0x01, 0x10, 0x1e, 0x0a, 0x0d, + 0x0a, 0x05, 0x04, 0x11, 0x02, 0x00, 0x03, 0x12, 0x04, 0xd6, 0x01, 0x21, 0x22, 0x0a, 0x3e, 
0x0a, + 0x04, 0x04, 0x11, 0x02, 0x01, 0x12, 0x04, 0xd9, 0x01, 0x02, 0x16, 0x1a, 0x30, 0x20, 0x54, 0x68, + 0x65, 0x20, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x20, 0x49, 0x44, 0x20, 0x6f, 0x66, 0x20, 0x74, + 0x68, 0x65, 0x20, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, + 0x20, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x0a, 0x0d, 0x0a, + 0x05, 0x04, 0x11, 0x02, 0x01, 0x05, 0x12, 0x04, 0xd9, 0x01, 0x02, 0x08, 0x0a, 0x0d, 0x0a, 0x05, + 0x04, 0x11, 0x02, 0x01, 0x01, 0x12, 0x04, 0xd9, 0x01, 0x09, 0x11, 0x0a, 0x0d, 0x0a, 0x05, 0x04, + 0x11, 0x02, 0x01, 0x03, 0x12, 0x04, 0xd9, 0x01, 0x14, 0x15, 0x0a, 0x42, 0x0a, 0x04, 0x04, 0x11, + 0x02, 0x02, 0x12, 0x04, 0xdc, 0x01, 0x02, 0x32, 0x1a, 0x34, 0x20, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x65, 0x64, 0x20, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x20, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x20, 0x68, 0x65, 0x61, 0x72, 0x74, + 0x62, 0x65, 0x61, 0x74, 0x20, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x0a, 0x0a, 0x0d, + 0x0a, 0x05, 0x04, 0x11, 0x02, 0x02, 0x06, 0x12, 0x04, 0xdc, 0x01, 0x02, 0x1a, 0x0a, 0x0d, 0x0a, + 0x05, 0x04, 0x11, 0x02, 0x02, 0x01, 0x12, 0x04, 0xdc, 0x01, 0x1b, 0x2d, 0x0a, 0x0d, 0x0a, 0x05, + 0x04, 0x11, 0x02, 0x02, 0x03, 0x12, 0x04, 0xdc, 0x01, 0x30, 0x31, 0x0a, 0x6e, 0x0a, 0x02, 0x04, + 0x12, 0x12, 0x06, 0xe0, 0x01, 0x00, 0xef, 0x01, 0x01, 0x1a, 0x60, 0x20, 0x41, 0x20, 0x72, 0x65, + 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, + 0x69, 0x6e, 0x67, 0x20, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x2c, 0x20, 0x68, 0x65, 0x61, 0x72, 0x74, + 0x62, 0x65, 0x61, 0x74, 0x20, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, 0x2c, 0x20, 0x65, + 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x2c, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x76, 0x61, 0x72, 0x69, 0x6f, 0x75, 0x73, + 0x20, 0x6d, 0x65, 0x74, 0x61, 
0x64, 0x61, 0x74, 0x61, 0x2e, 0x0a, 0x0a, 0x0b, 0x0a, 0x03, 0x04, + 0x12, 0x01, 0x12, 0x04, 0xe0, 0x01, 0x08, 0x13, 0x0a, 0x31, 0x0a, 0x04, 0x04, 0x12, 0x02, 0x00, + 0x12, 0x04, 0xe2, 0x01, 0x02, 0x23, 0x1a, 0x23, 0x20, 0x54, 0x68, 0x65, 0x20, 0x75, 0x6e, 0x69, + 0x71, 0x75, 0x65, 0x20, 0x49, 0x44, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x72, + 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, + 0x12, 0x02, 0x00, 0x06, 0x12, 0x04, 0xe2, 0x01, 0x02, 0x0f, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x12, + 0x02, 0x00, 0x01, 0x12, 0x04, 0xe2, 0x01, 0x10, 0x1e, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x12, 0x02, + 0x00, 0x03, 0x12, 0x04, 0xe2, 0x01, 0x21, 0x22, 0x0a, 0x3e, 0x0a, 0x04, 0x04, 0x12, 0x02, 0x01, + 0x12, 0x04, 0xe5, 0x01, 0x02, 0x16, 0x1a, 0x30, 0x20, 0x54, 0x68, 0x65, 0x20, 0x75, 0x6e, 0x69, + 0x71, 0x75, 0x65, 0x20, 0x49, 0x44, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6f, 0x77, + 0x6e, 0x65, 0x72, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x72, 0x65, 0x73, 0x65, + 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x12, 0x02, 0x01, + 0x05, 0x12, 0x04, 0xe5, 0x01, 0x02, 0x08, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x12, 0x02, 0x01, 0x01, + 0x12, 0x04, 0xe5, 0x01, 0x09, 0x11, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x12, 0x02, 0x01, 0x03, 0x12, + 0x04, 0xe5, 0x01, 0x14, 0x15, 0x0a, 0x44, 0x0a, 0x04, 0x04, 0x12, 0x02, 0x02, 0x12, 0x04, 0xe8, + 0x01, 0x02, 0x32, 0x1a, 0x36, 0x20, 0x52, 0x65, 0x63, 0x6f, 0x6d, 0x6d, 0x65, 0x6e, 0x64, 0x65, + 0x64, 0x20, 0x68, 0x65, 0x61, 0x72, 0x74, 0x62, 0x65, 0x61, 0x74, 0x20, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x76, 0x61, 0x6c, 0x20, 0x74, 0x6f, 0x20, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x64, 0x20, 0x72, + 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, + 0x12, 0x02, 0x02, 0x06, 0x12, 0x04, 0xe8, 0x01, 0x02, 0x1a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x12, + 0x02, 0x02, 0x01, 0x12, 0x04, 0xe8, 0x01, 0x1b, 0x2d, 0x0a, 0x0d, 
0x0a, 0x05, 0x04, 0x12, 0x02, + 0x02, 0x03, 0x12, 0x04, 0xe8, 0x01, 0x30, 0x31, 0x0a, 0x38, 0x0a, 0x04, 0x04, 0x12, 0x02, 0x03, + 0x12, 0x04, 0xeb, 0x01, 0x02, 0x2b, 0x1a, 0x2a, 0x20, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x20, 0x6f, 0x66, + 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x12, 0x02, 0x03, 0x06, 0x12, 0x04, 0xeb, 0x01, 0x02, + 0x1b, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x12, 0x02, 0x03, 0x01, 0x12, 0x04, 0xeb, 0x01, 0x1c, 0x26, + 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x12, 0x02, 0x03, 0x03, 0x12, 0x04, 0xeb, 0x01, 0x29, 0x2a, 0x0a, + 0x3f, 0x0a, 0x04, 0x04, 0x12, 0x02, 0x04, 0x12, 0x04, 0xee, 0x01, 0x02, 0x18, 0x1a, 0x31, 0x20, + 0x46, 0x72, 0x65, 0x65, 0x2d, 0x66, 0x6f, 0x72, 0x6d, 0x20, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x20, 0x61, 0x73, 0x73, 0x6f, 0x63, 0x69, 0x61, 0x74, 0x65, 0x64, 0x20, 0x77, 0x69, + 0x74, 0x68, 0x20, 0x74, 0x68, 0x65, 0x20, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x0a, + 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x12, 0x02, 0x04, 0x06, 0x12, 0x04, 0xee, 0x01, 0x02, 0x0a, 0x0a, + 0x0d, 0x0a, 0x05, 0x04, 0x12, 0x02, 0x04, 0x01, 0x12, 0x04, 0xee, 0x01, 0x0b, 0x13, 0x0a, 0x0d, + 0x0a, 0x05, 0x04, 0x12, 0x02, 0x04, 0x03, 0x12, 0x04, 0xee, 0x01, 0x16, 0x17, 0x0a, 0x60, 0x0a, + 0x02, 0x04, 0x13, 0x12, 0x06, 0xf2, 0x01, 0x00, 0xf5, 0x01, 0x01, 0x1a, 0x52, 0x20, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x20, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x69, 0x6e, 0x67, + 0x20, 0x65, 0x69, 0x74, 0x68, 0x65, 0x72, 0x20, 0x61, 0x20, 0x6e, 0x65, 0x77, 0x6c, 0x79, 0x20, + 0x6d, 0x69, 0x6e, 0x74, 0x65, 0x64, 0x20, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x20, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x65, 0x78, 0x69, 0x73, 0x74, 0x69, + 0x6e, 0x67, 0x20, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x0a, + 0x0b, 
0x0a, 0x03, 0x04, 0x13, 0x01, 0x12, 0x04, 0xf2, 0x01, 0x08, 0x26, 0x0a, 0x3a, 0x0a, 0x04, + 0x04, 0x13, 0x02, 0x00, 0x12, 0x04, 0xf4, 0x01, 0x02, 0x1e, 0x1a, 0x2c, 0x20, 0x54, 0x68, 0x65, + 0x20, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x74, 0x6f, 0x20, + 0x62, 0x65, 0x20, 0x61, 0x63, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x20, 0x6f, 0x72, 0x20, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x64, 0x65, 0x64, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x13, 0x02, 0x00, + 0x06, 0x12, 0x04, 0xf4, 0x01, 0x02, 0x0d, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x13, 0x02, 0x00, 0x01, + 0x12, 0x04, 0xf4, 0x01, 0x0e, 0x19, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x13, 0x02, 0x00, 0x03, 0x12, + 0x04, 0xf4, 0x01, 0x1c, 0x1d, 0x0a, 0x2e, 0x0a, 0x02, 0x04, 0x14, 0x12, 0x06, 0xf8, 0x01, 0x00, + 0xfe, 0x01, 0x01, 0x1a, 0x20, 0x20, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x20, 0x74, 0x6f, + 0x20, 0x72, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x20, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x14, 0x01, 0x12, 0x04, 0xf8, 0x01, + 0x08, 0x21, 0x0a, 0x31, 0x0a, 0x04, 0x04, 0x14, 0x02, 0x00, 0x12, 0x04, 0xfa, 0x01, 0x02, 0x23, + 0x1a, 0x23, 0x20, 0x54, 0x68, 0x65, 0x20, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x20, 0x49, 0x44, + 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x14, 0x02, 0x00, 0x06, 0x12, 0x04, + 0xfa, 0x01, 0x02, 0x0f, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x14, 0x02, 0x00, 0x01, 0x12, 0x04, 0xfa, + 0x01, 0x10, 0x1e, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x14, 0x02, 0x00, 0x03, 0x12, 0x04, 0xfa, 0x01, + 0x21, 0x22, 0x0a, 0x3e, 0x0a, 0x04, 0x04, 0x14, 0x02, 0x01, 0x12, 0x04, 0xfd, 0x01, 0x02, 0x16, + 0x1a, 0x30, 0x20, 0x54, 0x68, 0x65, 0x20, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x20, 0x49, 0x44, + 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6f, 0x77, 0x6e, 0x65, 0x72, 0x20, 0x66, 0x6f, + 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x72, 
0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x14, 0x02, 0x01, 0x05, 0x12, 0x04, 0xfd, 0x01, 0x02, + 0x08, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x14, 0x02, 0x01, 0x01, 0x12, 0x04, 0xfd, 0x01, 0x09, 0x11, + 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x14, 0x02, 0x01, 0x03, 0x12, 0x04, 0xfd, 0x01, 0x14, 0x15, 0x0a, + 0x2d, 0x0a, 0x02, 0x04, 0x15, 0x12, 0x04, 0x81, 0x02, 0x00, 0x25, 0x1a, 0x21, 0x20, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x20, 0x74, 0x6f, 0x20, 0x72, 0x65, 0x6c, 0x65, 0x61, 0x73, + 0x65, 0x20, 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x0a, 0x0b, + 0x0a, 0x03, 0x04, 0x15, 0x01, 0x12, 0x04, 0x81, 0x02, 0x08, 0x22, 0x0a, 0x49, 0x0a, 0x02, 0x04, + 0x16, 0x12, 0x06, 0x86, 0x02, 0x00, 0x8a, 0x02, 0x01, 0x1a, 0x3b, 0x0a, 0x20, 0x44, 0x61, 0x74, + 0x61, 0x73, 0x65, 0x74, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x20, 0x49, 0x74, + 0x20, 0x69, 0x73, 0x20, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x6c, 0x79, 0x20, 0x69, 0x64, 0x65, + 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x44, 0x61, 0x74, 0x61, 0x73, + 0x65, 0x74, 0x49, 0x44, 0x2e, 0x0a, 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x16, 0x01, 0x12, 0x04, 0x86, + 0x02, 0x08, 0x0f, 0x0a, 0x0c, 0x0a, 0x04, 0x04, 0x16, 0x02, 0x00, 0x12, 0x04, 0x87, 0x02, 0x02, + 0x13, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x16, 0x02, 0x00, 0x06, 0x12, 0x04, 0x87, 0x02, 0x02, 0x0b, + 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x16, 0x02, 0x00, 0x01, 0x12, 0x04, 0x87, 0x02, 0x0c, 0x0e, 0x0a, + 0x0d, 0x0a, 0x05, 0x04, 0x16, 0x02, 0x00, 0x03, 0x12, 0x04, 0x87, 0x02, 0x11, 0x12, 0x0a, 0x0c, + 0x0a, 0x04, 0x04, 0x16, 0x02, 0x01, 0x12, 0x04, 0x88, 0x02, 0x02, 0x18, 0x0a, 0x0d, 0x0a, 0x05, + 0x04, 0x16, 0x02, 0x01, 0x06, 0x12, 0x04, 0x88, 0x02, 0x02, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, + 0x16, 0x02, 0x01, 0x01, 0x12, 0x04, 0x88, 0x02, 0x0b, 0x13, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x16, + 0x02, 0x01, 0x03, 0x12, 0x04, 0x88, 0x02, 0x16, 0x17, 0x0a, 0x0c, 0x0a, 0x04, 
0x04, 0x16, 0x02, + 0x02, 0x12, 0x04, 0x89, 0x02, 0x02, 0x24, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x16, 0x02, 0x02, 0x04, + 0x12, 0x04, 0x89, 0x02, 0x02, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x16, 0x02, 0x02, 0x05, 0x12, + 0x04, 0x89, 0x02, 0x0b, 0x11, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x16, 0x02, 0x02, 0x01, 0x12, 0x04, + 0x89, 0x02, 0x12, 0x1f, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x16, 0x02, 0x02, 0x03, 0x12, 0x04, 0x89, + 0x02, 0x22, 0x23, 0x0a, 0x7a, 0x0a, 0x02, 0x04, 0x17, 0x12, 0x06, 0x8f, 0x02, 0x00, 0x92, 0x02, + 0x01, 0x1a, 0x6c, 0x0a, 0x20, 0x41, 0x6e, 0x20, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, + 0x20, 0x63, 0x6f, 0x75, 0x6c, 0x64, 0x20, 0x68, 0x61, 0x76, 0x65, 0x20, 0x6d, 0x75, 0x6c, 0x74, + 0x69, 0x70, 0x6c, 0x65, 0x20, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x20, + 0x61, 0x6e, 0x64, 0x20, 0x65, 0x61, 0x63, 0x68, 0x20, 0x70, 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x20, 0x63, 0x61, 0x6e, 0x20, 0x68, 0x61, 0x76, 0x65, 0x20, 0x61, 0x6e, 0x20, 0x61, + 0x72, 0x62, 0x69, 0x74, 0x72, 0x61, 0x72, 0x79, 0x20, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x20, + 0x6b, 0x65, 0x79, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x70, 0x61, 0x69, 0x72, 0x0a, 0x0a, + 0x0b, 0x0a, 0x03, 0x04, 0x17, 0x01, 0x12, 0x04, 0x8f, 0x02, 0x08, 0x11, 0x0a, 0x0c, 0x0a, 0x04, + 0x04, 0x17, 0x02, 0x00, 0x12, 0x04, 0x90, 0x02, 0x02, 0x11, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x17, + 0x02, 0x00, 0x05, 0x12, 0x04, 0x90, 0x02, 0x02, 0x08, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x17, 0x02, + 0x00, 0x01, 0x12, 0x04, 0x90, 0x02, 0x09, 0x0c, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x17, 0x02, 0x00, + 0x03, 0x12, 0x04, 0x90, 0x02, 0x0f, 0x10, 0x0a, 0x0c, 0x0a, 0x04, 0x04, 0x17, 0x02, 0x01, 0x12, + 0x04, 0x91, 0x02, 0x02, 0x13, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x17, 0x02, 0x01, 0x05, 0x12, 0x04, + 0x91, 0x02, 0x02, 0x08, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x17, 0x02, 0x01, 0x01, 0x12, 0x04, 0x91, + 0x02, 0x09, 0x0e, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x17, 0x02, 0x01, 0x03, 0x12, 0x04, 0x91, 0x02, + 0x11, 0x12, 0x0a, 
0x4d, 0x0a, 0x02, 0x04, 0x18, 0x12, 0x06, 0x97, 0x02, 0x00, 0xa0, 0x02, 0x01, + 0x1a, 0x3f, 0x0a, 0x20, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x49, 0x44, 0x20, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x69, 0x73, 0x20, 0x63, 0x6f, + 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x64, 0x20, 0x6f, 0x66, 0x20, 0x73, 0x65, 0x76, 0x65, 0x72, 0x61, + 0x6c, 0x20, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x20, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x2e, + 0x0a, 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x18, 0x01, 0x12, 0x04, 0x97, 0x02, 0x08, 0x11, 0x0a, 0x27, + 0x0a, 0x04, 0x04, 0x18, 0x02, 0x00, 0x12, 0x04, 0x98, 0x02, 0x02, 0x15, 0x22, 0x19, 0x20, 0x54, + 0x68, 0x65, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x18, 0x02, 0x00, 0x05, + 0x12, 0x04, 0x98, 0x02, 0x02, 0x08, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x18, 0x02, 0x00, 0x01, 0x12, + 0x04, 0x98, 0x02, 0x09, 0x10, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x18, 0x02, 0x00, 0x03, 0x12, 0x04, + 0x98, 0x02, 0x13, 0x14, 0x0a, 0x27, 0x0a, 0x04, 0x04, 0x18, 0x02, 0x01, 0x12, 0x04, 0x99, 0x02, + 0x02, 0x12, 0x22, 0x19, 0x20, 0x54, 0x68, 0x65, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x20, 0x6f, 0x66, + 0x20, 0x74, 0x68, 0x65, 0x20, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x0a, 0x0a, 0x0d, 0x0a, + 0x05, 0x04, 0x18, 0x02, 0x01, 0x05, 0x12, 0x04, 0x99, 0x02, 0x02, 0x08, 0x0a, 0x0d, 0x0a, 0x05, + 0x04, 0x18, 0x02, 0x01, 0x01, 0x12, 0x04, 0x99, 0x02, 0x09, 0x0d, 0x0a, 0x0d, 0x0a, 0x05, 0x04, + 0x18, 0x02, 0x01, 0x03, 0x12, 0x04, 0x99, 0x02, 0x10, 0x11, 0x0a, 0x2c, 0x0a, 0x04, 0x04, 0x18, + 0x02, 0x02, 0x12, 0x04, 0x9a, 0x02, 0x02, 0x14, 0x22, 0x1e, 0x20, 0x54, 0x68, 0x65, 0x20, 0x64, + 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x20, 0x28, 0x65, 0x67, 0x2e, 0x20, 0x65, 0x6e, 0x76, 0x69, 0x72, + 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x29, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x18, 0x02, 0x02, + 0x05, 0x12, 0x04, 0x9a, 0x02, 0x02, 0x08, 0x0a, 0x0d, 
0x0a, 0x05, 0x04, 0x18, 0x02, 0x02, 0x01, + 0x12, 0x04, 0x9a, 0x02, 0x09, 0x0f, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x18, 0x02, 0x02, 0x03, 0x12, + 0x04, 0x9a, 0x02, 0x12, 0x13, 0x0a, 0x2a, 0x0a, 0x04, 0x04, 0x18, 0x02, 0x03, 0x12, 0x04, 0x9b, + 0x02, 0x02, 0x15, 0x22, 0x1c, 0x20, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x20, 0x6f, 0x66, + 0x20, 0x74, 0x68, 0x65, 0x20, 0x64, 0x61, 0x74, 0x61, 0x20, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x18, 0x02, 0x03, 0x05, 0x12, 0x04, 0x9b, 0x02, 0x02, 0x08, + 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x18, 0x02, 0x03, 0x01, 0x12, 0x04, 0x9b, 0x02, 0x09, 0x10, 0x0a, + 0x0d, 0x0a, 0x05, 0x04, 0x18, 0x02, 0x03, 0x03, 0x12, 0x04, 0x9b, 0x02, 0x13, 0x14, 0x0a, 0x4b, + 0x0a, 0x04, 0x04, 0x18, 0x02, 0x04, 0x12, 0x04, 0x9c, 0x02, 0x02, 0x12, 0x22, 0x3d, 0x20, 0x55, + 0x55, 0x49, 0x44, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x64, 0x61, 0x74, 0x61, + 0x73, 0x65, 0x74, 0x20, 0x28, 0x69, 0x66, 0x20, 0x73, 0x65, 0x74, 0x20, 0x74, 0x68, 0x65, 0x20, + 0x61, 0x62, 0x6f, 0x76, 0x65, 0x20, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x20, 0x61, 0x72, 0x65, + 0x20, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x29, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, + 0x18, 0x02, 0x04, 0x05, 0x12, 0x04, 0x9c, 0x02, 0x02, 0x08, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x18, + 0x02, 0x04, 0x01, 0x12, 0x04, 0x9c, 0x02, 0x09, 0x0d, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x18, 0x02, + 0x04, 0x03, 0x12, 0x04, 0x9c, 0x02, 0x10, 0x11, 0x0a, 0x3a, 0x0a, 0x04, 0x04, 0x18, 0x02, 0x05, + 0x12, 0x04, 0x9f, 0x02, 0x02, 0x11, 0x1a, 0x2c, 0x20, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x2c, 0x20, 0x6f, 0x72, 0x67, 0x20, 0x6b, 0x65, 0x79, 0x20, 0x61, 0x70, 0x70, 0x6c, 0x69, + 0x65, 0x64, 0x20, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x18, 0x02, 0x05, 0x05, 0x12, 0x04, 0x9f, + 0x02, 0x02, 0x08, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x18, 0x02, 0x05, 0x01, 0x12, 0x04, 0x9f, 
0x02, + 0x09, 0x0c, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x18, 0x02, 0x05, 0x03, 0x12, 0x04, 0x9f, 0x02, 0x0f, + 0x10, 0x0a, 0x4b, 0x0a, 0x02, 0x04, 0x19, 0x12, 0x06, 0xa5, 0x02, 0x00, 0xad, 0x02, 0x01, 0x1a, + 0x3d, 0x0a, 0x20, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x20, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x2e, 0x20, 0x49, 0x74, 0x20, 0x69, 0x73, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x6f, + 0x73, 0x65, 0x64, 0x20, 0x6f, 0x66, 0x20, 0x73, 0x65, 0x76, 0x65, 0x72, 0x61, 0x6c, 0x20, 0x73, + 0x74, 0x72, 0x69, 0x6e, 0x67, 0x20, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x2e, 0x0a, 0x0a, 0x0b, + 0x0a, 0x03, 0x04, 0x19, 0x01, 0x12, 0x04, 0xa5, 0x02, 0x08, 0x10, 0x0a, 0x2d, 0x0a, 0x04, 0x04, + 0x19, 0x02, 0x00, 0x12, 0x04, 0xa6, 0x02, 0x02, 0x10, 0x22, 0x1f, 0x20, 0x54, 0x68, 0x65, 0x20, + 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x20, 0x49, 0x44, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, + 0x20, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x19, + 0x02, 0x00, 0x05, 0x12, 0x04, 0xa6, 0x02, 0x02, 0x08, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x19, 0x02, + 0x00, 0x01, 0x12, 0x04, 0xa6, 0x02, 0x09, 0x0b, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x19, 0x02, 0x00, + 0x03, 0x12, 0x04, 0xa6, 0x02, 0x0e, 0x0f, 0x0a, 0x38, 0x0a, 0x04, 0x04, 0x19, 0x02, 0x01, 0x12, + 0x04, 0xa7, 0x02, 0x02, 0x18, 0x22, 0x2a, 0x20, 0x54, 0x68, 0x65, 0x20, 0x44, 0x61, 0x74, 0x61, + 0x73, 0x65, 0x74, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x74, 0x68, 0x65, 0x20, 0x61, 0x72, 0x74, + 0x69, 0x66, 0x61, 0x63, 0x74, 0x20, 0x62, 0x65, 0x6c, 0x6f, 0x6e, 0x67, 0x73, 0x20, 0x74, 0x6f, + 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x19, 0x02, 0x01, 0x06, 0x12, 0x04, 0xa7, 0x02, 0x02, 0x0b, + 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x19, 0x02, 0x01, 0x01, 0x12, 0x04, 0xa7, 0x02, 0x0c, 0x13, 0x0a, + 0x0d, 0x0a, 0x05, 0x04, 0x19, 0x02, 0x01, 0x03, 0x12, 0x04, 0xa7, 0x02, 0x16, 0x17, 0x0a, 0x43, + 0x0a, 0x04, 0x04, 0x19, 0x02, 0x02, 0x12, 0x04, 0xa8, 0x02, 0x02, 0x21, 0x22, 0x35, 0x20, 0x41, + 0x20, 0x6c, 0x69, 0x73, 0x74, 
0x20, 0x6f, 0x66, 0x20, 0x64, 0x61, 0x74, 0x61, 0x20, 0x74, 0x68, + 0x61, 0x74, 0x20, 0x69, 0x73, 0x20, 0x61, 0x73, 0x73, 0x6f, 0x63, 0x69, 0x61, 0x74, 0x65, 0x64, + 0x20, 0x77, 0x69, 0x74, 0x68, 0x20, 0x74, 0x68, 0x65, 0x20, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, + 0x63, 0x74, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x19, 0x02, 0x02, 0x04, 0x12, 0x04, 0xa8, 0x02, + 0x02, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x19, 0x02, 0x02, 0x06, 0x12, 0x04, 0xa8, 0x02, 0x0b, + 0x17, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x19, 0x02, 0x02, 0x01, 0x12, 0x04, 0xa8, 0x02, 0x18, 0x1c, + 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x19, 0x02, 0x02, 0x03, 0x12, 0x04, 0xa8, 0x02, 0x1f, 0x20, 0x0a, + 0x3f, 0x0a, 0x04, 0x04, 0x19, 0x02, 0x03, 0x12, 0x04, 0xa9, 0x02, 0x02, 0x18, 0x22, 0x31, 0x20, + 0x46, 0x72, 0x65, 0x65, 0x2d, 0x66, 0x6f, 0x72, 0x6d, 0x20, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x20, 0x61, 0x73, 0x73, 0x6f, 0x63, 0x69, 0x61, 0x74, 0x65, 0x64, 0x20, 0x77, 0x69, + 0x74, 0x68, 0x20, 0x74, 0x68, 0x65, 0x20, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x0a, + 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x19, 0x02, 0x03, 0x06, 0x12, 0x04, 0xa9, 0x02, 0x02, 0x0a, 0x0a, + 0x0d, 0x0a, 0x05, 0x04, 0x19, 0x02, 0x03, 0x01, 0x12, 0x04, 0xa9, 0x02, 0x0b, 0x13, 0x0a, 0x0d, + 0x0a, 0x05, 0x04, 0x19, 0x02, 0x03, 0x03, 0x12, 0x04, 0xa9, 0x02, 0x16, 0x17, 0x0a, 0x0c, 0x0a, + 0x04, 0x04, 0x19, 0x02, 0x04, 0x12, 0x04, 0xaa, 0x02, 0x02, 0x24, 0x0a, 0x0d, 0x0a, 0x05, 0x04, + 0x19, 0x02, 0x04, 0x04, 0x12, 0x04, 0xaa, 0x02, 0x02, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x19, + 0x02, 0x04, 0x06, 0x12, 0x04, 0xaa, 0x02, 0x0b, 0x14, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x19, 0x02, + 0x04, 0x01, 0x12, 0x04, 0xaa, 0x02, 0x15, 0x1f, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x19, 0x02, 0x04, + 0x03, 0x12, 0x04, 0xaa, 0x02, 0x22, 0x23, 0x0a, 0x0c, 0x0a, 0x04, 0x04, 0x19, 0x02, 0x05, 0x12, + 0x04, 0xab, 0x02, 0x02, 0x18, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x19, 0x02, 0x05, 0x04, 0x12, 0x04, + 0xab, 0x02, 0x02, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x19, 0x02, 
0x05, 0x06, 0x12, 0x04, 0xab, + 0x02, 0x0b, 0x0e, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x19, 0x02, 0x05, 0x01, 0x12, 0x04, 0xab, 0x02, + 0x0f, 0x13, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x19, 0x02, 0x05, 0x03, 0x12, 0x04, 0xab, 0x02, 0x16, + 0x17, 0x0a, 0x48, 0x0a, 0x04, 0x04, 0x19, 0x02, 0x06, 0x12, 0x04, 0xac, 0x02, 0x02, 0x2b, 0x22, + 0x3a, 0x20, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x20, 0x6f, 0x66, 0x20, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, + 0x2c, 0x20, 0x61, 0x75, 0x74, 0x6f, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x20, + 0x62, 0x79, 0x20, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, + 0x19, 0x02, 0x06, 0x06, 0x12, 0x04, 0xac, 0x02, 0x02, 0x1b, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x19, + 0x02, 0x06, 0x01, 0x12, 0x04, 0xac, 0x02, 0x1c, 0x26, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x19, 0x02, + 0x06, 0x03, 0x12, 0x04, 0xac, 0x02, 0x29, 0x2a, 0x0a, 0x39, 0x0a, 0x02, 0x04, 0x1a, 0x12, 0x06, + 0xb2, 0x02, 0x00, 0xb5, 0x02, 0x01, 0x1a, 0x2b, 0x0a, 0x20, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, + 0x63, 0x74, 0x44, 0x61, 0x74, 0x61, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x62, 0x65, 0x6c, 0x6f, + 0x6e, 0x67, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x61, 0x6e, 0x20, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, + 0x63, 0x74, 0x0a, 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x1a, 0x01, 0x12, 0x04, 0xb2, 0x02, 0x08, 0x14, + 0x0a, 0x0c, 0x0a, 0x04, 0x04, 0x1a, 0x02, 0x00, 0x12, 0x04, 0xb3, 0x02, 0x02, 0x12, 0x0a, 0x0d, + 0x0a, 0x05, 0x04, 0x1a, 0x02, 0x00, 0x05, 0x12, 0x04, 0xb3, 0x02, 0x02, 0x08, 0x0a, 0x0d, 0x0a, + 0x05, 0x04, 0x1a, 0x02, 0x00, 0x01, 0x12, 0x04, 0xb3, 0x02, 0x09, 0x0d, 0x0a, 0x0d, 0x0a, 0x05, + 0x04, 0x1a, 0x02, 0x00, 0x03, 0x12, 0x04, 0xb3, 0x02, 0x10, 0x11, 0x0a, 0x0c, 0x0a, 0x04, 0x04, + 0x1a, 0x02, 0x01, 0x12, 0x04, 0xb4, 0x02, 0x02, 0x23, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x1a, 0x02, + 0x01, 0x06, 0x12, 0x04, 0xb4, 0x02, 0x02, 0x18, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x1a, 0x02, 0x01, + 0x01, 
0x12, 0x04, 0xb4, 0x02, 0x19, 0x1e, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x1a, 0x02, 0x01, 0x03, + 0x12, 0x04, 0xb4, 0x02, 0x21, 0x22, 0x0a, 0x84, 0x01, 0x0a, 0x02, 0x04, 0x1b, 0x12, 0x06, 0xbb, + 0x02, 0x00, 0xbf, 0x02, 0x01, 0x1a, 0x76, 0x0a, 0x20, 0x54, 0x61, 0x67, 0x20, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x69, 0x73, 0x20, 0x75, 0x6e, 0x69, + 0x71, 0x75, 0x65, 0x20, 0x74, 0x6f, 0x20, 0x61, 0x20, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, + 0x2e, 0x20, 0x49, 0x74, 0x20, 0x69, 0x73, 0x20, 0x61, 0x73, 0x73, 0x6f, 0x63, 0x69, 0x61, 0x74, + 0x65, 0x64, 0x20, 0x74, 0x6f, 0x20, 0x61, 0x20, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x20, 0x61, + 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x20, 0x61, 0x6e, 0x64, 0x0a, 0x20, 0x63, 0x61, 0x6e, + 0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x76, 0x65, 0x64, 0x20, 0x62, 0x79, + 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x20, 0x6c, 0x61, 0x74, 0x65, 0x72, 0x2e, 0x0a, 0x0a, 0x0b, 0x0a, + 0x03, 0x04, 0x1b, 0x01, 0x12, 0x04, 0xbb, 0x02, 0x08, 0x0b, 0x0a, 0x1b, 0x0a, 0x04, 0x04, 0x1b, + 0x02, 0x00, 0x12, 0x04, 0xbc, 0x02, 0x02, 0x12, 0x22, 0x0d, 0x20, 0x4e, 0x61, 0x6d, 0x65, 0x20, + 0x6f, 0x66, 0x20, 0x74, 0x61, 0x67, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x1b, 0x02, 0x00, 0x05, + 0x12, 0x04, 0xbc, 0x02, 0x02, 0x08, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x1b, 0x02, 0x00, 0x01, 0x12, + 0x04, 0xbc, 0x02, 0x09, 0x0d, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x1b, 0x02, 0x00, 0x03, 0x12, 0x04, + 0xbc, 0x02, 0x10, 0x11, 0x0a, 0x23, 0x0a, 0x04, 0x04, 0x1b, 0x02, 0x01, 0x12, 0x04, 0xbd, 0x02, + 0x02, 0x19, 0x22, 0x15, 0x20, 0x54, 0x68, 0x65, 0x20, 0x74, 0x61, 0x67, 0x67, 0x65, 0x64, 0x20, + 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x1b, 0x02, + 0x01, 0x05, 0x12, 0x04, 0xbd, 0x02, 0x02, 0x08, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x1b, 0x02, 0x01, + 0x01, 0x12, 0x04, 0xbd, 0x02, 0x09, 0x14, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x1b, 0x02, 0x01, 0x03, + 0x12, 0x04, 0xbd, 0x02, 0x17, 0x18, 0x0a, 
0x34, 0x0a, 0x04, 0x04, 0x1b, 0x02, 0x02, 0x12, 0x04, + 0xbe, 0x02, 0x02, 0x18, 0x22, 0x26, 0x20, 0x54, 0x68, 0x65, 0x20, 0x44, 0x61, 0x74, 0x61, 0x73, + 0x65, 0x74, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x74, 0x61, 0x67, + 0x20, 0x62, 0x65, 0x6c, 0x6f, 0x6e, 0x67, 0x73, 0x20, 0x74, 0x6f, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, + 0x04, 0x1b, 0x02, 0x02, 0x06, 0x12, 0x04, 0xbe, 0x02, 0x02, 0x0b, 0x0a, 0x0d, 0x0a, 0x05, 0x04, + 0x1b, 0x02, 0x02, 0x01, 0x12, 0x04, 0xbe, 0x02, 0x0c, 0x13, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x1b, + 0x02, 0x02, 0x03, 0x12, 0x04, 0xbe, 0x02, 0x16, 0x17, 0x0a, 0x43, 0x0a, 0x02, 0x04, 0x1c, 0x12, + 0x06, 0xc4, 0x02, 0x00, 0xc6, 0x02, 0x01, 0x1a, 0x35, 0x0a, 0x20, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x20, 0x72, 0x65, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x73, + 0x20, 0x61, 0x6e, 0x64, 0x20, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x73, 0x0a, 0x0a, 0x0b, + 0x0a, 0x03, 0x04, 0x1c, 0x01, 0x12, 0x04, 0xc4, 0x02, 0x08, 0x10, 0x0a, 0x52, 0x0a, 0x04, 0x04, + 0x1c, 0x02, 0x00, 0x12, 0x04, 0xc5, 0x02, 0x02, 0x22, 0x22, 0x44, 0x20, 0x6b, 0x65, 0x79, 0x20, + 0x6d, 0x61, 0x70, 0x20, 0x69, 0x73, 0x20, 0x61, 0x20, 0x64, 0x69, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x72, 0x79, 0x20, 0x6f, 0x66, 0x20, 0x6b, 0x65, 0x79, 0x2f, 0x76, 0x61, 0x6c, 0x20, 0x73, + 0x74, 0x72, 0x69, 0x6e, 0x67, 0x73, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x72, 0x65, 0x70, 0x72, + 0x65, 0x73, 0x65, 0x6e, 0x74, 0x20, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x0a, 0x0a, + 0x0d, 0x0a, 0x05, 0x04, 0x1c, 0x02, 0x00, 0x06, 0x12, 0x04, 0xc5, 0x02, 0x02, 0x15, 0x0a, 0x0d, + 0x0a, 0x05, 0x04, 0x1c, 0x02, 0x00, 0x01, 0x12, 0x04, 0xc5, 0x02, 0x16, 0x1d, 0x0a, 0x0d, 0x0a, + 0x05, 0x04, 0x1c, 0x02, 0x00, 0x03, 0x12, 0x04, 0xc5, 0x02, 0x20, 0x21, 0x0a, 0x55, 0x0a, 0x02, + 0x04, 0x1d, 0x12, 0x06, 0xc9, 0x02, 0x00, 0xcb, 0x02, 0x01, 0x1a, 0x47, 0x20, 
0x46, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x20, 0x65, 0x78, 0x70, 0x72, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x20, 0x74, + 0x68, 0x61, 0x74, 0x20, 0x69, 0x73, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x73, 0x65, 0x64, 0x20, + 0x6f, 0x66, 0x20, 0x61, 0x20, 0x63, 0x6f, 0x6d, 0x62, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x20, 0x6f, 0x66, 0x20, 0x73, 0x69, 0x6e, 0x67, 0x6c, 0x65, 0x20, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x73, 0x0a, 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x1d, 0x01, 0x12, 0x04, 0xc9, 0x02, 0x08, 0x18, + 0x0a, 0x0c, 0x0a, 0x04, 0x04, 0x1d, 0x02, 0x00, 0x12, 0x04, 0xca, 0x02, 0x02, 0x2c, 0x0a, 0x0d, + 0x0a, 0x05, 0x04, 0x1d, 0x02, 0x00, 0x04, 0x12, 0x04, 0xca, 0x02, 0x02, 0x0a, 0x0a, 0x0d, 0x0a, + 0x05, 0x04, 0x1d, 0x02, 0x00, 0x06, 0x12, 0x04, 0xca, 0x02, 0x0b, 0x1f, 0x0a, 0x0d, 0x0a, 0x05, + 0x04, 0x1d, 0x02, 0x00, 0x01, 0x12, 0x04, 0xca, 0x02, 0x20, 0x27, 0x0a, 0x0d, 0x0a, 0x05, 0x04, + 0x1d, 0x02, 0x00, 0x03, 0x12, 0x04, 0xca, 0x02, 0x2a, 0x2b, 0x0a, 0x2f, 0x0a, 0x02, 0x04, 0x1e, + 0x12, 0x06, 0xce, 0x02, 0x00, 0xdd, 0x02, 0x01, 0x1a, 0x21, 0x20, 0x41, 0x20, 0x73, 0x69, 0x6e, + 0x67, 0x6c, 0x65, 0x20, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x20, 0x74, 0x6f, 0x20, + 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x20, 0x6f, 0x6e, 0x2e, 0x0a, 0x0a, 0x0b, 0x0a, 0x03, 0x04, + 0x1e, 0x01, 0x12, 0x04, 0xce, 0x02, 0x08, 0x1c, 0x0a, 0x0e, 0x0a, 0x04, 0x04, 0x1e, 0x08, 0x00, + 0x12, 0x06, 0xcf, 0x02, 0x02, 0xd4, 0x02, 0x03, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x1e, 0x08, 0x00, + 0x01, 0x12, 0x04, 0xcf, 0x02, 0x08, 0x17, 0x0a, 0x0c, 0x0a, 0x04, 0x04, 0x1e, 0x02, 0x00, 0x12, + 0x04, 0xd0, 0x02, 0x04, 0x25, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x1e, 0x02, 0x00, 0x06, 0x12, 0x04, + 0xd0, 0x02, 0x04, 0x15, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x1e, 0x02, 0x00, 0x01, 0x12, 0x04, 0xd0, + 0x02, 0x16, 0x20, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x1e, 0x02, 0x00, 0x03, 0x12, 0x04, 0xd0, 0x02, + 0x23, 0x24, 0x0a, 0x0c, 0x0a, 0x04, 0x04, 0x1e, 0x02, 0x01, 0x12, 0x04, 0xd1, 0x02, 0x04, 0x31, + 0x0a, 0x0d, 0x0a, 
0x05, 0x04, 0x1e, 0x02, 0x01, 0x06, 0x12, 0x04, 0xd1, 0x02, 0x04, 0x1b, 0x0a, + 0x0d, 0x0a, 0x05, 0x04, 0x1e, 0x02, 0x01, 0x01, 0x12, 0x04, 0xd1, 0x02, 0x1c, 0x2c, 0x0a, 0x0d, + 0x0a, 0x05, 0x04, 0x1e, 0x02, 0x01, 0x03, 0x12, 0x04, 0xd1, 0x02, 0x2f, 0x30, 0x0a, 0x0c, 0x0a, + 0x04, 0x04, 0x1e, 0x02, 0x02, 0x12, 0x04, 0xd2, 0x02, 0x04, 0x2f, 0x0a, 0x0d, 0x0a, 0x05, 0x04, + 0x1e, 0x02, 0x02, 0x06, 0x12, 0x04, 0xd2, 0x02, 0x04, 0x1a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x1e, + 0x02, 0x02, 0x01, 0x12, 0x04, 0xd2, 0x02, 0x1b, 0x2a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x1e, 0x02, + 0x02, 0x03, 0x12, 0x04, 0xd2, 0x02, 0x2d, 0x2e, 0x0a, 0x0c, 0x0a, 0x04, 0x04, 0x1e, 0x02, 0x03, + 0x12, 0x04, 0xd3, 0x02, 0x04, 0x2d, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x1e, 0x02, 0x03, 0x06, 0x12, + 0x04, 0xd3, 0x02, 0x04, 0x19, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x1e, 0x02, 0x03, 0x01, 0x12, 0x04, + 0xd3, 0x02, 0x1a, 0x28, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x1e, 0x02, 0x03, 0x03, 0x12, 0x04, 0xd3, + 0x02, 0x2b, 0x2c, 0x0a, 0x5c, 0x0a, 0x04, 0x04, 0x1e, 0x04, 0x00, 0x12, 0x06, 0xd7, 0x02, 0x02, + 0xd9, 0x02, 0x03, 0x1a, 0x4c, 0x20, 0x61, 0x73, 0x20, 0x75, 0x73, 0x65, 0x2d, 0x63, 0x61, 0x73, + 0x65, 0x73, 0x20, 0x63, 0x6f, 0x6d, 0x65, 0x20, 0x75, 0x70, 0x20, 0x77, 0x65, 0x20, 0x63, 0x61, + 0x6e, 0x20, 0x61, 0x64, 0x64, 0x20, 0x6d, 0x6f, 0x72, 0x65, 0x20, 0x6f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x6f, 0x72, 0x73, 0x2c, 0x20, 0x65, 0x78, 0x3a, 0x20, 0x67, 0x74, 0x65, 0x2c, 0x20, 0x6c, + 0x69, 0x6b, 0x65, 0x2c, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x65, 0x71, 0x20, 0x65, 0x74, 0x63, 0x2e, + 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x1e, 0x04, 0x00, 0x01, 0x12, 0x04, 0xd7, 0x02, 0x07, 0x19, + 0x0a, 0x0e, 0x0a, 0x06, 0x04, 0x1e, 0x04, 0x00, 0x02, 0x00, 0x12, 0x04, 0xd8, 0x02, 0x04, 0x0f, + 0x0a, 0x0f, 0x0a, 0x07, 0x04, 0x1e, 0x04, 0x00, 0x02, 0x00, 0x01, 0x12, 0x04, 0xd8, 0x02, 0x04, + 0x0a, 0x0a, 0x0f, 0x0a, 0x07, 0x04, 0x1e, 0x04, 0x00, 0x02, 0x00, 0x02, 0x12, 0x04, 0xd8, 0x02, + 0x0d, 0x0e, 0x0a, 0x3e, 0x0a, 0x04, 0x04, 0x1e, 0x02, 
0x04, 0x12, 0x04, 0xdb, 0x02, 0x02, 0x23, + 0x22, 0x30, 0x20, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x20, 0x31, 0x30, 0x20, 0x69, 0x6e, 0x20, 0x63, + 0x61, 0x73, 0x65, 0x20, 0x77, 0x65, 0x20, 0x61, 0x64, 0x64, 0x20, 0x6d, 0x6f, 0x72, 0x65, 0x20, + 0x65, 0x6e, 0x74, 0x69, 0x74, 0x69, 0x65, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x1e, 0x02, 0x04, 0x06, 0x12, 0x04, 0xdb, 0x02, 0x02, + 0x14, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x1e, 0x02, 0x04, 0x01, 0x12, 0x04, 0xdb, 0x02, 0x15, 0x1d, + 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x1e, 0x02, 0x04, 0x03, 0x12, 0x04, 0xdb, 0x02, 0x20, 0x22, 0x0a, + 0x34, 0x0a, 0x02, 0x04, 0x1f, 0x12, 0x06, 0xe0, 0x02, 0x00, 0xe5, 0x02, 0x01, 0x1a, 0x26, 0x20, + 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x20, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, + 0x69, 0x65, 0x73, 0x20, 0x77, 0x65, 0x20, 0x63, 0x61, 0x6e, 0x20, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x20, 0x62, 0x79, 0x0a, 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x1f, 0x01, 0x12, 0x04, 0xe0, 0x02, + 0x08, 0x1e, 0x0a, 0x48, 0x0a, 0x04, 0x04, 0x1f, 0x08, 0x00, 0x12, 0x06, 0xe2, 0x02, 0x02, 0xe4, + 0x02, 0x03, 0x1a, 0x38, 0x20, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x20, 0x62, 0x65, 0x63, 0x61, 0x75, + 0x73, 0x65, 0x20, 0x77, 0x65, 0x20, 0x63, 0x61, 0x6e, 0x20, 0x61, 0x64, 0x64, 0x20, 0x6d, 0x6f, + 0x72, 0x65, 0x20, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x20, 0x69, 0x6e, + 0x20, 0x74, 0x68, 0x65, 0x20, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, + 0x04, 0x1f, 0x08, 0x00, 0x01, 0x12, 0x04, 0xe2, 0x02, 0x08, 0x10, 0x0a, 0x0c, 0x0a, 0x04, 0x04, + 0x1f, 0x02, 0x00, 0x12, 0x04, 0xe3, 0x02, 0x04, 0x1b, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x1f, 0x02, + 0x00, 0x05, 0x12, 0x04, 0xe3, 0x02, 0x04, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x1f, 0x02, 0x00, + 0x01, 0x12, 0x04, 0xe3, 0x02, 0x0b, 0x16, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x1f, 0x02, 0x00, 0x03, + 0x12, 0x04, 0xe3, 0x02, 0x19, 0x1a, 0x0a, 0x2f, 0x0a, 0x02, 0x04, 0x20, 0x12, 0x06, 0xe8, 
0x02, + 0x00, 0xec, 0x02, 0x01, 0x1a, 0x21, 0x20, 0x54, 0x61, 0x67, 0x20, 0x70, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x20, 0x77, 0x65, 0x20, 0x63, 0x61, 0x6e, 0x20, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x20, 0x62, 0x79, 0x0a, 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x20, 0x01, 0x12, 0x04, + 0xe8, 0x02, 0x08, 0x19, 0x0a, 0x0e, 0x0a, 0x04, 0x04, 0x20, 0x08, 0x00, 0x12, 0x06, 0xe9, 0x02, + 0x02, 0xeb, 0x02, 0x03, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x20, 0x08, 0x00, 0x01, 0x12, 0x04, 0xe9, + 0x02, 0x08, 0x10, 0x0a, 0x0c, 0x0a, 0x04, 0x04, 0x20, 0x02, 0x00, 0x12, 0x04, 0xea, 0x02, 0x04, + 0x18, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x20, 0x02, 0x00, 0x05, 0x12, 0x04, 0xea, 0x02, 0x04, 0x0a, + 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x20, 0x02, 0x00, 0x01, 0x12, 0x04, 0xea, 0x02, 0x0b, 0x13, 0x0a, + 0x0d, 0x0a, 0x05, 0x04, 0x20, 0x02, 0x00, 0x03, 0x12, 0x04, 0xea, 0x02, 0x16, 0x17, 0x0a, 0x35, + 0x0a, 0x02, 0x04, 0x21, 0x12, 0x06, 0xef, 0x02, 0x00, 0xf3, 0x02, 0x01, 0x1a, 0x27, 0x20, 0x50, + 0x61, 0x72, 0x74, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, + 0x69, 0x65, 0x73, 0x20, 0x77, 0x65, 0x20, 0x63, 0x61, 0x6e, 0x20, 0x66, 0x69, 0x6c, 0x74, 0x65, + 0x72, 0x20, 0x62, 0x79, 0x0a, 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x21, 0x01, 0x12, 0x04, 0xef, 0x02, + 0x08, 0x1f, 0x0a, 0x0e, 0x0a, 0x04, 0x04, 0x21, 0x08, 0x00, 0x12, 0x06, 0xf0, 0x02, 0x02, 0xf2, + 0x02, 0x03, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x21, 0x08, 0x00, 0x01, 0x12, 0x04, 0xf0, 0x02, 0x08, + 0x10, 0x0a, 0x0c, 0x0a, 0x04, 0x04, 0x21, 0x02, 0x00, 0x12, 0x04, 0xf1, 0x02, 0x04, 0x1d, 0x0a, + 0x0d, 0x0a, 0x05, 0x04, 0x21, 0x02, 0x00, 0x06, 0x12, 0x04, 0xf1, 0x02, 0x04, 0x10, 0x0a, 0x0d, + 0x0a, 0x05, 0x04, 0x21, 0x02, 0x00, 0x01, 0x12, 0x04, 0xf1, 0x02, 0x11, 0x18, 0x0a, 0x0d, 0x0a, + 0x05, 0x04, 0x21, 0x02, 0x00, 0x03, 0x12, 0x04, 0xf1, 0x02, 0x1b, 0x1c, 0x0a, 0x0c, 0x0a, 0x02, + 0x04, 0x22, 0x12, 0x06, 0xf5, 0x02, 0x00, 0xf8, 0x02, 0x01, 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x22, + 0x01, 0x12, 0x04, 0xf5, 0x02, 
0x08, 0x14, 0x0a, 0x0c, 0x0a, 0x04, 0x04, 0x22, 0x02, 0x00, 0x12, + 0x04, 0xf6, 0x02, 0x02, 0x11, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x22, 0x02, 0x00, 0x05, 0x12, 0x04, + 0xf6, 0x02, 0x02, 0x08, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x22, 0x02, 0x00, 0x01, 0x12, 0x04, 0xf6, + 0x02, 0x09, 0x0c, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x22, 0x02, 0x00, 0x03, 0x12, 0x04, 0xf6, 0x02, + 0x0f, 0x10, 0x0a, 0x0c, 0x0a, 0x04, 0x04, 0x22, 0x02, 0x01, 0x12, 0x04, 0xf7, 0x02, 0x02, 0x13, + 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x22, 0x02, 0x01, 0x05, 0x12, 0x04, 0xf7, 0x02, 0x02, 0x08, 0x0a, + 0x0d, 0x0a, 0x05, 0x04, 0x22, 0x02, 0x01, 0x01, 0x12, 0x04, 0xf7, 0x02, 0x09, 0x0e, 0x0a, 0x0d, + 0x0a, 0x05, 0x04, 0x22, 0x02, 0x01, 0x03, 0x12, 0x04, 0xf7, 0x02, 0x11, 0x12, 0x0a, 0x33, 0x0a, + 0x02, 0x04, 0x23, 0x12, 0x06, 0xfb, 0x02, 0x00, 0x84, 0x03, 0x01, 0x1a, 0x25, 0x20, 0x44, 0x61, + 0x74, 0x61, 0x73, 0x65, 0x74, 0x20, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, + 0x20, 0x77, 0x65, 0x20, 0x63, 0x61, 0x6e, 0x20, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x20, 0x62, + 0x79, 0x0a, 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x23, 0x01, 0x12, 0x04, 0xfb, 0x02, 0x08, 0x1d, 0x0a, + 0x0e, 0x0a, 0x04, 0x04, 0x23, 0x08, 0x00, 0x12, 0x06, 0xfc, 0x02, 0x02, 0x83, 0x03, 0x03, 0x0a, + 0x0d, 0x0a, 0x05, 0x04, 0x23, 0x08, 0x00, 0x01, 0x12, 0x04, 0xfc, 0x02, 0x08, 0x10, 0x0a, 0x0c, + 0x0a, 0x04, 0x04, 0x23, 0x02, 0x00, 0x12, 0x04, 0xfd, 0x02, 0x04, 0x17, 0x0a, 0x0d, 0x0a, 0x05, + 0x04, 0x23, 0x02, 0x00, 0x05, 0x12, 0x04, 0xfd, 0x02, 0x04, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, + 0x23, 0x02, 0x00, 0x01, 0x12, 0x04, 0xfd, 0x02, 0x0b, 0x12, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x23, + 0x02, 0x00, 0x03, 0x12, 0x04, 0xfd, 0x02, 0x15, 0x16, 0x0a, 0x0c, 0x0a, 0x04, 0x04, 0x23, 0x02, + 0x01, 0x12, 0x04, 0xfe, 0x02, 0x04, 0x14, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x23, 0x02, 0x01, 0x05, + 0x12, 0x04, 0xfe, 0x02, 0x04, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x23, 0x02, 0x01, 0x01, 0x12, + 0x04, 0xfe, 0x02, 0x0b, 0x0f, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x23, 
0x02, 0x01, 0x03, 0x12, 0x04, + 0xfe, 0x02, 0x12, 0x13, 0x0a, 0x0c, 0x0a, 0x04, 0x04, 0x23, 0x02, 0x02, 0x12, 0x04, 0xff, 0x02, + 0x04, 0x16, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x23, 0x02, 0x02, 0x05, 0x12, 0x04, 0xff, 0x02, 0x04, + 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x23, 0x02, 0x02, 0x01, 0x12, 0x04, 0xff, 0x02, 0x0b, 0x11, + 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x23, 0x02, 0x02, 0x03, 0x12, 0x04, 0xff, 0x02, 0x14, 0x15, 0x0a, + 0x0c, 0x0a, 0x04, 0x04, 0x23, 0x02, 0x03, 0x12, 0x04, 0x80, 0x03, 0x04, 0x17, 0x0a, 0x0d, 0x0a, + 0x05, 0x04, 0x23, 0x02, 0x03, 0x05, 0x12, 0x04, 0x80, 0x03, 0x04, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, + 0x04, 0x23, 0x02, 0x03, 0x01, 0x12, 0x04, 0x80, 0x03, 0x0b, 0x12, 0x0a, 0x0d, 0x0a, 0x05, 0x04, + 0x23, 0x02, 0x03, 0x03, 0x12, 0x04, 0x80, 0x03, 0x15, 0x16, 0x0a, 0x39, 0x0a, 0x04, 0x04, 0x23, + 0x02, 0x04, 0x12, 0x04, 0x82, 0x03, 0x04, 0x13, 0x1a, 0x2b, 0x20, 0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x2c, 0x20, 0x6f, 0x72, 0x67, 0x20, 0x6b, 0x65, 0x79, 0x20, 0x61, 0x70, 0x70, + 0x6c, 0x69, 0x65, 0x64, 0x20, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x64, 0x61, 0x74, 0x61, + 0x73, 0x65, 0x74, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x23, 0x02, 0x04, 0x05, 0x12, 0x04, + 0x82, 0x03, 0x04, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x23, 0x02, 0x04, 0x01, 0x12, 0x04, 0x82, + 0x03, 0x0b, 0x0e, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x23, 0x02, 0x04, 0x03, 0x12, 0x04, 0x82, 0x03, + 0x11, 0x12, 0x0a, 0x3b, 0x0a, 0x02, 0x04, 0x24, 0x12, 0x06, 0x87, 0x03, 0x00, 0x9c, 0x03, 0x01, + 0x1a, 0x2d, 0x20, 0x50, 0x61, 0x67, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x6f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x6d, 0x61, 0x6b, 0x69, 0x6e, 0x67, + 0x20, 0x6c, 0x69, 0x73, 0x74, 0x20, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x0a, 0x0a, + 0x0b, 0x0a, 0x03, 0x04, 0x24, 0x01, 0x12, 0x04, 0x87, 0x03, 0x08, 0x19, 0x0a, 0x33, 0x0a, 0x04, + 0x04, 0x24, 0x02, 0x00, 0x12, 0x04, 0x89, 0x03, 0x02, 0x13, 0x1a, 0x25, 0x20, 0x74, 0x68, 0x65, + 0x20, 
0x6d, 0x61, 0x78, 0x20, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x20, 0x6f, 0x66, 0x20, 0x72, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, + 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x24, 0x02, 0x00, 0x05, 0x12, 0x04, 0x89, 0x03, 0x02, 0x08, + 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x24, 0x02, 0x00, 0x01, 0x12, 0x04, 0x89, 0x03, 0x09, 0x0e, 0x0a, + 0x0d, 0x0a, 0x05, 0x04, 0x24, 0x02, 0x00, 0x03, 0x12, 0x04, 0x89, 0x03, 0x11, 0x12, 0x0a, 0x38, + 0x0a, 0x04, 0x04, 0x24, 0x02, 0x01, 0x12, 0x04, 0x8c, 0x03, 0x02, 0x13, 0x1a, 0x2a, 0x20, 0x74, + 0x68, 0x65, 0x20, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x20, 0x74, 0x6f, 0x20, 0x70, 0x61, 0x73, 0x73, + 0x20, 0x74, 0x6f, 0x20, 0x66, 0x65, 0x74, 0x63, 0x68, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6e, 0x65, + 0x78, 0x74, 0x20, 0x70, 0x61, 0x67, 0x65, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x24, 0x02, 0x01, + 0x05, 0x12, 0x04, 0x8c, 0x03, 0x02, 0x08, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x24, 0x02, 0x01, 0x01, + 0x12, 0x04, 0x8c, 0x03, 0x09, 0x0e, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x24, 0x02, 0x01, 0x03, 0x12, + 0x04, 0x8c, 0x03, 0x11, 0x12, 0x0a, 0x40, 0x0a, 0x04, 0x04, 0x24, 0x02, 0x02, 0x12, 0x04, 0x8f, + 0x03, 0x02, 0x16, 0x1a, 0x32, 0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, + 0x74, 0x79, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x77, 0x65, 0x20, 0x77, 0x61, 0x6e, 0x74, 0x20, + 0x74, 0x6f, 0x20, 0x73, 0x6f, 0x72, 0x74, 0x20, 0x74, 0x68, 0x65, 0x20, 0x72, 0x65, 0x73, 0x75, + 0x6c, 0x74, 0x73, 0x20, 0x62, 0x79, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x24, 0x02, 0x02, 0x06, + 0x12, 0x04, 0x8f, 0x03, 0x02, 0x09, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x24, 0x02, 0x02, 0x01, 0x12, + 0x04, 0x8f, 0x03, 0x0a, 0x11, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x24, 0x02, 0x02, 0x03, 0x12, 0x04, + 0x8f, 0x03, 0x14, 0x15, 0x0a, 0x2d, 0x0a, 0x04, 0x04, 0x24, 0x02, 0x03, 0x12, 0x04, 0x92, 0x03, + 0x02, 0x1a, 0x1a, 0x1f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x6f, 0x72, 0x74, 0x20, 0x6f, 0x72, + 0x64, 0x65, 0x72, 0x20, 0x6f, 0x66, 0x20, 
0x74, 0x68, 0x65, 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x73, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x24, 0x02, 0x03, 0x06, 0x12, 0x04, 0x92, 0x03, + 0x02, 0x0b, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x24, 0x02, 0x03, 0x01, 0x12, 0x04, 0x92, 0x03, 0x0c, + 0x15, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x24, 0x02, 0x03, 0x03, 0x12, 0x04, 0x92, 0x03, 0x18, 0x19, + 0x0a, 0x0e, 0x0a, 0x04, 0x04, 0x24, 0x04, 0x00, 0x12, 0x06, 0x94, 0x03, 0x02, 0x97, 0x03, 0x03, + 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x24, 0x04, 0x00, 0x01, 0x12, 0x04, 0x94, 0x03, 0x07, 0x10, 0x0a, + 0x0e, 0x0a, 0x06, 0x04, 0x24, 0x04, 0x00, 0x02, 0x00, 0x12, 0x04, 0x95, 0x03, 0x04, 0x13, 0x0a, + 0x0f, 0x0a, 0x07, 0x04, 0x24, 0x04, 0x00, 0x02, 0x00, 0x01, 0x12, 0x04, 0x95, 0x03, 0x04, 0x0e, + 0x0a, 0x0f, 0x0a, 0x07, 0x04, 0x24, 0x04, 0x00, 0x02, 0x00, 0x02, 0x12, 0x04, 0x95, 0x03, 0x11, + 0x12, 0x0a, 0x0e, 0x0a, 0x06, 0x04, 0x24, 0x04, 0x00, 0x02, 0x01, 0x12, 0x04, 0x96, 0x03, 0x04, + 0x12, 0x0a, 0x0f, 0x0a, 0x07, 0x04, 0x24, 0x04, 0x00, 0x02, 0x01, 0x01, 0x12, 0x04, 0x96, 0x03, + 0x04, 0x0d, 0x0a, 0x0f, 0x0a, 0x07, 0x04, 0x24, 0x04, 0x00, 0x02, 0x01, 0x02, 0x12, 0x04, 0x96, + 0x03, 0x10, 0x11, 0x0a, 0x0e, 0x0a, 0x04, 0x04, 0x24, 0x04, 0x01, 0x12, 0x06, 0x99, 0x03, 0x02, + 0x9b, 0x03, 0x03, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x24, 0x04, 0x01, 0x01, 0x12, 0x04, 0x99, 0x03, + 0x07, 0x0e, 0x0a, 0x0e, 0x0a, 0x06, 0x04, 0x24, 0x04, 0x01, 0x02, 0x00, 0x12, 0x04, 0x9a, 0x03, + 0x04, 0x16, 0x0a, 0x0f, 0x0a, 0x07, 0x04, 0x24, 0x04, 0x01, 0x02, 0x00, 0x01, 0x12, 0x04, 0x9a, + 0x03, 0x04, 0x11, 0x0a, 0x0f, 0x0a, 0x07, 0x04, 0x24, 0x04, 0x01, 0x02, 0x00, 0x02, 0x12, 0x04, + 0x9a, 0x03, 0x14, 0x15, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +]; +include!("flyteidl2.datacatalog.tonic.rs"); +// @@protoc_insertion_point(module) \ No newline at end of file diff --git a/gen/rust/src/flyteidl2.datacatalog.tonic.rs b/gen/rust/src/flyteidl2.datacatalog.tonic.rs new file mode 100644 index 0000000000..d1a7f40a48 --- /dev/null +++ 
b/gen/rust/src/flyteidl2.datacatalog.tonic.rs @@ -0,0 +1,1005 @@ +// @generated +/// Generated client implementations. +pub mod data_catalog_client { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + #[derive(Debug, Clone)] + pub struct DataCatalogClient { + inner: tonic::client::Grpc, + } + impl DataCatalogClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl DataCatalogClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + Send + 'static, + ::Error: Into + Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> DataCatalogClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + Send + Sync, + { + DataCatalogClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. 
+ /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + pub async fn create_dataset( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl2.datacatalog.DataCatalog/CreateDataset", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl2.datacatalog.DataCatalog", "CreateDataset"), + ); + self.inner.unary(req, path, codec).await + } + pub async fn get_dataset( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl2.datacatalog.DataCatalog/GetDataset", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl2.datacatalog.DataCatalog", "GetDataset"), + ); + self.inner.unary(req, path, codec).await + } + pub async fn create_artifact( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + 
format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl2.datacatalog.DataCatalog/CreateArtifact", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl2.datacatalog.DataCatalog", + "CreateArtifact", + ), + ); + self.inner.unary(req, path, codec).await + } + pub async fn get_artifact( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl2.datacatalog.DataCatalog/GetArtifact", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl2.datacatalog.DataCatalog", "GetArtifact"), + ); + self.inner.unary(req, path, codec).await + } + pub async fn add_tag( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl2.datacatalog.DataCatalog/AddTag", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("flyteidl2.datacatalog.DataCatalog", "AddTag")); + self.inner.unary(req, path, codec).await + } + pub async fn list_artifacts( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", 
e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl2.datacatalog.DataCatalog/ListArtifacts", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl2.datacatalog.DataCatalog", "ListArtifacts"), + ); + self.inner.unary(req, path, codec).await + } + pub async fn list_datasets( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl2.datacatalog.DataCatalog/ListDatasets", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("flyteidl2.datacatalog.DataCatalog", "ListDatasets"), + ); + self.inner.unary(req, path, codec).await + } + pub async fn update_artifact( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl2.datacatalog.DataCatalog/UpdateArtifact", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl2.datacatalog.DataCatalog", + "UpdateArtifact", + ), + ); + self.inner.unary(req, path, codec).await + } + pub async fn get_or_extend_reservation( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + 
format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl2.datacatalog.DataCatalog/GetOrExtendReservation", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl2.datacatalog.DataCatalog", + "GetOrExtendReservation", + ), + ); + self.inner.unary(req, path, codec).await + } + pub async fn release_reservation( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/flyteidl2.datacatalog.DataCatalog/ReleaseReservation", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "flyteidl2.datacatalog.DataCatalog", + "ReleaseReservation", + ), + ); + self.inner.unary(req, path, codec).await + } + } +} +/// Generated server implementations. +pub mod data_catalog_server { + #![allow(unused_variables, dead_code, missing_docs, clippy::let_unit_value)] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with DataCatalogServer. 
+ #[async_trait] + pub trait DataCatalog: Send + Sync + 'static { + async fn create_dataset( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn get_dataset( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn create_artifact( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn get_artifact( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn add_tag( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + async fn list_artifacts( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn list_datasets( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn update_artifact( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn get_or_extend_reservation( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + async fn release_reservation( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + } + #[derive(Debug)] + pub struct DataCatalogServer { + inner: Arc, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, + } + impl DataCatalogServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + pub fn with_interceptor( + 
inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } + } + impl tonic::codegen::Service> for DataCatalogServer + where + T: DataCatalog, + B: Body + Send + 'static, + B::Error: Into + Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + match req.uri().path() { + "/flyteidl2.datacatalog.DataCatalog/CreateDataset" => { + #[allow(non_camel_case_types)] + struct CreateDatasetSvc(pub Arc); + impl< + T: DataCatalog, + > tonic::server::UnaryService + for CreateDatasetSvc { + type Response = super::CreateDatasetResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::create_dataset(&inner, request).await + }; 
+ Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = CreateDatasetSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl2.datacatalog.DataCatalog/GetDataset" => { + #[allow(non_camel_case_types)] + struct GetDatasetSvc(pub Arc); + impl< + T: DataCatalog, + > tonic::server::UnaryService + for GetDatasetSvc { + type Response = super::GetDatasetResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_dataset(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetDatasetSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + 
Box::pin(fut) + } + "/flyteidl2.datacatalog.DataCatalog/CreateArtifact" => { + #[allow(non_camel_case_types)] + struct CreateArtifactSvc(pub Arc); + impl< + T: DataCatalog, + > tonic::server::UnaryService + for CreateArtifactSvc { + type Response = super::CreateArtifactResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::create_artifact(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = CreateArtifactSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl2.datacatalog.DataCatalog/GetArtifact" => { + #[allow(non_camel_case_types)] + struct GetArtifactSvc(pub Arc); + impl< + T: DataCatalog, + > tonic::server::UnaryService + for GetArtifactSvc { + type Response = super::GetArtifactResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_artifact(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = 
self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetArtifactSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl2.datacatalog.DataCatalog/AddTag" => { + #[allow(non_camel_case_types)] + struct AddTagSvc(pub Arc); + impl< + T: DataCatalog, + > tonic::server::UnaryService + for AddTagSvc { + type Response = super::AddTagResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::add_tag(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = AddTagSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl2.datacatalog.DataCatalog/ListArtifacts" => { + #[allow(non_camel_case_types)] + struct ListArtifactsSvc(pub Arc); + impl< + T: DataCatalog, + > tonic::server::UnaryService + for 
ListArtifactsSvc { + type Response = super::ListArtifactsResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::list_artifacts(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = ListArtifactsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl2.datacatalog.DataCatalog/ListDatasets" => { + #[allow(non_camel_case_types)] + struct ListDatasetsSvc(pub Arc); + impl< + T: DataCatalog, + > tonic::server::UnaryService + for ListDatasetsSvc { + type Response = super::ListDatasetsResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::list_datasets(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = ListDatasetsSvc(inner); + let codec = 
tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl2.datacatalog.DataCatalog/UpdateArtifact" => { + #[allow(non_camel_case_types)] + struct UpdateArtifactSvc(pub Arc); + impl< + T: DataCatalog, + > tonic::server::UnaryService + for UpdateArtifactSvc { + type Response = super::UpdateArtifactResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::update_artifact(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = UpdateArtifactSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl2.datacatalog.DataCatalog/GetOrExtendReservation" => { + #[allow(non_camel_case_types)] + struct GetOrExtendReservationSvc(pub Arc); + impl< + T: DataCatalog, + > tonic::server::UnaryService + for GetOrExtendReservationSvc { + type Response = super::GetOrExtendReservationResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn 
call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_or_extend_reservation( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetOrExtendReservationSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/flyteidl2.datacatalog.DataCatalog/ReleaseReservation" => { + #[allow(non_camel_case_types)] + struct ReleaseReservationSvc(pub Arc); + impl< + T: DataCatalog, + > tonic::server::UnaryService + for ReleaseReservationSvc { + type Response = super::ReleaseReservationResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::release_reservation(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = ReleaseReservationSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = 
tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + Ok( + http::Response::builder() + .status(200) + .header("grpc-status", tonic::Code::Unimplemented as i32) + .header( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ) + .body(empty_body()) + .unwrap(), + ) + }) + } + } + } + } + impl Clone for DataCatalogServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, + } + } + } + impl tonic::server::NamedService for DataCatalogServer { + const NAME: &'static str = "flyteidl2.datacatalog.DataCatalog"; + } +} diff --git a/gen/rust/src/flyteidl2.event.rs b/gen/rust/src/flyteidl2.event.rs new file mode 100644 index 0000000000..b9d8ff8be6 --- /dev/null +++ b/gen/rust/src/flyteidl2.event.rs @@ -0,0 +1,2050 @@ +// @generated +// This file is @generated by prost-build. +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct WorkflowExecutionEvent { + /// Workflow execution id + #[prost(message, optional, tag="1")] + pub execution_id: ::core::option::Option, + /// the id of the originator (Propeller) of the event + #[prost(string, tag="2")] + pub producer_id: ::prost::alloc::string::String, + #[prost(enumeration="super::core::workflow_execution::Phase", tag="3")] + pub phase: i32, + /// This timestamp represents when the original event occurred, it is generated + /// by the executor of the workflow. 
+ #[prost(message, optional, tag="4")] + pub occurred_at: ::core::option::Option, + #[prost(oneof="workflow_execution_event::OutputResult", tags="5, 6, 7")] + pub output_result: ::core::option::Option, +} +/// Nested message and enum types in `WorkflowExecutionEvent`. +pub mod workflow_execution_event { + #[pyo3::pyclass(dict, get_all, set_all)] + #[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum OutputResult { + /// URL to the output of the execution, it encodes all the information + /// including Cloud source provider. ie., s3://... + #[prost(string, tag="5")] + OutputUri(::prost::alloc::string::String), + /// Error information for the execution + #[prost(message, tag="6")] + Error(super::super::core::ExecutionError), + /// Raw output data produced by this workflow execution. + #[prost(message, tag="7")] + OutputData(super::super::core::LiteralMap), + } +} +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct NodeExecutionEvent { + /// Unique identifier for this node execution + #[prost(message, optional, tag="1")] + pub id: ::core::option::Option, + /// the id of the originator (Propeller) of the event + #[prost(string, tag="2")] + pub producer_id: ::prost::alloc::string::String, + #[prost(enumeration="super::core::node_execution::Phase", tag="3")] + pub phase: i32, + /// This timestamp represents when the original event occurred, it is generated + /// by the executor of the node. + #[prost(message, optional, tag="4")] + pub occurred_at: ::core::option::Option, + /// \[To be deprecated\] Specifies which task (if any) launched this node. + #[prost(message, optional, tag="9")] + pub parent_task_metadata: ::core::option::Option, + /// Specifies the parent node of the current node execution. Node executions at level zero will not have a parent node. 
+ #[prost(message, optional, tag="10")] + pub parent_node_metadata: ::core::option::Option, + /// Retry group to indicate grouping of nodes by retries + #[prost(string, tag="11")] + pub retry_group: ::prost::alloc::string::String, + /// Identifier of the node in the original workflow/graph + /// This maps to value of WorkflowTemplate.nodes\[X\].id + #[prost(string, tag="12")] + pub spec_node_id: ::prost::alloc::string::String, + /// Friendly readable name for the node + #[prost(string, tag="13")] + pub node_name: ::prost::alloc::string::String, + #[prost(int32, tag="16")] + pub event_version: i32, + /// Whether this node launched a subworkflow. + #[prost(bool, tag="17")] + pub is_parent: bool, + /// Whether this node yielded a dynamic workflow. + #[prost(bool, tag="18")] + pub is_dynamic: bool, + /// String location uniquely identifying where the deck HTML file is + /// NativeUrl specifies the url in the format of the configured storage provider (e.g. s3://my-bucket/randomstring/suffix.tar) + #[prost(string, tag="19")] + pub deck_uri: ::prost::alloc::string::String, + /// This timestamp represents the instant when the event was reported by the executing framework. For example, + /// when first processing a node the `occurred_at` timestamp should be the instant propeller makes progress, so when + /// literal inputs are initially copied. The event however will not be sent until after the copy completes. + /// Extracting both of these timestamps facilitates a more accurate portrayal of the evaluation time-series. + #[prost(message, optional, tag="21")] + pub reported_at: ::core::option::Option, + /// Indicates if this node is an ArrayNode. + #[prost(bool, tag="22")] + pub is_array: bool, + /// So that Admin doesn't have to rebuild the node execution graph to find the target entity, propeller will fill this + /// in optionally - currently this is only filled in for subworkflows. This is the ID of the subworkflow corresponding + /// to this node execution. 
It is difficult to find because Admin only sees one node at a time. A subworkflow could be + /// nested multiple layers deep, and you'd need to access the correct workflow template to know the target subworkflow. + #[prost(message, optional, tag="23")] + pub target_entity: ::core::option::Option, + /// Tasks and subworkflows (but not launch plans) that are run within a dynamic task are effectively independent of + /// the tasks that are registered in Admin's db. Confusingly, they are often identical, but sometimes they are not + /// even registered at all. Similar to the target_entity field, at the time Admin receives this event, it has no idea + /// if the relevant execution entity is was registered, or dynamic. This field indicates that the target_entity ID, + /// as well as task IDs in any corresponding Task Executions, should not be used to looked up the task in Admin's db. + #[prost(bool, tag="24")] + pub is_in_dynamic_chain: bool, + /// Whether this node launched an eager task. + #[prost(bool, tag="25")] + pub is_eager: bool, + #[prost(oneof="node_execution_event::InputValue", tags="5, 20")] + pub input_value: ::core::option::Option, + #[prost(oneof="node_execution_event::OutputResult", tags="6, 7, 15")] + pub output_result: ::core::option::Option, + /// Additional metadata to do with this event's node target based + /// on the node type + #[prost(oneof="node_execution_event::TargetMetadata", tags="8, 14")] + pub target_metadata: ::core::option::Option, +} +/// Nested message and enum types in `NodeExecutionEvent`. +pub mod node_execution_event { + #[pyo3::pyclass(dict, get_all, set_all)] + #[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum InputValue { + #[prost(string, tag="5")] + InputUri(::prost::alloc::string::String), + /// Raw input data consumed by this node execution. 
+ #[prost(message, tag="20")] + InputData(super::super::core::LiteralMap), + } + #[pyo3::pyclass(dict, get_all, set_all)] + #[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum OutputResult { + /// URL to the output of the execution, it encodes all the information + /// including Cloud source provider. ie., s3://... + #[prost(string, tag="6")] + OutputUri(::prost::alloc::string::String), + /// Error information for the execution + #[prost(message, tag="7")] + Error(super::super::core::ExecutionError), + /// Raw output data produced by this node execution. + #[prost(message, tag="15")] + OutputData(super::super::core::LiteralMap), + } + /// Additional metadata to do with this event's node target based + /// on the node type + #[pyo3::pyclass(dict, get_all, set_all)] + #[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum TargetMetadata { + #[prost(message, tag="8")] + WorkflowNodeMetadata(super::WorkflowNodeMetadata), + #[prost(message, tag="14")] + TaskNodeMetadata(super::TaskNodeMetadata), + } +} +/// For Workflow Nodes we need to send information about the workflow that's launched +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct WorkflowNodeMetadata { + #[prost(message, optional, tag="1")] + pub execution_id: ::core::option::Option, +} +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TaskNodeMetadata { + /// Captures the status of caching for this execution. + #[prost(enumeration="super::core::CatalogCacheStatus", tag="1")] + pub cache_status: i32, + /// This structure carries the catalog artifact information + #[prost(message, optional, tag="2")] + pub catalog_key: ::core::option::Option, + /// Captures the status of cache reservations for this execution. 
+ #[prost(enumeration="super::core::catalog_reservation::Status", tag="3")] + pub reservation_status: i32, + /// The latest checkpoint location + #[prost(string, tag="4")] + pub checkpoint_uri: ::prost::alloc::string::String, +} +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ParentTaskExecutionMetadata { + #[prost(message, optional, tag="1")] + pub id: ::core::option::Option, +} +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ParentNodeExecutionMetadata { + /// Unique identifier of the parent node id within the execution + /// This is value of core.NodeExecutionIdentifier.node_id of the parent node + #[prost(string, tag="1")] + pub node_id: ::prost::alloc::string::String, +} +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct EventReason { + /// An explanation for this event + #[prost(string, tag="1")] + pub reason: ::prost::alloc::string::String, + /// The time this reason occurred + #[prost(message, optional, tag="2")] + pub occurred_at: ::core::option::Option, +} +/// Plugin specific execution event information. For tasks like Python, Hive, Spark, DynamicJob. +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TaskExecutionEvent { + /// ID of the task. In combination with the retryAttempt this will indicate + /// the task execution uniquely for a given parent node execution. 
+ #[prost(message, optional, tag="1")] + pub task_id: ::core::option::Option, + /// A task execution is always kicked off by a node execution, the event consumer + /// will use the parent_id to relate the task to it's parent node execution + #[prost(message, optional, tag="2")] + pub parent_node_execution_id: ::core::option::Option, + /// retry attempt number for this task, ie., 2 for the second attempt + #[prost(uint32, tag="3")] + pub retry_attempt: u32, + /// Phase associated with the event + #[prost(enumeration="super::core::task_execution::Phase", tag="4")] + pub phase: i32, + /// id of the process that sent this event, mainly for trace debugging + #[prost(string, tag="5")] + pub producer_id: ::prost::alloc::string::String, + /// log information for the task execution + #[prost(message, repeated, tag="6")] + pub logs: ::prost::alloc::vec::Vec, + /// This timestamp represents when the original event occurred, it is generated + /// by the executor of the task. + #[prost(message, optional, tag="7")] + pub occurred_at: ::core::option::Option, + /// Custom data that the task plugin sends back. This is extensible to allow various plugins in the system. + #[prost(message, optional, tag="11")] + pub custom_info: ::core::option::Option, + /// Some phases, like RUNNING, can send multiple events with changed metadata (new logs, additional custom_info, etc) + /// that should be recorded regardless of the lack of phase change. + /// The version field should be incremented when metadata changes across the duration of an individual phase. + #[prost(uint32, tag="12")] + pub phase_version: u32, + /// An optional explanation for the phase transition. + /// Deprecated: Use reasons instead. + #[deprecated] + #[prost(string, tag="13")] + pub reason: ::prost::alloc::string::String, + /// An optional list of explanations for the phase transition. + #[prost(message, repeated, tag="21")] + pub reasons: ::prost::alloc::vec::Vec, + /// A predefined yet extensible Task type identifier. 
If the task definition is already registered in flyte admin + /// this type will be identical, but not all task executions necessarily use pre-registered definitions and this + /// type is useful to render the task in the UI, filter task executions, etc. + #[prost(string, tag="14")] + pub task_type: ::prost::alloc::string::String, + /// Metadata around how a task was executed. + #[prost(message, optional, tag="16")] + pub metadata: ::core::option::Option, + /// The event version is used to indicate versioned changes in how data is reported using this + /// proto message. For example, event_verison > 0 means that maps tasks report logs using the + /// TaskExecutionMetadata ExternalResourceInfo fields for each subtask rather than the TaskLog + /// in this message. + #[prost(int32, tag="18")] + pub event_version: i32, + /// This timestamp represents the instant when the event was reported by the executing framework. For example, a k8s + /// pod task may be marked completed at (ie. `occurred_at`) the instant the container running user code completes, + /// but this event will not be reported until the pod is marked as completed. Extracting both of these timestamps + /// facilitates a more accurate portrayal of the evaluation time-series. + #[prost(message, optional, tag="20")] + pub reported_at: ::core::option::Option, + /// Contains metadata required to identify logs related to this task execution + #[prost(message, optional, tag="22")] + pub log_context: ::core::option::Option, + #[prost(oneof="task_execution_event::InputValue", tags="8, 19")] + pub input_value: ::core::option::Option, + #[prost(oneof="task_execution_event::OutputResult", tags="9, 10, 17")] + pub output_result: ::core::option::Option, +} +/// Nested message and enum types in `TaskExecutionEvent`. 
+pub mod task_execution_event { + #[pyo3::pyclass(dict, get_all, set_all)] + #[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum InputValue { + /// URI of the input file, it encodes all the information + /// including Cloud source provider. ie., s3://... + #[prost(string, tag="8")] + InputUri(::prost::alloc::string::String), + /// Raw input data consumed by this task execution. + #[prost(message, tag="19")] + InputData(super::super::core::LiteralMap), + } + #[pyo3::pyclass(dict, get_all, set_all)] + #[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum OutputResult { + /// URI to the output of the execution, it will be in a format that encodes all the information + /// including Cloud source provider. ie., s3://... + #[prost(string, tag="9")] + OutputUri(::prost::alloc::string::String), + /// Error information for the execution + #[prost(message, tag="10")] + Error(super::super::core::ExecutionError), + /// Raw output data produced by this task execution. + #[prost(message, tag="17")] + OutputData(super::super::core::LiteralMap), + } +} +/// This message contains metadata about external resources produced or used by a specific task execution. +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ExternalResourceInfo { + /// Identifier for an external resource created by this task execution, for example Qubole query ID or presto query ids. + #[prost(string, tag="1")] + pub external_id: ::prost::alloc::string::String, + /// A unique index for the external resource with respect to all external resources for this task. Although the + /// identifier may change between task reporting events or retries, this will remain the same to enable aggregating + /// information from multiple reports. 
+ #[prost(uint32, tag="2")] + pub index: u32, + /// Retry attempt number for this external resource, ie., 2 for the second attempt + #[prost(uint32, tag="3")] + pub retry_attempt: u32, + /// Phase associated with the external resource + #[prost(enumeration="super::core::task_execution::Phase", tag="4")] + pub phase: i32, + /// Captures the status of caching for this external resource execution. + #[prost(enumeration="super::core::CatalogCacheStatus", tag="5")] + pub cache_status: i32, + /// log information for the external resource execution + #[prost(message, repeated, tag="6")] + pub logs: ::prost::alloc::vec::Vec, + /// Extensible field for custom, plugin-specific info + #[prost(message, optional, tag="8")] + pub custom_info: ::core::option::Option, + /// Contains metadata required to identify logs related to this task execution + #[prost(message, optional, tag="9")] + pub log_context: ::core::option::Option, + /// Additional metadata to do with this event's node target based on the node type. We are + /// explicitly not including the task_node_metadata here because it is not clear if it is needed. + /// If we decide to include in the future, we should deprecate the cache_status field. + #[prost(oneof="external_resource_info::TargetMetadata", tags="7")] + pub target_metadata: ::core::option::Option, +} +/// Nested message and enum types in `ExternalResourceInfo`. +pub mod external_resource_info { + /// Additional metadata to do with this event's node target based on the node type. We are + /// explicitly not including the task_node_metadata here because it is not clear if it is needed. + /// If we decide to include in the future, we should deprecate the cache_status field. 
+ #[pyo3::pyclass(dict, get_all, set_all)] + #[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum TargetMetadata { + #[prost(message, tag="7")] + WorkflowNodeMetadata(super::WorkflowNodeMetadata), + } +} +/// This message holds task execution metadata specific to resource allocation used to manage concurrent +/// executions for a project namespace. +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ResourcePoolInfo { + /// Unique resource ID used to identify this execution when allocating a token. + #[prost(string, tag="1")] + pub allocation_token: ::prost::alloc::string::String, + /// Namespace under which this task execution requested an allocation token. + #[prost(string, tag="2")] + pub namespace: ::prost::alloc::string::String, +} +/// Holds metadata around how a task was executed. +/// As a task transitions across event phases during execution some attributes, such its generated name, generated external resources, +/// and more may grow in size but not change necessarily based on the phase transition that sparked the event update. +/// Metadata is a container for these attributes across the task execution lifecycle. +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TaskExecutionMetadata { + /// Unique, generated name for this task execution used by the backend. + #[prost(string, tag="1")] + pub generated_name: ::prost::alloc::string::String, + /// Additional data on external resources on other back-ends or platforms (e.g. Hive, Qubole, etc) launched by this task execution. + #[prost(message, repeated, tag="2")] + pub external_resources: ::prost::alloc::vec::Vec, + /// Includes additional data on concurrent resource management used during execution.. 
+ /// This is a repeated field because a plugin can request multiple resource allocations during execution. + #[prost(message, repeated, tag="3")] + pub resource_pool_info: ::prost::alloc::vec::Vec, + /// The identifier of the plugin used to execute this task. + #[prost(string, tag="4")] + pub plugin_identifier: ::prost::alloc::string::String, + #[prost(enumeration="task_execution_metadata::InstanceClass", tag="16")] + pub instance_class: i32, +} +/// Nested message and enum types in `TaskExecutionMetadata`. +pub mod task_execution_metadata { + /// Includes the broad category of machine used for this specific task execution. + #[pyo3::pyclass(dict, get_all, set_all)] + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] + #[repr(i32)] + pub enum InstanceClass { + /// The default instance class configured for the flyte application platform. + Default = 0, + /// The instance class configured for interruptible tasks. + Interruptible = 1, + } + impl InstanceClass { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + InstanceClass::Default => "DEFAULT", + InstanceClass::Interruptible => "INTERRUPTIBLE", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "DEFAULT" => Some(Self::Default), + "INTERRUPTIBLE" => Some(Self::Interruptible), + _ => None, + } + } + } +} +/// This is the cloud event parallel to the raw WorkflowExecutionEvent message. It's filled in with additional +/// information that downstream consumers may find useful. 
+#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CloudEventWorkflowExecution { + #[prost(message, optional, tag="1")] + pub raw_event: ::core::option::Option, + #[prost(message, optional, tag="2")] + pub output_interface: ::core::option::Option, + /// The following are ExecutionMetadata fields + /// We can't have the ExecutionMetadata object directly because of import cycle + #[prost(message, repeated, tag="3")] + pub artifact_ids: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag="4")] + pub reference_execution: ::core::option::Option, + #[prost(string, tag="5")] + pub principal: ::prost::alloc::string::String, + /// The ID of the LP that generated the execution that generated the Artifact. + /// Here for provenance information. + /// Launch plan IDs are easier to get than workflow IDs so we'll use these for now. + #[prost(message, optional, tag="6")] + pub launch_plan_id: ::core::option::Option, + /// We can't have the ExecutionMetadata object directly because of import cycle + #[prost(map="string, string", tag="7")] + pub labels: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, +} +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CloudEventNodeExecution { + #[prost(message, optional, tag="1")] + pub raw_event: ::core::option::Option, + /// The relevant task execution if applicable + #[prost(message, optional, tag="2")] + pub task_exec_id: ::core::option::Option, + /// The typed interface for the task that produced the event. 
+ #[prost(message, optional, tag="3")] + pub output_interface: ::core::option::Option, + /// The following are ExecutionMetadata fields + /// We can't have the ExecutionMetadata object directly because of import cycle + #[prost(message, repeated, tag="4")] + pub artifact_ids: ::prost::alloc::vec::Vec, + #[prost(string, tag="5")] + pub principal: ::prost::alloc::string::String, + /// The ID of the LP that generated the execution that generated the Artifact. + /// Here for provenance information. + /// Launch plan IDs are easier to get than workflow IDs so we'll use these for now. + #[prost(message, optional, tag="6")] + pub launch_plan_id: ::core::option::Option, + /// We can't have the ExecutionMetadata object directly because of import cycle + #[prost(map="string, string", tag="7")] + pub labels: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, +} +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CloudEventTaskExecution { + #[prost(message, optional, tag="1")] + pub raw_event: ::core::option::Option, + /// We can't have the ExecutionMetadata object directly because of import cycle + #[prost(map="string, string", tag="2")] + pub labels: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, +} +/// This event is to be sent by Admin after it creates an execution. +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CloudEventExecutionStart { + /// The execution created. + #[prost(message, optional, tag="1")] + pub execution_id: ::core::option::Option, + /// The launch plan used. 
+ #[prost(message, optional, tag="2")] + pub launch_plan_id: ::core::option::Option, + #[prost(message, optional, tag="3")] + pub workflow_id: ::core::option::Option, + /// Artifact inputs to the workflow execution for which we have the full Artifact ID. These are likely the result of artifact queries that are run. + #[prost(message, repeated, tag="4")] + pub artifact_ids: ::prost::alloc::vec::Vec, + /// Artifact inputs to the workflow execution for which we only have the tracking bit that's installed into the Literal's metadata by the Artifact service. + #[prost(string, repeated, tag="5")] + pub artifact_trackers: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + #[prost(string, tag="6")] + pub principal: ::prost::alloc::string::String, +} +/// Encoded file descriptor set for the `flyteidl2.event` package +pub const FILE_DESCRIPTOR_SET: &[u8] = &[ + 0x0a, 0xf7, 0x96, 0x01, 0x0a, 0x1b, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x0f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x1a, 0x1c, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, 0x6f, + 0x72, 0x65, 0x2f, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, + 0x2f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, + 0x2f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x1d, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, 0x6f, 0x72, + 0x65, 0x2f, 0x6c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 
0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, + 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0xae, 0x03, 0x0a, 0x16, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, + 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x4e, 0x0a, 0x0c, 0x65, 0x78, + 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x2b, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x0b, 0x65, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x72, + 0x6f, 0x64, 0x75, 0x63, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x72, 0x49, 0x64, 0x12, 0x3d, 0x0a, 0x05, 0x70, + 0x68, 0x61, 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x27, 0x2e, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x57, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x68, + 0x61, 0x73, 0x65, 0x52, 0x05, 0x70, 0x68, 0x61, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x0b, 0x6f, 0x63, + 0x63, 0x75, 0x72, 0x72, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x6f, 0x63, 0x63, + 0x75, 0x72, 0x72, 0x65, 0x64, 0x41, 0x74, 0x12, 0x1f, 0x0a, 0x0a, 0x6f, 0x75, 0x74, 
0x70, 0x75, + 0x74, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x6f, + 0x75, 0x74, 0x70, 0x75, 0x74, 0x55, 0x72, 0x69, 0x12, 0x36, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, + 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x48, 0x00, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x12, 0x3d, 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x4d, 0x61, + 0x70, 0x48, 0x00, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x44, 0x61, 0x74, 0x61, 0x42, + 0x0f, 0x0a, 0x0d, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x22, 0xbe, 0x0a, 0x0a, 0x12, 0x4e, 0x6f, 0x64, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, + 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x37, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, + 0x6f, 0x6e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x02, 0x69, 0x64, + 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x72, 0x49, + 0x64, 0x12, 0x39, 0x0a, 0x05, 0x70, 0x68, 0x61, 0x73, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x23, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x50, 0x68, 0x61, 0x73, 
0x65, 0x52, 0x05, 0x70, 0x68, 0x61, 0x73, 0x65, 0x12, 0x3b, 0x0a, 0x0b, + 0x6f, 0x63, 0x63, 0x75, 0x72, 0x72, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x6f, + 0x63, 0x63, 0x75, 0x72, 0x72, 0x65, 0x64, 0x41, 0x74, 0x12, 0x1d, 0x0a, 0x09, 0x69, 0x6e, 0x70, + 0x75, 0x74, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, + 0x69, 0x6e, 0x70, 0x75, 0x74, 0x55, 0x72, 0x69, 0x12, 0x3b, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, + 0x74, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4c, 0x69, + 0x74, 0x65, 0x72, 0x61, 0x6c, 0x4d, 0x61, 0x70, 0x48, 0x00, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, + 0x74, 0x44, 0x61, 0x74, 0x61, 0x12, 0x1f, 0x0a, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, + 0x75, 0x72, 0x69, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x09, 0x6f, 0x75, 0x74, + 0x70, 0x75, 0x74, 0x55, 0x72, 0x69, 0x12, 0x36, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, + 0x45, 0x72, 0x72, 0x6f, 0x72, 0x48, 0x01, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x3d, + 0x0a, 0x0b, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x0f, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x4d, 0x61, 0x70, 0x48, + 0x01, 0x52, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x44, 0x61, 0x74, 0x61, 0x12, 0x5d, 0x0a, + 0x16, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 
0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, + 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x48, 0x02, 0x52, 0x14, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x51, 0x0a, 0x12, + 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x4e, + 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x48, 0x02, 0x52, 0x10, 0x74, + 0x61, 0x73, 0x6b, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, + 0x5e, 0x0a, 0x14, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x6d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, + 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x12, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, + 0x5e, 0x0a, 0x14, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, + 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, + 
0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x12, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, + 0x1f, 0x0a, 0x0b, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x0b, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x74, 0x72, 0x79, 0x47, 0x72, 0x6f, 0x75, 0x70, + 0x12, 0x20, 0x0a, 0x0c, 0x73, 0x70, 0x65, 0x63, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, + 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x70, 0x65, 0x63, 0x4e, 0x6f, 0x64, 0x65, + 0x49, 0x64, 0x12, 0x1b, 0x0a, 0x09, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x6e, 0x6f, 0x64, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x23, 0x0a, 0x0d, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x10, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x69, 0x73, 0x5f, 0x70, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x18, 0x11, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x69, 0x73, 0x50, 0x61, 0x72, 0x65, 0x6e, + 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x73, 0x5f, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x18, + 0x12, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x69, 0x73, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, + 0x12, 0x19, 0x0a, 0x08, 0x64, 0x65, 0x63, 0x6b, 0x5f, 0x75, 0x72, 0x69, 0x18, 0x13, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x64, 0x65, 0x63, 0x6b, 0x55, 0x72, 0x69, 0x12, 0x3b, 0x0a, 0x0b, 0x72, + 0x65, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x15, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x72, 0x65, + 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x73, 0x5f, 0x61, + 0x72, 0x72, 0x61, 0x79, 0x18, 0x16, 
0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x69, 0x73, 0x41, 0x72, + 0x72, 0x61, 0x79, 0x12, 0x3f, 0x0a, 0x0d, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x0c, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x45, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x12, 0x2d, 0x0a, 0x13, 0x69, 0x73, 0x5f, 0x69, 0x6e, 0x5f, 0x64, 0x79, + 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x18, 0x18, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x10, 0x69, 0x73, 0x49, 0x6e, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x43, 0x68, + 0x61, 0x69, 0x6e, 0x12, 0x19, 0x0a, 0x08, 0x69, 0x73, 0x5f, 0x65, 0x61, 0x67, 0x65, 0x72, 0x18, + 0x19, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x69, 0x73, 0x45, 0x61, 0x67, 0x65, 0x72, 0x42, 0x0d, + 0x0a, 0x0b, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0f, 0x0a, + 0x0d, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x42, 0x11, + 0x0a, 0x0f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x22, 0x66, 0x0a, 0x14, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4e, 0x6f, 0x64, + 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x4e, 0x0a, 0x0c, 0x65, 0x78, 0x65, + 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2b, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, + 0x6f, 0x6e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x0b, 0x65, 0x78, + 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x22, 0x9c, 0x02, 0x0a, 0x10, 0x54, 0x61, + 0x73, 0x6b, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 
0x74, 0x61, 0x12, 0x45, + 0x0a, 0x0c, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x43, 0x61, 0x63, + 0x68, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0b, 0x63, 0x61, 0x63, 0x68, 0x65, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x40, 0x0a, 0x0b, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, + 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x61, 0x74, 0x61, + 0x6c, 0x6f, 0x67, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x0a, 0x63, 0x61, 0x74, + 0x61, 0x6c, 0x6f, 0x67, 0x4b, 0x65, 0x79, 0x12, 0x58, 0x0a, 0x12, 0x72, 0x65, 0x73, 0x65, 0x72, + 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x52, 0x65, 0x73, 0x65, + 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x11, + 0x72, 0x65, 0x73, 0x65, 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x5f, + 0x75, 0x72, 0x69, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x63, 0x68, 0x65, 0x63, 0x6b, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x55, 0x72, 0x69, 0x22, 0x56, 0x0a, 0x1b, 0x50, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x37, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, + 0x63, 0x6f, 
0x72, 0x65, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, + 0x6f, 0x6e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x02, 0x69, 0x64, + 0x22, 0x36, 0x0a, 0x1b, 0x50, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x45, 0x78, + 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, + 0x17, 0x0a, 0x07, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x6e, 0x6f, 0x64, 0x65, 0x49, 0x64, 0x22, 0x62, 0x0a, 0x0b, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, + 0x3b, 0x0a, 0x0b, 0x6f, 0x63, 0x63, 0x75, 0x72, 0x72, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, + 0x52, 0x0a, 0x6f, 0x63, 0x63, 0x75, 0x72, 0x72, 0x65, 0x64, 0x41, 0x74, 0x22, 0xdd, 0x08, 0x0a, + 0x12, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, + 0x65, 0x6e, 0x74, 0x12, 0x33, 0x0a, 0x07, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, + 0x52, 0x06, 0x74, 0x61, 0x73, 0x6b, 0x49, 0x64, 0x12, 0x60, 0x0a, 0x18, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4e, 0x6f, 0x64, 0x65, + 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 
0x6e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x52, 0x15, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x4e, 0x6f, 0x64, 0x65, 0x45, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, + 0x74, 0x72, 0x79, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x12, + 0x39, 0x0a, 0x05, 0x70, 0x68, 0x61, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, + 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x68, + 0x61, 0x73, 0x65, 0x52, 0x05, 0x70, 0x68, 0x61, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x72, + 0x6f, 0x64, 0x75, 0x63, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0a, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x72, 0x49, 0x64, 0x12, 0x2b, 0x0a, 0x04, 0x6c, + 0x6f, 0x67, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x4c, + 0x6f, 0x67, 0x52, 0x04, 0x6c, 0x6f, 0x67, 0x73, 0x12, 0x3b, 0x0a, 0x0b, 0x6f, 0x63, 0x63, 0x75, + 0x72, 0x72, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x6f, 0x63, 0x63, 0x75, 0x72, + 0x72, 0x65, 0x64, 0x41, 0x74, 0x12, 0x1d, 0x0a, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x75, + 0x72, 0x69, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x08, 0x69, 0x6e, 0x70, 0x75, + 0x74, 0x55, 0x72, 0x69, 0x12, 0x3b, 0x0a, 0x0a, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x64, 0x61, + 0x74, 0x61, 0x18, 0x13, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x66, 0x6c, 0x79, 
0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, + 0x6c, 0x4d, 0x61, 0x70, 0x48, 0x00, 0x52, 0x09, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x44, 0x61, 0x74, + 0x61, 0x12, 0x1f, 0x0a, 0x0a, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x75, 0x72, 0x69, 0x18, + 0x09, 0x20, 0x01, 0x28, 0x09, 0x48, 0x01, 0x52, 0x09, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x55, + 0x72, 0x69, 0x12, 0x36, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1e, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x72, 0x72, 0x6f, + 0x72, 0x48, 0x01, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x3d, 0x0a, 0x0b, 0x6f, 0x75, + 0x74, 0x70, 0x75, 0x74, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x11, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x4c, 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x4d, 0x61, 0x70, 0x48, 0x01, 0x52, 0x0a, 0x6f, + 0x75, 0x74, 0x70, 0x75, 0x74, 0x44, 0x61, 0x74, 0x61, 0x12, 0x38, 0x0a, 0x0b, 0x63, 0x75, 0x73, + 0x74, 0x6f, 0x6d, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x49, + 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x68, 0x61, 0x73, 0x65, 0x5f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x70, 0x68, 0x61, 0x73, + 0x65, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x1a, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, + 0x6f, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x06, 0x72, 0x65, + 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x36, 0x0a, 0x07, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x73, 0x18, + 0x15, 0x20, 0x03, 0x28, 
0x0b, 0x32, 0x1c, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x32, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x61, + 0x73, 0x6f, 0x6e, 0x52, 0x07, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x73, 0x12, 0x1b, 0x0a, 0x09, + 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x74, 0x61, 0x73, 0x6b, 0x54, 0x79, 0x70, 0x65, 0x12, 0x42, 0x0a, 0x08, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x10, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x54, 0x61, + 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x23, 0x0a, + 0x0d, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x12, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x3b, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x61, + 0x74, 0x18, 0x14, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, + 0x3b, 0x0a, 0x0b, 0x6c, 0x6f, 0x67, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x16, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, + 0x52, 0x0a, 0x6c, 0x6f, 0x67, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x42, 0x0d, 0x0a, 0x0b, + 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x0f, 0x0a, 0x0d, 0x6f, + 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x72, 0x65, 0x73, 0x75, 
0x6c, 0x74, 0x22, 0x8a, 0x04, 0x0a, + 0x14, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x65, 0x78, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x05, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x12, 0x23, 0x0a, 0x0d, + 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0d, 0x52, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, + 0x74, 0x12, 0x39, 0x0a, 0x05, 0x70, 0x68, 0x61, 0x73, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x23, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x50, 0x68, 0x61, 0x73, 0x65, 0x52, 0x05, 0x70, 0x68, 0x61, 0x73, 0x65, 0x12, 0x45, 0x0a, 0x0c, + 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x22, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x43, 0x61, 0x63, 0x68, 0x65, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0b, 0x63, 0x61, 0x63, 0x68, 0x65, 0x53, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x2b, 0x0a, 0x04, 0x6c, 0x6f, 0x67, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x4c, 0x6f, 0x67, 0x52, 0x04, 0x6c, 0x6f, 0x67, 0x73, + 0x12, 0x5d, 0x0a, 0x16, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6e, 0x6f, 0x64, + 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, + 
0x32, 0x25, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4e, 0x6f, 0x64, 0x65, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x48, 0x00, 0x52, 0x14, 0x77, 0x6f, 0x72, 0x6b, 0x66, + 0x6c, 0x6f, 0x77, 0x4e, 0x6f, 0x64, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, + 0x38, 0x0a, 0x0b, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x08, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x0a, 0x63, + 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x3b, 0x0a, 0x0b, 0x6c, 0x6f, 0x67, + 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x4c, 0x6f, 0x67, 0x43, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x52, 0x0a, 0x6c, 0x6f, 0x67, 0x43, + 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x42, 0x11, 0x0a, 0x0f, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, + 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x5b, 0x0a, 0x10, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x29, 0x0a, + 0x10, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x22, 0xa0, 0x03, 0x0a, 0x15, 0x54, 0x61, 0x73, 0x6b, 0x45, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x12, 0x25, 0x0a, 0x0e, 0x67, 0x65, 
0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, + 0x74, 0x65, 0x64, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x54, 0x0a, 0x12, 0x65, 0x78, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x11, 0x65, 0x78, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x4f, 0x0a, + 0x12, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x69, + 0x6e, 0x66, 0x6f, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x52, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x10, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x6f, 0x6f, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2b, + 0x0a, 0x11, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x5f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x12, 0x5b, 0x0a, 0x0e, 0x69, + 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x10, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x34, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x49, 0x6e, 0x73, 0x74, + 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x52, 0x0d, 0x69, 
0x6e, 0x73, 0x74, 0x61, + 0x6e, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x22, 0x2f, 0x0a, 0x0d, 0x49, 0x6e, 0x73, 0x74, + 0x61, 0x6e, 0x63, 0x65, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x46, + 0x41, 0x55, 0x4c, 0x54, 0x10, 0x00, 0x12, 0x11, 0x0a, 0x0d, 0x49, 0x4e, 0x54, 0x45, 0x52, 0x52, + 0x55, 0x50, 0x54, 0x49, 0x42, 0x4c, 0x45, 0x10, 0x01, 0x42, 0xb3, 0x01, 0x0a, 0x13, 0x63, 0x6f, + 0x6d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x42, 0x0a, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, + 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x65, + 0x76, 0x65, 0x6e, 0x74, 0xa2, 0x02, 0x03, 0x46, 0x45, 0x58, 0xaa, 0x02, 0x0f, 0x46, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0xca, 0x02, 0x0f, 0x46, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x45, 0x76, 0x65, 0x6e, 0x74, 0xe2, 0x02, + 0x1b, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x45, 0x76, 0x65, 0x6e, 0x74, + 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x10, 0x46, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x3a, 0x3a, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4a, + 0xf1, 0x6f, 0x0a, 0x07, 0x12, 0x05, 0x00, 0x00, 0xc5, 0x02, 0x01, 0x0a, 0x08, 0x0a, 0x01, 0x0c, + 0x12, 0x03, 0x00, 0x00, 0x12, 0x0a, 0x08, 0x0a, 0x01, 0x02, 0x12, 0x03, 0x02, 0x00, 0x18, 0x0a, + 0x09, 0x0a, 0x02, 0x03, 0x00, 0x12, 0x03, 0x04, 0x00, 0x26, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x01, + 0x12, 0x03, 0x05, 0x00, 0x28, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x02, 0x12, 0x03, 0x06, 0x00, 0x29, + 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x03, 0x12, 0x03, 0x07, 0x00, 0x27, 0x0a, 0x09, 0x0a, 0x02, 0x03, + 0x04, 0x12, 
0x03, 0x08, 0x00, 0x26, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x05, 0x12, 0x03, 0x09, 0x00, + 0x29, 0x0a, 0x08, 0x0a, 0x01, 0x08, 0x12, 0x03, 0x0b, 0x00, 0x4a, 0x0a, 0x09, 0x0a, 0x02, 0x08, + 0x0b, 0x12, 0x03, 0x0b, 0x00, 0x4a, 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x00, 0x12, 0x04, 0x0d, 0x00, + 0x25, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x00, 0x01, 0x12, 0x03, 0x0d, 0x08, 0x1e, 0x0a, 0x24, + 0x0a, 0x04, 0x04, 0x00, 0x02, 0x00, 0x12, 0x03, 0x0f, 0x02, 0x34, 0x1a, 0x17, 0x20, 0x57, 0x6f, + 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, + 0x20, 0x69, 0x64, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x06, 0x12, 0x03, 0x0f, + 0x02, 0x22, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x01, 0x12, 0x03, 0x0f, 0x23, 0x2f, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x03, 0x12, 0x03, 0x0f, 0x32, 0x33, 0x0a, 0x40, + 0x0a, 0x04, 0x04, 0x00, 0x02, 0x01, 0x12, 0x03, 0x12, 0x02, 0x19, 0x1a, 0x33, 0x20, 0x74, 0x68, + 0x65, 0x20, 0x69, 0x64, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6f, 0x72, 0x69, 0x67, + 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x20, 0x28, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x6c, 0x6c, 0x65, + 0x72, 0x29, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x0a, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x05, 0x12, 0x03, 0x12, 0x02, 0x08, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x01, 0x12, 0x03, 0x12, 0x09, 0x14, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x00, 0x02, 0x01, 0x03, 0x12, 0x03, 0x12, 0x17, 0x18, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x00, + 0x02, 0x02, 0x12, 0x03, 0x14, 0x02, 0x29, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x02, 0x06, + 0x12, 0x03, 0x14, 0x02, 0x1e, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x02, 0x01, 0x12, 0x03, + 0x14, 0x1f, 0x24, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x02, 0x03, 0x12, 0x03, 0x14, 0x27, + 0x28, 0x0a, 0x7c, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x03, 0x12, 0x03, 0x18, 0x02, 0x2c, 0x1a, 0x6f, + 0x20, 0x54, 0x68, 0x69, 0x73, 0x20, 0x74, 0x69, 
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x20, + 0x72, 0x65, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x73, 0x20, 0x77, 0x68, 0x65, 0x6e, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x20, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x20, 0x6f, 0x63, 0x63, 0x75, 0x72, 0x72, 0x65, 0x64, 0x2c, 0x20, 0x69, 0x74, 0x20, + 0x69, 0x73, 0x20, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x0a, 0x20, 0x62, 0x79, + 0x20, 0x74, 0x68, 0x65, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x20, 0x6f, 0x66, + 0x20, 0x74, 0x68, 0x65, 0x20, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x0a, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x03, 0x06, 0x12, 0x03, 0x18, 0x02, 0x1b, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x00, 0x02, 0x03, 0x01, 0x12, 0x03, 0x18, 0x1c, 0x27, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x00, 0x02, 0x03, 0x03, 0x12, 0x03, 0x18, 0x2a, 0x2b, 0x0a, 0x0c, 0x0a, 0x04, 0x04, 0x00, 0x08, + 0x00, 0x12, 0x04, 0x1a, 0x02, 0x24, 0x03, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x08, 0x00, 0x01, + 0x12, 0x03, 0x1a, 0x08, 0x15, 0x0a, 0x81, 0x01, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x04, 0x12, 0x03, + 0x1d, 0x04, 0x1a, 0x1a, 0x74, 0x20, 0x55, 0x52, 0x4c, 0x20, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, + 0x20, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x65, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x20, 0x69, 0x74, 0x20, 0x65, 0x6e, 0x63, + 0x6f, 0x64, 0x65, 0x73, 0x20, 0x61, 0x6c, 0x6c, 0x20, 0x74, 0x68, 0x65, 0x20, 0x69, 0x6e, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x20, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, + 0x69, 0x6e, 0x67, 0x20, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x20, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x20, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x20, 0x69, 0x65, 0x2e, 0x2c, 0x20, + 0x73, 0x33, 0x3a, 0x2f, 0x2f, 0x2e, 0x2e, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, + 0x04, 0x05, 0x12, 0x03, 0x1d, 0x04, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 
0x04, 0x01, + 0x12, 0x03, 0x1d, 0x0b, 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x04, 0x03, 0x12, 0x03, + 0x1d, 0x18, 0x19, 0x0a, 0x32, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x05, 0x12, 0x03, 0x20, 0x04, 0x22, + 0x1a, 0x25, 0x20, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x20, 0x69, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x65, 0x78, 0x65, + 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x05, 0x06, + 0x12, 0x03, 0x20, 0x04, 0x17, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x05, 0x01, 0x12, 0x03, + 0x20, 0x18, 0x1d, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x05, 0x03, 0x12, 0x03, 0x20, 0x20, + 0x21, 0x0a, 0x43, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x06, 0x12, 0x03, 0x23, 0x04, 0x24, 0x1a, 0x36, + 0x20, 0x52, 0x61, 0x77, 0x20, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x20, 0x64, 0x61, 0x74, 0x61, + 0x20, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x74, 0x68, 0x69, + 0x73, 0x20, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, + 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x06, 0x06, 0x12, + 0x03, 0x23, 0x04, 0x13, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x06, 0x01, 0x12, 0x03, 0x23, + 0x14, 0x1f, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x06, 0x03, 0x12, 0x03, 0x23, 0x22, 0x23, + 0x0a, 0x0b, 0x0a, 0x02, 0x04, 0x01, 0x12, 0x05, 0x27, 0x00, 0x82, 0x01, 0x01, 0x0a, 0x0a, 0x0a, + 0x03, 0x04, 0x01, 0x01, 0x12, 0x03, 0x27, 0x08, 0x1a, 0x0a, 0x38, 0x0a, 0x04, 0x04, 0x01, 0x02, + 0x00, 0x12, 0x03, 0x29, 0x02, 0x26, 0x1a, 0x2b, 0x20, 0x55, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x20, + 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, + 0x68, 0x69, 0x73, 0x20, 0x6e, 0x6f, 0x64, 0x65, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, + 0x6f, 0x6e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x06, 0x12, 0x03, 0x29, 0x02, + 0x1e, 0x0a, 0x0c, 0x0a, 
0x05, 0x04, 0x01, 0x02, 0x00, 0x01, 0x12, 0x03, 0x29, 0x1f, 0x21, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x03, 0x12, 0x03, 0x29, 0x24, 0x25, 0x0a, 0x40, 0x0a, + 0x04, 0x04, 0x01, 0x02, 0x01, 0x12, 0x03, 0x2c, 0x02, 0x19, 0x1a, 0x33, 0x20, 0x74, 0x68, 0x65, + 0x20, 0x69, 0x64, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6f, 0x72, 0x69, 0x67, 0x69, + 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x20, 0x28, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x6c, 0x6c, 0x65, 0x72, + 0x29, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x0a, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x01, 0x05, 0x12, 0x03, 0x2c, 0x02, 0x08, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x01, 0x02, 0x01, 0x01, 0x12, 0x03, 0x2c, 0x09, 0x14, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x01, 0x02, 0x01, 0x03, 0x12, 0x03, 0x2c, 0x17, 0x18, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x01, 0x02, + 0x02, 0x12, 0x03, 0x2e, 0x02, 0x25, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x02, 0x06, 0x12, + 0x03, 0x2e, 0x02, 0x1a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x02, 0x01, 0x12, 0x03, 0x2e, + 0x1b, 0x20, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x02, 0x03, 0x12, 0x03, 0x2e, 0x23, 0x24, + 0x0a, 0x78, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x03, 0x12, 0x03, 0x32, 0x02, 0x2c, 0x1a, 0x6b, 0x20, + 0x54, 0x68, 0x69, 0x73, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x20, 0x72, + 0x65, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x73, 0x20, 0x77, 0x68, 0x65, 0x6e, 0x20, 0x74, + 0x68, 0x65, 0x20, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x20, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x20, 0x6f, 0x63, 0x63, 0x75, 0x72, 0x72, 0x65, 0x64, 0x2c, 0x20, 0x69, 0x74, 0x20, 0x69, + 0x73, 0x20, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x0a, 0x20, 0x62, 0x79, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x20, 0x6f, 0x66, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, + 0x02, 0x03, 0x06, 0x12, 0x03, 0x32, 0x02, 0x1b, 0x0a, 0x0c, 
0x0a, 0x05, 0x04, 0x01, 0x02, 0x03, + 0x01, 0x12, 0x03, 0x32, 0x1c, 0x27, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x03, 0x03, 0x12, + 0x03, 0x32, 0x2a, 0x2b, 0x0a, 0x0c, 0x0a, 0x04, 0x04, 0x01, 0x08, 0x00, 0x12, 0x04, 0x34, 0x02, + 0x39, 0x03, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x08, 0x00, 0x01, 0x12, 0x03, 0x34, 0x08, 0x13, + 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x04, 0x12, 0x03, 0x35, 0x04, 0x19, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x01, 0x02, 0x04, 0x05, 0x12, 0x03, 0x35, 0x04, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x01, 0x02, 0x04, 0x01, 0x12, 0x03, 0x35, 0x0b, 0x14, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, + 0x04, 0x03, 0x12, 0x03, 0x35, 0x17, 0x18, 0x0a, 0x3e, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x05, 0x12, + 0x03, 0x38, 0x04, 0x24, 0x1a, 0x31, 0x20, 0x52, 0x61, 0x77, 0x20, 0x69, 0x6e, 0x70, 0x75, 0x74, + 0x20, 0x64, 0x61, 0x74, 0x61, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x64, 0x20, 0x62, + 0x79, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x6e, 0x6f, 0x64, 0x65, 0x20, 0x65, 0x78, 0x65, 0x63, + 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x05, 0x06, + 0x12, 0x03, 0x38, 0x04, 0x13, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x05, 0x01, 0x12, 0x03, + 0x38, 0x14, 0x1e, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x05, 0x03, 0x12, 0x03, 0x38, 0x21, + 0x23, 0x0a, 0x0c, 0x0a, 0x04, 0x04, 0x01, 0x08, 0x01, 0x12, 0x04, 0x3b, 0x02, 0x45, 0x03, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x08, 0x01, 0x01, 0x12, 0x03, 0x3b, 0x08, 0x15, 0x0a, 0x81, 0x01, + 0x0a, 0x04, 0x04, 0x01, 0x02, 0x06, 0x12, 0x03, 0x3e, 0x04, 0x1a, 0x1a, 0x74, 0x20, 0x55, 0x52, + 0x4c, 0x20, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x20, + 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, + 0x2c, 0x20, 0x69, 0x74, 0x20, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x73, 0x20, 0x61, 0x6c, 0x6c, + 0x20, 0x74, 0x68, 0x65, 0x20, 0x69, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 
0x0a, 0x20, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x69, 0x6e, 0x67, 0x20, 0x43, 0x6c, 0x6f, 0x75, + 0x64, 0x20, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x20, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, + 0x72, 0x2e, 0x20, 0x69, 0x65, 0x2e, 0x2c, 0x20, 0x73, 0x33, 0x3a, 0x2f, 0x2f, 0x2e, 0x2e, 0x2e, + 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x06, 0x05, 0x12, 0x03, 0x3e, 0x04, 0x0a, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x06, 0x01, 0x12, 0x03, 0x3e, 0x0b, 0x15, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x01, 0x02, 0x06, 0x03, 0x12, 0x03, 0x3e, 0x18, 0x19, 0x0a, 0x32, 0x0a, 0x04, 0x04, + 0x01, 0x02, 0x07, 0x12, 0x03, 0x41, 0x04, 0x22, 0x1a, 0x25, 0x20, 0x45, 0x72, 0x72, 0x6f, 0x72, + 0x20, 0x69, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x66, 0x6f, 0x72, + 0x20, 0x74, 0x68, 0x65, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x07, 0x06, 0x12, 0x03, 0x41, 0x04, 0x17, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x01, 0x02, 0x07, 0x01, 0x12, 0x03, 0x41, 0x18, 0x1d, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x01, 0x02, 0x07, 0x03, 0x12, 0x03, 0x41, 0x20, 0x21, 0x0a, 0x3f, 0x0a, 0x04, 0x04, 0x01, 0x02, + 0x08, 0x12, 0x03, 0x44, 0x04, 0x25, 0x1a, 0x32, 0x20, 0x52, 0x61, 0x77, 0x20, 0x6f, 0x75, 0x74, + 0x70, 0x75, 0x74, 0x20, 0x64, 0x61, 0x74, 0x61, 0x20, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, + 0x64, 0x20, 0x62, 0x79, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x6e, 0x6f, 0x64, 0x65, 0x20, 0x65, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, + 0x02, 0x08, 0x06, 0x12, 0x03, 0x44, 0x04, 0x13, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x08, + 0x01, 0x12, 0x03, 0x44, 0x14, 0x1f, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x08, 0x03, 0x12, + 0x03, 0x44, 0x22, 0x24, 0x0a, 0x5f, 0x0a, 0x04, 0x04, 0x01, 0x08, 0x02, 0x12, 0x04, 0x49, 0x02, + 0x4c, 0x03, 0x1a, 0x51, 0x20, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x20, + 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 
0x74, 0x61, 0x20, 0x74, 0x6f, 0x20, 0x64, 0x6f, 0x20, 0x77, + 0x69, 0x74, 0x68, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x27, 0x73, + 0x20, 0x6e, 0x6f, 0x64, 0x65, 0x20, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x20, 0x62, 0x61, 0x73, + 0x65, 0x64, 0x0a, 0x20, 0x6f, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6e, 0x6f, 0x64, 0x65, 0x20, + 0x74, 0x79, 0x70, 0x65, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x08, 0x02, 0x01, 0x12, 0x03, + 0x49, 0x08, 0x17, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x09, 0x12, 0x03, 0x4a, 0x04, 0x34, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x09, 0x06, 0x12, 0x03, 0x4a, 0x04, 0x18, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x01, 0x02, 0x09, 0x01, 0x12, 0x03, 0x4a, 0x19, 0x2f, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x01, 0x02, 0x09, 0x03, 0x12, 0x03, 0x4a, 0x32, 0x33, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x01, + 0x02, 0x0a, 0x12, 0x03, 0x4b, 0x04, 0x2d, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x0a, 0x06, + 0x12, 0x03, 0x4b, 0x04, 0x14, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x0a, 0x01, 0x12, 0x03, + 0x4b, 0x15, 0x27, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x0a, 0x03, 0x12, 0x03, 0x4b, 0x2a, + 0x2c, 0x0a, 0x53, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x0b, 0x12, 0x03, 0x4f, 0x02, 0x37, 0x1a, 0x46, + 0x20, 0x5b, 0x54, 0x6f, 0x20, 0x62, 0x65, 0x20, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, + 0x65, 0x64, 0x5d, 0x20, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x73, 0x20, 0x77, 0x68, + 0x69, 0x63, 0x68, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x28, 0x69, 0x66, 0x20, 0x61, 0x6e, 0x79, + 0x29, 0x20, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x65, 0x64, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, + 0x6e, 0x6f, 0x64, 0x65, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x0b, 0x06, 0x12, + 0x03, 0x4f, 0x02, 0x1d, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x0b, 0x01, 0x12, 0x03, 0x4f, + 0x1e, 0x32, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x0b, 0x03, 0x12, 0x03, 0x4f, 0x35, 0x36, + 0x0a, 0x82, 0x01, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x0c, 0x12, 0x03, 0x52, 
0x02, 0x38, 0x1a, 0x75, + 0x20, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x20, 0x6e, 0x6f, 0x64, 0x65, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, + 0x65, 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x20, 0x6e, 0x6f, 0x64, 0x65, 0x20, 0x65, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x20, 0x4e, 0x6f, 0x64, 0x65, 0x20, 0x65, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x20, 0x61, 0x74, 0x20, 0x6c, 0x65, 0x76, + 0x65, 0x6c, 0x20, 0x7a, 0x65, 0x72, 0x6f, 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x6e, 0x6f, 0x74, + 0x20, 0x68, 0x61, 0x76, 0x65, 0x20, 0x61, 0x20, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x20, 0x6e, + 0x6f, 0x64, 0x65, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x0c, 0x06, 0x12, 0x03, + 0x52, 0x02, 0x1d, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x0c, 0x01, 0x12, 0x03, 0x52, 0x1e, + 0x32, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x0c, 0x03, 0x12, 0x03, 0x52, 0x35, 0x37, 0x0a, + 0x43, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x0d, 0x12, 0x03, 0x55, 0x02, 0x1a, 0x1a, 0x36, 0x20, 0x52, + 0x65, 0x74, 0x72, 0x79, 0x20, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x20, 0x74, 0x6f, 0x20, 0x69, 0x6e, + 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x20, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x69, 0x6e, 0x67, 0x20, + 0x6f, 0x66, 0x20, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x20, 0x62, 0x79, 0x20, 0x72, 0x65, 0x74, 0x72, + 0x69, 0x65, 0x73, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x0d, 0x05, 0x12, 0x03, 0x55, + 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x0d, 0x01, 0x12, 0x03, 0x55, 0x09, 0x14, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x0d, 0x03, 0x12, 0x03, 0x55, 0x17, 0x19, 0x0a, 0x78, + 0x0a, 0x04, 0x04, 0x01, 0x02, 0x0e, 0x12, 0x03, 0x59, 0x02, 0x1b, 0x1a, 0x6b, 0x20, 0x49, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, + 0x6e, 0x6f, 0x64, 0x65, 0x20, 0x69, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6f, 0x72, 0x69, 0x67, + 0x69, 0x6e, 
0x61, 0x6c, 0x20, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x67, 0x72, + 0x61, 0x70, 0x68, 0x0a, 0x20, 0x54, 0x68, 0x69, 0x73, 0x20, 0x6d, 0x61, 0x70, 0x73, 0x20, 0x74, + 0x6f, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x6f, 0x66, 0x20, 0x57, 0x6f, 0x72, 0x6b, 0x66, + 0x6c, 0x6f, 0x77, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x2e, 0x6e, 0x6f, 0x64, 0x65, + 0x73, 0x5b, 0x58, 0x5d, 0x2e, 0x69, 0x64, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x0e, + 0x05, 0x12, 0x03, 0x59, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x0e, 0x01, 0x12, + 0x03, 0x59, 0x09, 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x0e, 0x03, 0x12, 0x03, 0x59, + 0x18, 0x1a, 0x0a, 0x32, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x0f, 0x12, 0x03, 0x5c, 0x02, 0x18, 0x1a, + 0x25, 0x20, 0x46, 0x72, 0x69, 0x65, 0x6e, 0x64, 0x6c, 0x79, 0x20, 0x72, 0x65, 0x61, 0x64, 0x61, + 0x62, 0x6c, 0x65, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, + 0x20, 0x6e, 0x6f, 0x64, 0x65, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x0f, 0x05, 0x12, + 0x03, 0x5c, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x0f, 0x01, 0x12, 0x03, 0x5c, + 0x09, 0x12, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x0f, 0x03, 0x12, 0x03, 0x5c, 0x15, 0x17, + 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x10, 0x12, 0x03, 0x5e, 0x02, 0x1b, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x01, 0x02, 0x10, 0x05, 0x12, 0x03, 0x5e, 0x02, 0x07, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x01, 0x02, 0x10, 0x01, 0x12, 0x03, 0x5e, 0x08, 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, + 0x10, 0x03, 0x12, 0x03, 0x5e, 0x18, 0x1a, 0x0a, 0x38, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x11, 0x12, + 0x03, 0x61, 0x02, 0x16, 0x1a, 0x2b, 0x20, 0x57, 0x68, 0x65, 0x74, 0x68, 0x65, 0x72, 0x20, 0x74, + 0x68, 0x69, 0x73, 0x20, 0x6e, 0x6f, 0x64, 0x65, 0x20, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x65, + 0x64, 0x20, 0x61, 0x20, 0x73, 0x75, 0x62, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, + 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 
0x11, 0x05, 0x12, 0x03, 0x61, 0x02, 0x06, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x11, 0x01, 0x12, 0x03, 0x61, 0x07, 0x10, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x01, 0x02, 0x11, 0x03, 0x12, 0x03, 0x61, 0x13, 0x15, 0x0a, 0x3c, 0x0a, 0x04, 0x04, + 0x01, 0x02, 0x12, 0x12, 0x03, 0x64, 0x02, 0x17, 0x1a, 0x2f, 0x20, 0x57, 0x68, 0x65, 0x74, 0x68, + 0x65, 0x72, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x6e, 0x6f, 0x64, 0x65, 0x20, 0x79, 0x69, 0x65, + 0x6c, 0x64, 0x65, 0x64, 0x20, 0x61, 0x20, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x20, 0x77, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, + 0x12, 0x05, 0x12, 0x03, 0x64, 0x02, 0x06, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x12, 0x01, + 0x12, 0x03, 0x64, 0x07, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x12, 0x03, 0x12, 0x03, + 0x64, 0x14, 0x16, 0x0a, 0xcc, 0x01, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x13, 0x12, 0x03, 0x68, 0x02, + 0x17, 0x1a, 0xbe, 0x01, 0x20, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x20, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x6c, 0x79, 0x20, 0x69, 0x64, + 0x65, 0x6e, 0x74, 0x69, 0x66, 0x79, 0x69, 0x6e, 0x67, 0x20, 0x77, 0x68, 0x65, 0x72, 0x65, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x64, 0x65, 0x63, 0x6b, 0x20, 0x48, 0x54, 0x4d, 0x4c, 0x20, 0x66, 0x69, + 0x6c, 0x65, 0x20, 0x69, 0x73, 0x0a, 0x20, 0x4e, 0x61, 0x74, 0x69, 0x76, 0x65, 0x55, 0x72, 0x6c, + 0x20, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x75, + 0x72, 0x6c, 0x20, 0x69, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, + 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, + 0x65, 0x64, 0x20, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x20, 0x70, 0x72, 0x6f, 0x76, 0x69, + 0x64, 0x65, 0x72, 0x20, 0x28, 0x65, 0x2e, 0x67, 0x2e, 0x20, 0x73, 0x33, 0x3a, 0x2f, 0x2f, 0x6d, + 0x79, 0x2d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2f, 0x72, 0x61, 0x6e, 0x64, 0x6f, 
0x6d, 0x73, + 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x2e, 0x74, 0x61, 0x72, + 0x29, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x13, 0x05, 0x12, 0x03, 0x68, 0x02, 0x08, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x13, 0x01, 0x12, 0x03, 0x68, 0x09, 0x11, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x01, 0x02, 0x13, 0x03, 0x12, 0x03, 0x68, 0x14, 0x16, 0x0a, 0xbf, 0x03, 0x0a, + 0x04, 0x04, 0x01, 0x02, 0x14, 0x12, 0x03, 0x6e, 0x02, 0x2d, 0x1a, 0xb1, 0x03, 0x20, 0x54, 0x68, + 0x69, 0x73, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x20, 0x72, 0x65, 0x70, + 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x69, 0x6e, 0x73, 0x74, + 0x61, 0x6e, 0x74, 0x20, 0x77, 0x68, 0x65, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x20, 0x77, 0x61, 0x73, 0x20, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x20, + 0x62, 0x79, 0x20, 0x74, 0x68, 0x65, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6e, 0x67, + 0x20, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x77, 0x6f, 0x72, 0x6b, 0x2e, 0x20, 0x46, 0x6f, 0x72, 0x20, + 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x2c, 0x0a, 0x20, 0x77, 0x68, 0x65, 0x6e, 0x20, 0x66, + 0x69, 0x72, 0x73, 0x74, 0x20, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x69, 0x6e, 0x67, 0x20, + 0x61, 0x20, 0x6e, 0x6f, 0x64, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x60, 0x6f, 0x63, 0x63, 0x75, + 0x72, 0x72, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x60, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x20, 0x73, 0x68, 0x6f, 0x75, 0x6c, 0x64, 0x20, 0x62, 0x65, 0x20, 0x74, 0x68, 0x65, + 0x20, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x20, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x6c, 0x6c, + 0x65, 0x72, 0x20, 0x6d, 0x61, 0x6b, 0x65, 0x73, 0x20, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, + 0x73, 0x2c, 0x20, 0x73, 0x6f, 0x20, 0x77, 0x68, 0x65, 0x6e, 0x0a, 0x20, 0x6c, 0x69, 0x74, 0x65, + 0x72, 0x61, 0x6c, 0x20, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x20, 0x61, 0x72, 0x65, 0x20, 0x69, + 0x6e, 0x69, 0x74, 0x69, 
0x61, 0x6c, 0x6c, 0x79, 0x20, 0x63, 0x6f, 0x70, 0x69, 0x65, 0x64, 0x2e, + 0x20, 0x54, 0x68, 0x65, 0x20, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x20, 0x68, 0x6f, 0x77, 0x65, 0x76, + 0x65, 0x72, 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x62, 0x65, 0x20, 0x73, + 0x65, 0x6e, 0x74, 0x20, 0x75, 0x6e, 0x74, 0x69, 0x6c, 0x20, 0x61, 0x66, 0x74, 0x65, 0x72, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x63, 0x6f, 0x70, 0x79, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, + 0x65, 0x73, 0x2e, 0x0a, 0x20, 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6e, 0x67, 0x20, + 0x62, 0x6f, 0x74, 0x68, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x73, 0x65, 0x20, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x20, 0x66, 0x61, 0x63, 0x69, 0x6c, 0x69, 0x74, + 0x61, 0x74, 0x65, 0x73, 0x20, 0x61, 0x20, 0x6d, 0x6f, 0x72, 0x65, 0x20, 0x61, 0x63, 0x63, 0x75, + 0x72, 0x61, 0x74, 0x65, 0x20, 0x70, 0x6f, 0x72, 0x74, 0x72, 0x61, 0x79, 0x61, 0x6c, 0x20, 0x6f, + 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x20, 0x74, 0x69, 0x6d, 0x65, 0x2d, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x2e, 0x0a, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x01, 0x02, 0x14, 0x06, 0x12, 0x03, 0x6e, 0x02, 0x1b, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x01, 0x02, 0x14, 0x01, 0x12, 0x03, 0x6e, 0x1c, 0x27, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, + 0x02, 0x14, 0x03, 0x12, 0x03, 0x6e, 0x2a, 0x2c, 0x0a, 0x36, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x15, + 0x12, 0x03, 0x71, 0x02, 0x15, 0x1a, 0x29, 0x20, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, + 0x73, 0x20, 0x69, 0x66, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x6e, 0x6f, 0x64, 0x65, 0x20, 0x69, + 0x73, 0x20, 0x61, 0x6e, 0x20, 0x41, 0x72, 0x72, 0x61, 0x79, 0x4e, 0x6f, 0x64, 0x65, 0x2e, 0x0a, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x15, 0x05, 0x12, 0x03, 0x71, 0x02, 0x06, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x01, 0x02, 0x15, 0x01, 0x12, 0x03, 0x71, 0x07, 0x0f, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x01, 0x02, 0x15, 0x03, 0x12, 0x03, 0x71, 0x12, 0x14, 
0x0a, 0xdf, 0x03, 0x0a, 0x04, 0x04, + 0x01, 0x02, 0x16, 0x12, 0x03, 0x77, 0x02, 0x25, 0x1a, 0xd1, 0x03, 0x20, 0x53, 0x6f, 0x20, 0x74, + 0x68, 0x61, 0x74, 0x20, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x20, 0x64, 0x6f, 0x65, 0x73, 0x6e, 0x27, + 0x74, 0x20, 0x68, 0x61, 0x76, 0x65, 0x20, 0x74, 0x6f, 0x20, 0x72, 0x65, 0x62, 0x75, 0x69, 0x6c, + 0x64, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6e, 0x6f, 0x64, 0x65, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, + 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x67, 0x72, 0x61, 0x70, 0x68, 0x20, 0x74, 0x6f, 0x20, 0x66, 0x69, + 0x6e, 0x64, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x20, 0x65, 0x6e, + 0x74, 0x69, 0x74, 0x79, 0x2c, 0x20, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x6c, 0x6c, 0x65, 0x72, 0x20, + 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x66, 0x69, 0x6c, 0x6c, 0x20, 0x74, 0x68, 0x69, 0x73, 0x0a, 0x20, + 0x69, 0x6e, 0x20, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x6c, 0x79, 0x20, 0x2d, 0x20, + 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x69, + 0x73, 0x20, 0x6f, 0x6e, 0x6c, 0x79, 0x20, 0x66, 0x69, 0x6c, 0x6c, 0x65, 0x64, 0x20, 0x69, 0x6e, + 0x20, 0x66, 0x6f, 0x72, 0x20, 0x73, 0x75, 0x62, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x73, 0x2e, 0x20, 0x54, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x49, + 0x44, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x75, 0x62, 0x77, 0x6f, 0x72, 0x6b, + 0x66, 0x6c, 0x6f, 0x77, 0x20, 0x63, 0x6f, 0x72, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x64, 0x69, + 0x6e, 0x67, 0x0a, 0x20, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x6e, 0x6f, 0x64, 0x65, + 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x20, 0x49, 0x74, 0x20, 0x69, + 0x73, 0x20, 0x64, 0x69, 0x66, 0x66, 0x69, 0x63, 0x75, 0x6c, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x66, + 0x69, 0x6e, 0x64, 0x20, 0x62, 0x65, 0x63, 0x61, 0x75, 0x73, 0x65, 0x20, 0x41, 0x64, 0x6d, 0x69, + 0x6e, 0x20, 0x6f, 0x6e, 0x6c, 0x79, 0x20, 0x73, 0x65, 0x65, 0x73, 0x20, 0x6f, 0x6e, 0x65, 0x20, + 
0x6e, 0x6f, 0x64, 0x65, 0x20, 0x61, 0x74, 0x20, 0x61, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x2e, 0x20, + 0x41, 0x20, 0x73, 0x75, 0x62, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x20, 0x63, 0x6f, + 0x75, 0x6c, 0x64, 0x20, 0x62, 0x65, 0x0a, 0x20, 0x6e, 0x65, 0x73, 0x74, 0x65, 0x64, 0x20, 0x6d, + 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x20, 0x6c, 0x61, 0x79, 0x65, 0x72, 0x73, 0x20, 0x64, + 0x65, 0x65, 0x70, 0x2c, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x79, 0x6f, 0x75, 0x27, 0x64, 0x20, 0x6e, + 0x65, 0x65, 0x64, 0x20, 0x74, 0x6f, 0x20, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x20, 0x74, 0x68, + 0x65, 0x20, 0x63, 0x6f, 0x72, 0x72, 0x65, 0x63, 0x74, 0x20, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x20, 0x74, 0x6f, 0x20, 0x6b, + 0x6e, 0x6f, 0x77, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x20, 0x73, + 0x75, 0x62, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x01, 0x02, 0x16, 0x06, 0x12, 0x03, 0x77, 0x02, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, + 0x02, 0x16, 0x01, 0x12, 0x03, 0x77, 0x12, 0x1f, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x16, + 0x03, 0x12, 0x03, 0x77, 0x22, 0x24, 0x0a, 0xc6, 0x04, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x17, 0x12, + 0x03, 0x7e, 0x02, 0x20, 0x1a, 0xb8, 0x04, 0x20, 0x54, 0x61, 0x73, 0x6b, 0x73, 0x20, 0x61, 0x6e, + 0x64, 0x20, 0x73, 0x75, 0x62, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x73, 0x20, 0x28, + 0x62, 0x75, 0x74, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x20, 0x70, + 0x6c, 0x61, 0x6e, 0x73, 0x29, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x61, 0x72, 0x65, 0x20, 0x72, + 0x75, 0x6e, 0x20, 0x77, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x20, 0x61, 0x20, 0x64, 0x79, 0x6e, 0x61, + 0x6d, 0x69, 0x63, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x61, 0x72, 0x65, 0x20, 0x65, 0x66, 0x66, + 0x65, 0x63, 0x74, 0x69, 0x76, 0x65, 0x6c, 0x79, 0x20, 0x69, 0x6e, 0x64, 0x65, 0x70, 0x65, 0x6e, + 0x64, 0x65, 0x6e, 0x74, 0x20, 0x6f, 
0x66, 0x0a, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, 0x73, + 0x6b, 0x73, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x61, 0x72, 0x65, 0x20, 0x72, 0x65, 0x67, 0x69, + 0x73, 0x74, 0x65, 0x72, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x27, + 0x73, 0x20, 0x64, 0x62, 0x2e, 0x20, 0x43, 0x6f, 0x6e, 0x66, 0x75, 0x73, 0x69, 0x6e, 0x67, 0x6c, + 0x79, 0x2c, 0x20, 0x74, 0x68, 0x65, 0x79, 0x20, 0x61, 0x72, 0x65, 0x20, 0x6f, 0x66, 0x74, 0x65, + 0x6e, 0x20, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x6c, 0x2c, 0x20, 0x62, 0x75, 0x74, + 0x20, 0x73, 0x6f, 0x6d, 0x65, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x20, 0x74, 0x68, 0x65, 0x79, 0x20, + 0x61, 0x72, 0x65, 0x20, 0x6e, 0x6f, 0x74, 0x0a, 0x20, 0x65, 0x76, 0x65, 0x6e, 0x20, 0x72, 0x65, + 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x65, 0x64, 0x20, 0x61, 0x74, 0x20, 0x61, 0x6c, 0x6c, 0x2e, + 0x20, 0x53, 0x69, 0x6d, 0x69, 0x6c, 0x61, 0x72, 0x20, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, + 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x20, 0x66, 0x69, + 0x65, 0x6c, 0x64, 0x2c, 0x20, 0x61, 0x74, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x69, 0x6d, 0x65, + 0x20, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x20, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x73, 0x20, + 0x74, 0x68, 0x69, 0x73, 0x20, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2c, 0x20, 0x69, 0x74, 0x20, 0x68, + 0x61, 0x73, 0x20, 0x6e, 0x6f, 0x20, 0x69, 0x64, 0x65, 0x61, 0x0a, 0x20, 0x69, 0x66, 0x20, 0x74, + 0x68, 0x65, 0x20, 0x72, 0x65, 0x6c, 0x65, 0x76, 0x61, 0x6e, 0x74, 0x20, 0x65, 0x78, 0x65, 0x63, + 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x20, 0x69, 0x73, 0x20, + 0x77, 0x61, 0x73, 0x20, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x65, 0x64, 0x2c, 0x20, + 0x6f, 0x72, 0x20, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x2e, 0x20, 0x54, 0x68, 0x69, 0x73, + 0x20, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x20, 0x69, 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, + 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, 
0x72, 0x67, 0x65, 0x74, + 0x5f, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x20, 0x49, 0x44, 0x2c, 0x0a, 0x20, 0x61, 0x73, 0x20, + 0x77, 0x65, 0x6c, 0x6c, 0x20, 0x61, 0x73, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x49, 0x44, 0x73, + 0x20, 0x69, 0x6e, 0x20, 0x61, 0x6e, 0x79, 0x20, 0x63, 0x6f, 0x72, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x20, 0x54, 0x61, 0x73, 0x6b, 0x20, 0x45, 0x78, 0x65, 0x63, 0x75, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2c, 0x20, 0x73, 0x68, 0x6f, 0x75, 0x6c, 0x64, 0x20, 0x6e, 0x6f, + 0x74, 0x20, 0x62, 0x65, 0x20, 0x75, 0x73, 0x65, 0x64, 0x20, 0x74, 0x6f, 0x20, 0x6c, 0x6f, 0x6f, + 0x6b, 0x65, 0x64, 0x20, 0x75, 0x70, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, + 0x69, 0x6e, 0x20, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x27, 0x73, 0x20, 0x64, 0x62, 0x2e, 0x0a, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x17, 0x05, 0x12, 0x03, 0x7e, 0x02, 0x06, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x01, 0x02, 0x17, 0x01, 0x12, 0x03, 0x7e, 0x07, 0x1a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x01, 0x02, 0x17, 0x03, 0x12, 0x03, 0x7e, 0x1d, 0x1f, 0x0a, 0x39, 0x0a, 0x04, 0x04, 0x01, 0x02, + 0x18, 0x12, 0x04, 0x81, 0x01, 0x02, 0x15, 0x1a, 0x2b, 0x20, 0x57, 0x68, 0x65, 0x74, 0x68, 0x65, + 0x72, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x6e, 0x6f, 0x64, 0x65, 0x20, 0x6c, 0x61, 0x75, 0x6e, + 0x63, 0x68, 0x65, 0x64, 0x20, 0x61, 0x6e, 0x20, 0x65, 0x61, 0x67, 0x65, 0x72, 0x20, 0x74, 0x61, + 0x73, 0x6b, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x18, 0x05, 0x12, 0x04, 0x81, + 0x01, 0x02, 0x06, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x18, 0x01, 0x12, 0x04, 0x81, 0x01, + 0x07, 0x0f, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x18, 0x03, 0x12, 0x04, 0x81, 0x01, 0x12, + 0x14, 0x0a, 0x61, 0x0a, 0x02, 0x04, 0x02, 0x12, 0x06, 0x85, 0x01, 0x00, 0x87, 0x01, 0x01, 0x1a, + 0x53, 0x20, 0x46, 0x6f, 0x72, 0x20, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x20, 0x4e, + 0x6f, 0x64, 0x65, 0x73, 0x20, 0x77, 0x65, 0x20, 0x6e, 0x65, 0x65, 0x64, 0x20, 0x74, 0x6f, 0x20, + 0x73, 0x65, 
0x6e, 0x64, 0x20, 0x69, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x20, 0x61, 0x62, 0x6f, 0x75, 0x74, 0x20, 0x74, 0x68, 0x65, 0x20, 0x77, 0x6f, 0x72, 0x6b, 0x66, + 0x6c, 0x6f, 0x77, 0x20, 0x74, 0x68, 0x61, 0x74, 0x27, 0x73, 0x20, 0x6c, 0x61, 0x75, 0x6e, 0x63, + 0x68, 0x65, 0x64, 0x0a, 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x02, 0x01, 0x12, 0x04, 0x85, 0x01, 0x08, + 0x1c, 0x0a, 0x0c, 0x0a, 0x04, 0x04, 0x02, 0x02, 0x00, 0x12, 0x04, 0x86, 0x01, 0x02, 0x34, 0x0a, + 0x0d, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, 0x06, 0x12, 0x04, 0x86, 0x01, 0x02, 0x22, 0x0a, 0x0d, + 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, 0x01, 0x12, 0x04, 0x86, 0x01, 0x23, 0x2f, 0x0a, 0x0d, 0x0a, + 0x05, 0x04, 0x02, 0x02, 0x00, 0x03, 0x12, 0x04, 0x86, 0x01, 0x32, 0x33, 0x0a, 0x0c, 0x0a, 0x02, + 0x04, 0x03, 0x12, 0x06, 0x89, 0x01, 0x00, 0x92, 0x01, 0x01, 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x03, + 0x01, 0x12, 0x04, 0x89, 0x01, 0x08, 0x18, 0x0a, 0x42, 0x0a, 0x04, 0x04, 0x03, 0x02, 0x00, 0x12, + 0x04, 0x8b, 0x01, 0x02, 0x2b, 0x1a, 0x34, 0x20, 0x43, 0x61, 0x70, 0x74, 0x75, 0x72, 0x65, 0x73, + 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x20, 0x6f, 0x66, 0x20, 0x63, + 0x61, 0x63, 0x68, 0x69, 0x6e, 0x67, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, + 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, + 0x03, 0x02, 0x00, 0x06, 0x12, 0x04, 0x8b, 0x01, 0x02, 0x19, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x03, + 0x02, 0x00, 0x01, 0x12, 0x04, 0x8b, 0x01, 0x1a, 0x26, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x03, 0x02, + 0x00, 0x03, 0x12, 0x04, 0x8b, 0x01, 0x29, 0x2a, 0x0a, 0x47, 0x0a, 0x04, 0x04, 0x03, 0x02, 0x01, + 0x12, 0x04, 0x8d, 0x01, 0x02, 0x27, 0x1a, 0x39, 0x20, 0x54, 0x68, 0x69, 0x73, 0x20, 0x73, 0x74, + 0x72, 0x75, 0x63, 0x74, 0x75, 0x72, 0x65, 0x20, 0x63, 0x61, 0x72, 0x72, 0x69, 0x65, 0x73, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x20, 0x61, 0x72, 0x74, 0x69, + 0x66, 0x61, 0x63, 0x74, 0x20, 0x69, 0x6e, 0x66, 
0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x01, 0x06, 0x12, 0x04, 0x8d, 0x01, 0x02, 0x16, + 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x01, 0x01, 0x12, 0x04, 0x8d, 0x01, 0x17, 0x22, 0x0a, + 0x0d, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x01, 0x03, 0x12, 0x04, 0x8d, 0x01, 0x25, 0x26, 0x0a, 0x4d, + 0x0a, 0x04, 0x04, 0x03, 0x02, 0x02, 0x12, 0x04, 0x8f, 0x01, 0x02, 0x38, 0x1a, 0x3f, 0x20, 0x43, + 0x61, 0x70, 0x74, 0x75, 0x72, 0x65, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x20, 0x6f, 0x66, 0x20, 0x63, 0x61, 0x63, 0x68, 0x65, 0x20, 0x72, 0x65, 0x73, 0x65, + 0x72, 0x76, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x69, + 0x73, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, + 0x05, 0x04, 0x03, 0x02, 0x02, 0x06, 0x12, 0x04, 0x8f, 0x01, 0x02, 0x20, 0x0a, 0x0d, 0x0a, 0x05, + 0x04, 0x03, 0x02, 0x02, 0x01, 0x12, 0x04, 0x8f, 0x01, 0x21, 0x33, 0x0a, 0x0d, 0x0a, 0x05, 0x04, + 0x03, 0x02, 0x02, 0x03, 0x12, 0x04, 0x8f, 0x01, 0x36, 0x37, 0x0a, 0x2e, 0x0a, 0x04, 0x04, 0x03, + 0x02, 0x03, 0x12, 0x04, 0x91, 0x01, 0x02, 0x1c, 0x1a, 0x20, 0x20, 0x54, 0x68, 0x65, 0x20, 0x6c, + 0x61, 0x74, 0x65, 0x73, 0x74, 0x20, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x20, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x03, + 0x02, 0x03, 0x05, 0x12, 0x04, 0x91, 0x01, 0x02, 0x08, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x03, 0x02, + 0x03, 0x01, 0x12, 0x04, 0x91, 0x01, 0x09, 0x17, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x03, + 0x03, 0x12, 0x04, 0x91, 0x01, 0x1a, 0x1b, 0x0a, 0x0c, 0x0a, 0x02, 0x04, 0x04, 0x12, 0x06, 0x94, + 0x01, 0x00, 0x96, 0x01, 0x01, 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x04, 0x01, 0x12, 0x04, 0x94, 0x01, + 0x08, 0x23, 0x0a, 0x0c, 0x0a, 0x04, 0x04, 0x04, 0x02, 0x00, 0x12, 0x04, 0x95, 0x01, 0x02, 0x26, + 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x00, 0x06, 0x12, 0x04, 0x95, 0x01, 0x02, 
0x1e, 0x0a, + 0x0d, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x00, 0x01, 0x12, 0x04, 0x95, 0x01, 0x1f, 0x21, 0x0a, 0x0d, + 0x0a, 0x05, 0x04, 0x04, 0x02, 0x00, 0x03, 0x12, 0x04, 0x95, 0x01, 0x24, 0x25, 0x0a, 0x0c, 0x0a, + 0x02, 0x04, 0x05, 0x12, 0x06, 0x98, 0x01, 0x00, 0x9c, 0x01, 0x01, 0x0a, 0x0b, 0x0a, 0x03, 0x04, + 0x05, 0x01, 0x12, 0x04, 0x98, 0x01, 0x08, 0x23, 0x0a, 0x97, 0x01, 0x0a, 0x04, 0x04, 0x05, 0x02, + 0x00, 0x12, 0x04, 0x9b, 0x01, 0x02, 0x15, 0x1a, 0x88, 0x01, 0x20, 0x55, 0x6e, 0x69, 0x71, 0x75, + 0x65, 0x20, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x20, 0x6f, 0x66, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x20, 0x6e, 0x6f, 0x64, 0x65, 0x20, + 0x69, 0x64, 0x20, 0x77, 0x69, 0x74, 0x68, 0x69, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x65, 0x78, + 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x20, 0x54, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, + 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x6f, 0x66, 0x20, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4e, + 0x6f, 0x64, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x2e, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x69, 0x64, 0x20, 0x6f, + 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x20, 0x6e, 0x6f, 0x64, + 0x65, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x00, 0x05, 0x12, 0x04, 0x9b, 0x01, 0x02, + 0x08, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x00, 0x01, 0x12, 0x04, 0x9b, 0x01, 0x09, 0x10, + 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x00, 0x03, 0x12, 0x04, 0x9b, 0x01, 0x13, 0x14, 0x0a, + 0x0c, 0x0a, 0x02, 0x04, 0x06, 0x12, 0x06, 0x9e, 0x01, 0x00, 0xa4, 0x01, 0x01, 0x0a, 0x0b, 0x0a, + 0x03, 0x04, 0x06, 0x01, 0x12, 0x04, 0x9e, 0x01, 0x08, 0x13, 0x0a, 0x2d, 0x0a, 0x04, 0x04, 0x06, + 0x02, 0x00, 0x12, 0x04, 0xa0, 0x01, 0x02, 0x14, 0x1a, 0x1f, 0x20, 0x41, 0x6e, 0x20, 0x65, 0x78, + 0x70, 0x6c, 0x61, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, + 0x69, 0x73, 0x20, 0x65, 
0x76, 0x65, 0x6e, 0x74, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x06, 0x02, + 0x00, 0x05, 0x12, 0x04, 0xa0, 0x01, 0x02, 0x08, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x00, + 0x01, 0x12, 0x04, 0xa0, 0x01, 0x09, 0x0f, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x00, 0x03, + 0x12, 0x04, 0xa0, 0x01, 0x12, 0x13, 0x0a, 0x2d, 0x0a, 0x04, 0x04, 0x06, 0x02, 0x01, 0x12, 0x04, + 0xa3, 0x01, 0x02, 0x2c, 0x1a, 0x1f, 0x20, 0x54, 0x68, 0x65, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x20, + 0x74, 0x68, 0x69, 0x73, 0x20, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x20, 0x6f, 0x63, 0x63, 0x75, + 0x72, 0x72, 0x65, 0x64, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x01, 0x06, 0x12, 0x04, + 0xa3, 0x01, 0x02, 0x1b, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x01, 0x01, 0x12, 0x04, 0xa3, + 0x01, 0x1c, 0x27, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x01, 0x03, 0x12, 0x04, 0xa3, 0x01, + 0x2a, 0x2b, 0x0a, 0x6c, 0x0a, 0x02, 0x04, 0x07, 0x12, 0x06, 0xa7, 0x01, 0x00, 0xfa, 0x01, 0x01, + 0x1a, 0x5e, 0x20, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x20, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, + 0x69, 0x63, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x20, 0x69, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x20, + 0x46, 0x6f, 0x72, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x20, 0x6c, 0x69, 0x6b, 0x65, 0x20, 0x50, + 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x2c, 0x20, 0x48, 0x69, 0x76, 0x65, 0x2c, 0x20, 0x53, 0x70, 0x61, + 0x72, 0x6b, 0x2c, 0x20, 0x44, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x4a, 0x6f, 0x62, 0x2e, 0x0a, + 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x07, 0x01, 0x12, 0x04, 0xa7, 0x01, 0x08, 0x1a, 0x0a, 0x98, 0x01, + 0x0a, 0x04, 0x04, 0x07, 0x02, 0x00, 0x12, 0x04, 0xaa, 0x01, 0x02, 0x1e, 0x1a, 0x89, 0x01, 0x20, + 0x49, 0x44, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x20, + 0x49, 0x6e, 0x20, 0x63, 0x6f, 0x6d, 0x62, 0x69, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x77, + 0x69, 0x74, 0x68, 0x20, 0x74, 0x68, 0x65, 0x20, 0x72, 0x65, 
0x74, 0x72, 0x79, 0x41, 0x74, 0x74, + 0x65, 0x6d, 0x70, 0x74, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x69, + 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x0a, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, 0x73, + 0x6b, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x75, 0x6e, 0x69, 0x71, + 0x75, 0x65, 0x6c, 0x79, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x20, 0x67, 0x69, 0x76, 0x65, 0x6e, + 0x20, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x20, 0x6e, 0x6f, 0x64, 0x65, 0x20, 0x65, 0x78, 0x65, + 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x00, + 0x06, 0x12, 0x04, 0xaa, 0x01, 0x02, 0x11, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x00, 0x01, + 0x12, 0x04, 0xaa, 0x01, 0x12, 0x19, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x00, 0x03, 0x12, + 0x04, 0xaa, 0x01, 0x1c, 0x1d, 0x0a, 0xa7, 0x01, 0x0a, 0x04, 0x04, 0x07, 0x02, 0x01, 0x12, 0x04, + 0xae, 0x01, 0x02, 0x3c, 0x1a, 0x98, 0x01, 0x20, 0x41, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x65, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x69, 0x73, 0x20, 0x61, 0x6c, 0x77, 0x61, + 0x79, 0x73, 0x20, 0x6b, 0x69, 0x63, 0x6b, 0x65, 0x64, 0x20, 0x6f, 0x66, 0x66, 0x20, 0x62, 0x79, + 0x20, 0x61, 0x20, 0x6e, 0x6f, 0x64, 0x65, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, + 0x6e, 0x2c, 0x20, 0x74, 0x68, 0x65, 0x20, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x20, 0x63, 0x6f, 0x6e, + 0x73, 0x75, 0x6d, 0x65, 0x72, 0x0a, 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x75, 0x73, 0x65, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x20, 0x74, 0x6f, + 0x20, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, + 0x20, 0x74, 0x6f, 0x20, 0x69, 0x74, 0x27, 0x73, 0x20, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x20, + 0x6e, 0x6f, 0x64, 0x65, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x0a, + 0x0d, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x01, 0x06, 0x12, 0x04, 0xae, 0x01, 0x02, 0x1e, 0x0a, 0x0d, + 
0x0a, 0x05, 0x04, 0x07, 0x02, 0x01, 0x01, 0x12, 0x04, 0xae, 0x01, 0x1f, 0x37, 0x0a, 0x0d, 0x0a, + 0x05, 0x04, 0x07, 0x02, 0x01, 0x03, 0x12, 0x04, 0xae, 0x01, 0x3a, 0x3b, 0x0a, 0x51, 0x0a, 0x04, + 0x04, 0x07, 0x02, 0x02, 0x12, 0x04, 0xb1, 0x01, 0x02, 0x1b, 0x1a, 0x43, 0x20, 0x72, 0x65, 0x74, + 0x72, 0x79, 0x20, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x20, 0x6e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x2c, + 0x20, 0x69, 0x65, 0x2e, 0x2c, 0x20, 0x32, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, + 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x20, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x0a, 0x0a, + 0x0d, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x02, 0x05, 0x12, 0x04, 0xb1, 0x01, 0x02, 0x08, 0x0a, 0x0d, + 0x0a, 0x05, 0x04, 0x07, 0x02, 0x02, 0x01, 0x12, 0x04, 0xb1, 0x01, 0x09, 0x16, 0x0a, 0x0d, 0x0a, + 0x05, 0x04, 0x07, 0x02, 0x02, 0x03, 0x12, 0x04, 0xb1, 0x01, 0x19, 0x1a, 0x0a, 0x2f, 0x0a, 0x04, + 0x04, 0x07, 0x02, 0x03, 0x12, 0x04, 0xb4, 0x01, 0x02, 0x25, 0x1a, 0x21, 0x20, 0x50, 0x68, 0x61, + 0x73, 0x65, 0x20, 0x61, 0x73, 0x73, 0x6f, 0x63, 0x69, 0x61, 0x74, 0x65, 0x64, 0x20, 0x77, 0x69, + 0x74, 0x68, 0x20, 0x74, 0x68, 0x65, 0x20, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x0a, 0x0a, 0x0d, 0x0a, + 0x05, 0x04, 0x07, 0x02, 0x03, 0x06, 0x12, 0x04, 0xb4, 0x01, 0x02, 0x1a, 0x0a, 0x0d, 0x0a, 0x05, + 0x04, 0x07, 0x02, 0x03, 0x01, 0x12, 0x04, 0xb4, 0x01, 0x1b, 0x20, 0x0a, 0x0d, 0x0a, 0x05, 0x04, + 0x07, 0x02, 0x03, 0x03, 0x12, 0x04, 0xb4, 0x01, 0x23, 0x24, 0x0a, 0x52, 0x0a, 0x04, 0x04, 0x07, + 0x02, 0x04, 0x12, 0x04, 0xb7, 0x01, 0x02, 0x19, 0x1a, 0x44, 0x20, 0x69, 0x64, 0x20, 0x6f, 0x66, + 0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x20, 0x74, 0x68, 0x61, + 0x74, 0x20, 0x73, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x2c, 0x20, 0x6d, 0x61, 0x69, 0x6e, 0x6c, 0x79, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x20, 0x64, 0x65, 
0x62, 0x75, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x0a, 0x0a, 0x0d, + 0x0a, 0x05, 0x04, 0x07, 0x02, 0x04, 0x05, 0x12, 0x04, 0xb7, 0x01, 0x02, 0x08, 0x0a, 0x0d, 0x0a, + 0x05, 0x04, 0x07, 0x02, 0x04, 0x01, 0x12, 0x04, 0xb7, 0x01, 0x09, 0x14, 0x0a, 0x0d, 0x0a, 0x05, + 0x04, 0x07, 0x02, 0x04, 0x03, 0x12, 0x04, 0xb7, 0x01, 0x17, 0x18, 0x0a, 0x36, 0x0a, 0x04, 0x04, + 0x07, 0x02, 0x05, 0x12, 0x04, 0xba, 0x01, 0x02, 0x21, 0x1a, 0x28, 0x20, 0x6c, 0x6f, 0x67, 0x20, + 0x69, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x66, 0x6f, 0x72, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, + 0x6f, 0x6e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x05, 0x04, 0x12, 0x04, 0xba, 0x01, + 0x02, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x05, 0x06, 0x12, 0x04, 0xba, 0x01, 0x0b, + 0x17, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x05, 0x01, 0x12, 0x04, 0xba, 0x01, 0x18, 0x1c, + 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x05, 0x03, 0x12, 0x04, 0xba, 0x01, 0x1f, 0x20, 0x0a, + 0x79, 0x0a, 0x04, 0x04, 0x07, 0x02, 0x06, 0x12, 0x04, 0xbe, 0x01, 0x02, 0x2c, 0x1a, 0x6b, 0x20, + 0x54, 0x68, 0x69, 0x73, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x20, 0x72, + 0x65, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x73, 0x20, 0x77, 0x68, 0x65, 0x6e, 0x20, 0x74, + 0x68, 0x65, 0x20, 0x6f, 0x72, 0x69, 0x67, 0x69, 0x6e, 0x61, 0x6c, 0x20, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x20, 0x6f, 0x63, 0x63, 0x75, 0x72, 0x72, 0x65, 0x64, 0x2c, 0x20, 0x69, 0x74, 0x20, 0x69, + 0x73, 0x20, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x0a, 0x20, 0x62, 0x79, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x20, 0x6f, 0x66, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x07, + 0x02, 0x06, 0x06, 0x12, 0x04, 0xbe, 0x01, 0x02, 0x1b, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x07, 0x02, + 0x06, 0x01, 0x12, 0x04, 0xbe, 0x01, 0x1c, 0x27, 0x0a, 0x0d, 0x0a, 0x05, 
0x04, 0x07, 0x02, 0x06, + 0x03, 0x12, 0x04, 0xbe, 0x01, 0x2a, 0x2b, 0x0a, 0x0e, 0x0a, 0x04, 0x04, 0x07, 0x08, 0x00, 0x12, + 0x06, 0xc0, 0x01, 0x02, 0xc7, 0x01, 0x03, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x07, 0x08, 0x00, 0x01, + 0x12, 0x04, 0xc0, 0x01, 0x08, 0x13, 0x0a, 0x75, 0x0a, 0x04, 0x04, 0x07, 0x02, 0x07, 0x12, 0x04, + 0xc3, 0x01, 0x04, 0x19, 0x1a, 0x67, 0x20, 0x55, 0x52, 0x49, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, + 0x65, 0x20, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x20, 0x66, 0x69, 0x6c, 0x65, 0x2c, 0x20, 0x69, 0x74, + 0x20, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x73, 0x20, 0x61, 0x6c, 0x6c, 0x20, 0x74, 0x68, 0x65, + 0x20, 0x69, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x20, 0x69, 0x6e, + 0x63, 0x6c, 0x75, 0x64, 0x69, 0x6e, 0x67, 0x20, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x20, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x20, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x20, 0x69, + 0x65, 0x2e, 0x2c, 0x20, 0x73, 0x33, 0x3a, 0x2f, 0x2f, 0x2e, 0x2e, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, + 0x05, 0x04, 0x07, 0x02, 0x07, 0x05, 0x12, 0x04, 0xc3, 0x01, 0x04, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, + 0x04, 0x07, 0x02, 0x07, 0x01, 0x12, 0x04, 0xc3, 0x01, 0x0b, 0x14, 0x0a, 0x0d, 0x0a, 0x05, 0x04, + 0x07, 0x02, 0x07, 0x03, 0x12, 0x04, 0xc3, 0x01, 0x17, 0x18, 0x0a, 0x3f, 0x0a, 0x04, 0x04, 0x07, + 0x02, 0x08, 0x12, 0x04, 0xc6, 0x01, 0x04, 0x24, 0x1a, 0x31, 0x20, 0x52, 0x61, 0x77, 0x20, 0x69, + 0x6e, 0x70, 0x75, 0x74, 0x20, 0x64, 0x61, 0x74, 0x61, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, + 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, + 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, + 0x07, 0x02, 0x08, 0x06, 0x12, 0x04, 0xc6, 0x01, 0x04, 0x13, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x07, + 0x02, 0x08, 0x01, 0x12, 0x04, 0xc6, 0x01, 0x14, 0x1e, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x07, 0x02, + 0x08, 0x03, 0x12, 0x04, 0xc6, 0x01, 0x21, 0x23, 0x0a, 0x0e, 0x0a, 0x04, 0x04, 0x07, 0x08, 0x01, + 0x12, 0x06, 
0xc9, 0x01, 0x02, 0xd3, 0x01, 0x03, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x07, 0x08, 0x01, + 0x01, 0x12, 0x04, 0xc9, 0x01, 0x08, 0x15, 0x0a, 0x9c, 0x01, 0x0a, 0x04, 0x04, 0x07, 0x02, 0x09, + 0x12, 0x04, 0xcc, 0x01, 0x04, 0x1a, 0x1a, 0x8d, 0x01, 0x20, 0x55, 0x52, 0x49, 0x20, 0x74, 0x6f, + 0x20, 0x74, 0x68, 0x65, 0x20, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x20, 0x6f, 0x66, 0x20, 0x74, + 0x68, 0x65, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x20, 0x69, 0x74, + 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x62, 0x65, 0x20, 0x69, 0x6e, 0x20, 0x61, 0x20, 0x66, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x65, + 0x73, 0x20, 0x61, 0x6c, 0x6c, 0x20, 0x74, 0x68, 0x65, 0x20, 0x69, 0x6e, 0x66, 0x6f, 0x72, 0x6d, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x20, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x69, 0x6e, 0x67, + 0x20, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x20, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x20, 0x70, 0x72, + 0x6f, 0x76, 0x69, 0x64, 0x65, 0x72, 0x2e, 0x20, 0x69, 0x65, 0x2e, 0x2c, 0x20, 0x73, 0x33, 0x3a, + 0x2f, 0x2f, 0x2e, 0x2e, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x09, 0x05, 0x12, + 0x04, 0xcc, 0x01, 0x04, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x09, 0x01, 0x12, 0x04, + 0xcc, 0x01, 0x0b, 0x15, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x09, 0x03, 0x12, 0x04, 0xcc, + 0x01, 0x18, 0x19, 0x0a, 0x33, 0x0a, 0x04, 0x04, 0x07, 0x02, 0x0a, 0x12, 0x04, 0xcf, 0x01, 0x04, + 0x23, 0x1a, 0x25, 0x20, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x20, 0x69, 0x6e, 0x66, 0x6f, 0x72, 0x6d, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x65, 0x78, + 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x0a, + 0x06, 0x12, 0x04, 0xcf, 0x01, 0x04, 0x17, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x0a, 0x01, + 0x12, 0x04, 0xcf, 0x01, 0x18, 0x1d, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x0a, 0x03, 0x12, + 0x04, 0xcf, 0x01, 0x20, 0x22, 0x0a, 0x40, 0x0a, 
0x04, 0x04, 0x07, 0x02, 0x0b, 0x12, 0x04, 0xd2, + 0x01, 0x04, 0x25, 0x1a, 0x32, 0x20, 0x52, 0x61, 0x77, 0x20, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, + 0x20, 0x64, 0x61, 0x74, 0x61, 0x20, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, 0x20, 0x62, + 0x79, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x65, 0x78, 0x65, 0x63, + 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x0b, 0x06, + 0x12, 0x04, 0xd2, 0x01, 0x04, 0x13, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x0b, 0x01, 0x12, + 0x04, 0xd2, 0x01, 0x14, 0x1f, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x0b, 0x03, 0x12, 0x04, + 0xd2, 0x01, 0x22, 0x24, 0x0a, 0x77, 0x0a, 0x04, 0x04, 0x07, 0x02, 0x0c, 0x12, 0x04, 0xd6, 0x01, + 0x02, 0x2a, 0x1a, 0x69, 0x20, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x20, 0x64, 0x61, 0x74, 0x61, + 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x20, 0x73, 0x65, 0x6e, 0x64, 0x73, 0x20, 0x62, 0x61, 0x63, 0x6b, + 0x2e, 0x20, 0x54, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x62, 0x6c, 0x65, 0x20, 0x74, 0x6f, 0x20, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x20, 0x76, 0x61, + 0x72, 0x69, 0x6f, 0x75, 0x73, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x20, 0x69, 0x6e, + 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, + 0x05, 0x04, 0x07, 0x02, 0x0c, 0x06, 0x12, 0x04, 0xd6, 0x01, 0x02, 0x18, 0x0a, 0x0d, 0x0a, 0x05, + 0x04, 0x07, 0x02, 0x0c, 0x01, 0x12, 0x04, 0xd6, 0x01, 0x19, 0x24, 0x0a, 0x0d, 0x0a, 0x05, 0x04, + 0x07, 0x02, 0x0c, 0x03, 0x12, 0x04, 0xd6, 0x01, 0x27, 0x29, 0x0a, 0xae, 0x02, 0x0a, 0x04, 0x04, + 0x07, 0x02, 0x0d, 0x12, 0x04, 0xdb, 0x01, 0x02, 0x1c, 0x1a, 0x9f, 0x02, 0x20, 0x53, 0x6f, 0x6d, + 0x65, 0x20, 0x70, 0x68, 0x61, 0x73, 0x65, 0x73, 0x2c, 0x20, 0x6c, 0x69, 0x6b, 0x65, 0x20, 0x52, + 0x55, 0x4e, 0x4e, 0x49, 0x4e, 0x47, 0x2c, 0x20, 0x63, 0x61, 0x6e, 0x20, 0x73, 0x65, 
0x6e, 0x64, + 0x20, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x20, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, + 0x20, 0x77, 0x69, 0x74, 0x68, 0x20, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x64, 0x20, 0x6d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x20, 0x28, 0x6e, 0x65, 0x77, 0x20, 0x6c, 0x6f, 0x67, 0x73, + 0x2c, 0x20, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x20, 0x63, 0x75, 0x73, + 0x74, 0x6f, 0x6d, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x2c, 0x20, 0x65, 0x74, 0x63, 0x29, 0x0a, 0x20, + 0x74, 0x68, 0x61, 0x74, 0x20, 0x73, 0x68, 0x6f, 0x75, 0x6c, 0x64, 0x20, 0x62, 0x65, 0x20, 0x72, + 0x65, 0x63, 0x6f, 0x72, 0x64, 0x65, 0x64, 0x20, 0x72, 0x65, 0x67, 0x61, 0x72, 0x64, 0x6c, 0x65, + 0x73, 0x73, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6c, 0x61, 0x63, 0x6b, 0x20, 0x6f, + 0x66, 0x20, 0x70, 0x68, 0x61, 0x73, 0x65, 0x20, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x2e, 0x0a, + 0x20, 0x54, 0x68, 0x65, 0x20, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x20, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x20, 0x73, 0x68, 0x6f, 0x75, 0x6c, 0x64, 0x20, 0x62, 0x65, 0x20, 0x69, 0x6e, 0x63, + 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x65, 0x64, 0x20, 0x77, 0x68, 0x65, 0x6e, 0x20, 0x6d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x20, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x20, 0x61, + 0x63, 0x72, 0x6f, 0x73, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x20, 0x6f, 0x66, 0x20, 0x61, 0x6e, 0x20, 0x69, 0x6e, 0x64, 0x69, 0x76, 0x69, 0x64, + 0x75, 0x61, 0x6c, 0x20, 0x70, 0x68, 0x61, 0x73, 0x65, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, + 0x07, 0x02, 0x0d, 0x05, 0x12, 0x04, 0xdb, 0x01, 0x02, 0x08, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x07, + 0x02, 0x0d, 0x01, 0x12, 0x04, 0xdb, 0x01, 0x09, 0x16, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x07, 0x02, + 0x0d, 0x03, 0x12, 0x04, 0xdb, 0x01, 0x19, 0x1b, 0x0a, 0x63, 0x0a, 0x04, 0x04, 0x07, 0x02, 0x0e, + 0x12, 0x04, 0xdf, 0x01, 0x02, 0x29, 0x1a, 0x55, 0x20, 0x41, 0x6e, 0x20, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x61, 0x6c, 
0x20, 0x65, 0x78, 0x70, 0x6c, 0x61, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x68, 0x61, 0x73, 0x65, 0x20, 0x74, + 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x0a, 0x20, 0x44, 0x65, 0x70, 0x72, + 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x3a, 0x20, 0x55, 0x73, 0x65, 0x20, 0x72, 0x65, 0x61, 0x73, + 0x6f, 0x6e, 0x73, 0x20, 0x69, 0x6e, 0x73, 0x74, 0x65, 0x61, 0x64, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, + 0x05, 0x04, 0x07, 0x02, 0x0e, 0x05, 0x12, 0x04, 0xdf, 0x01, 0x02, 0x08, 0x0a, 0x0d, 0x0a, 0x05, + 0x04, 0x07, 0x02, 0x0e, 0x01, 0x12, 0x04, 0xdf, 0x01, 0x09, 0x0f, 0x0a, 0x0d, 0x0a, 0x05, 0x04, + 0x07, 0x02, 0x0e, 0x03, 0x12, 0x04, 0xdf, 0x01, 0x12, 0x14, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x07, + 0x02, 0x0e, 0x08, 0x12, 0x04, 0xdf, 0x01, 0x15, 0x28, 0x0a, 0x0e, 0x0a, 0x06, 0x04, 0x07, 0x02, + 0x0e, 0x08, 0x03, 0x12, 0x04, 0xdf, 0x01, 0x16, 0x27, 0x0a, 0x4a, 0x0a, 0x04, 0x04, 0x07, 0x02, + 0x0f, 0x12, 0x04, 0xe2, 0x01, 0x02, 0x24, 0x1a, 0x3c, 0x20, 0x41, 0x6e, 0x20, 0x6f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x20, 0x6c, 0x69, 0x73, 0x74, 0x20, 0x6f, 0x66, 0x20, 0x65, 0x78, + 0x70, 0x6c, 0x61, 0x6e, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, + 0x68, 0x65, 0x20, 0x70, 0x68, 0x61, 0x73, 0x65, 0x20, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x0f, 0x04, 0x12, 0x04, + 0xe2, 0x01, 0x02, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x0f, 0x06, 0x12, 0x04, 0xe2, + 0x01, 0x0b, 0x16, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x0f, 0x01, 0x12, 0x04, 0xe2, 0x01, + 0x17, 0x1e, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x0f, 0x03, 0x12, 0x04, 0xe2, 0x01, 0x21, + 0x23, 0x0a, 0xb7, 0x02, 0x0a, 0x04, 0x04, 0x07, 0x02, 0x10, 0x12, 0x04, 0xe7, 0x01, 0x02, 0x18, + 0x1a, 0xa8, 0x02, 0x20, 0x41, 0x20, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, + 0x20, 0x79, 0x65, 0x74, 0x20, 0x65, 0x78, 0x74, 0x65, 0x6e, 
0x73, 0x69, 0x62, 0x6c, 0x65, 0x20, + 0x54, 0x61, 0x73, 0x6b, 0x20, 0x74, 0x79, 0x70, 0x65, 0x20, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x66, 0x69, 0x65, 0x72, 0x2e, 0x20, 0x49, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, 0x73, + 0x6b, 0x20, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x69, 0x73, 0x20, + 0x61, 0x6c, 0x72, 0x65, 0x61, 0x64, 0x79, 0x20, 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, + 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x20, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x0a, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x74, 0x79, 0x70, 0x65, 0x20, 0x77, 0x69, 0x6c, + 0x6c, 0x20, 0x62, 0x65, 0x20, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x6c, 0x2c, 0x20, + 0x62, 0x75, 0x74, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x61, 0x6c, 0x6c, 0x20, 0x74, 0x61, 0x73, 0x6b, + 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x20, 0x6e, 0x65, 0x63, 0x65, + 0x73, 0x73, 0x61, 0x72, 0x69, 0x6c, 0x79, 0x20, 0x75, 0x73, 0x65, 0x20, 0x70, 0x72, 0x65, 0x2d, + 0x72, 0x65, 0x67, 0x69, 0x73, 0x74, 0x65, 0x72, 0x65, 0x64, 0x20, 0x64, 0x65, 0x66, 0x69, 0x6e, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x74, 0x68, 0x69, 0x73, 0x0a, + 0x20, 0x74, 0x79, 0x70, 0x65, 0x20, 0x69, 0x73, 0x20, 0x75, 0x73, 0x65, 0x66, 0x75, 0x6c, 0x20, + 0x74, 0x6f, 0x20, 0x72, 0x65, 0x6e, 0x64, 0x65, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, + 0x73, 0x6b, 0x20, 0x69, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x55, 0x49, 0x2c, 0x20, 0x66, 0x69, + 0x6c, 0x74, 0x65, 0x72, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x2c, 0x20, 0x65, 0x74, 0x63, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, + 0x07, 0x02, 0x10, 0x05, 0x12, 0x04, 0xe7, 0x01, 0x02, 0x08, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x07, + 0x02, 0x10, 0x01, 0x12, 0x04, 0xe7, 0x01, 0x09, 0x12, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x07, 0x02, + 0x10, 0x03, 0x12, 0x04, 0xe7, 0x01, 0x15, 0x17, 0x0a, 0x38, 0x0a, 0x04, 0x04, 0x07, 0x02, 0x11, + 
0x12, 0x04, 0xea, 0x01, 0x02, 0x26, 0x1a, 0x2a, 0x20, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x20, 0x61, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x20, 0x68, 0x6f, 0x77, 0x20, 0x61, 0x20, 0x74, + 0x61, 0x73, 0x6b, 0x20, 0x77, 0x61, 0x73, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x64, + 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x11, 0x06, 0x12, 0x04, 0xea, 0x01, 0x02, + 0x17, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x11, 0x01, 0x12, 0x04, 0xea, 0x01, 0x18, 0x20, + 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x11, 0x03, 0x12, 0x04, 0xea, 0x01, 0x23, 0x25, 0x0a, + 0xb4, 0x02, 0x0a, 0x04, 0x04, 0x07, 0x02, 0x12, 0x12, 0x04, 0xf0, 0x01, 0x02, 0x1b, 0x1a, 0xa5, + 0x02, 0x20, 0x54, 0x68, 0x65, 0x20, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x20, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x20, 0x69, 0x73, 0x20, 0x75, 0x73, 0x65, 0x64, 0x20, 0x74, 0x6f, 0x20, 0x69, + 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, 0x65, 0x20, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x65, + 0x64, 0x20, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x20, 0x69, 0x6e, 0x20, 0x68, 0x6f, 0x77, + 0x20, 0x64, 0x61, 0x74, 0x61, 0x20, 0x69, 0x73, 0x20, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x65, + 0x64, 0x20, 0x75, 0x73, 0x69, 0x6e, 0x67, 0x20, 0x74, 0x68, 0x69, 0x73, 0x0a, 0x20, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x20, 0x46, 0x6f, 0x72, + 0x20, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x2c, 0x20, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, + 0x76, 0x65, 0x72, 0x69, 0x73, 0x6f, 0x6e, 0x20, 0x3e, 0x20, 0x30, 0x20, 0x6d, 0x65, 0x61, 0x6e, + 0x73, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x6d, 0x61, 0x70, 0x73, 0x20, 0x74, 0x61, 0x73, 0x6b, + 0x73, 0x20, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x20, 0x6c, 0x6f, 0x67, 0x73, 0x20, 0x75, 0x73, + 0x69, 0x6e, 0x67, 0x20, 0x74, 0x68, 0x65, 0x0a, 0x20, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, + 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x20, 0x45, + 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 
0x6c, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x49, + 0x6e, 0x66, 0x6f, 0x20, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x65, + 0x61, 0x63, 0x68, 0x20, 0x73, 0x75, 0x62, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x72, 0x61, 0x74, 0x68, + 0x65, 0x72, 0x20, 0x74, 0x68, 0x61, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x54, 0x61, 0x73, 0x6b, + 0x4c, 0x6f, 0x67, 0x0a, 0x20, 0x69, 0x6e, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x12, 0x05, 0x12, + 0x04, 0xf0, 0x01, 0x02, 0x07, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x12, 0x01, 0x12, 0x04, + 0xf0, 0x01, 0x08, 0x15, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x12, 0x03, 0x12, 0x04, 0xf0, + 0x01, 0x18, 0x1a, 0x0a, 0xa6, 0x03, 0x0a, 0x04, 0x04, 0x07, 0x02, 0x13, 0x12, 0x04, 0xf6, 0x01, + 0x02, 0x2d, 0x1a, 0x97, 0x03, 0x20, 0x54, 0x68, 0x69, 0x73, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x20, 0x72, 0x65, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x73, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x20, 0x77, 0x68, 0x65, 0x6e, + 0x20, 0x74, 0x68, 0x65, 0x20, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x20, 0x77, 0x61, 0x73, 0x20, 0x72, + 0x65, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x74, 0x68, 0x65, 0x20, 0x65, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x20, 0x66, 0x72, 0x61, 0x6d, 0x65, 0x77, 0x6f, + 0x72, 0x6b, 0x2e, 0x20, 0x46, 0x6f, 0x72, 0x20, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x2c, + 0x20, 0x61, 0x20, 0x6b, 0x38, 0x73, 0x0a, 0x20, 0x70, 0x6f, 0x64, 0x20, 0x74, 0x61, 0x73, 0x6b, + 0x20, 0x6d, 0x61, 0x79, 0x20, 0x62, 0x65, 0x20, 0x6d, 0x61, 0x72, 0x6b, 0x65, 0x64, 0x20, 0x63, + 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x20, 0x61, 0x74, 0x20, 0x28, 0x69, 0x65, 0x2e, + 0x20, 0x60, 0x6f, 0x63, 0x63, 0x75, 0x72, 0x72, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x60, 0x29, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x20, 
0x74, 0x68, 0x65, 0x20, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x20, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, + 0x67, 0x20, 0x75, 0x73, 0x65, 0x72, 0x20, 0x63, 0x6f, 0x64, 0x65, 0x20, 0x63, 0x6f, 0x6d, 0x70, + 0x6c, 0x65, 0x74, 0x65, 0x73, 0x2c, 0x0a, 0x20, 0x62, 0x75, 0x74, 0x20, 0x74, 0x68, 0x69, 0x73, + 0x20, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x6e, 0x6f, 0x74, 0x20, + 0x62, 0x65, 0x20, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x20, 0x75, 0x6e, 0x74, 0x69, + 0x6c, 0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x6f, 0x64, 0x20, 0x69, 0x73, 0x20, 0x6d, 0x61, 0x72, + 0x6b, 0x65, 0x64, 0x20, 0x61, 0x73, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, + 0x2e, 0x20, 0x45, 0x78, 0x74, 0x72, 0x61, 0x63, 0x74, 0x69, 0x6e, 0x67, 0x20, 0x62, 0x6f, 0x74, + 0x68, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x73, 0x65, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x73, 0x0a, 0x20, 0x66, 0x61, 0x63, 0x69, 0x6c, 0x69, 0x74, 0x61, 0x74, + 0x65, 0x73, 0x20, 0x61, 0x20, 0x6d, 0x6f, 0x72, 0x65, 0x20, 0x61, 0x63, 0x63, 0x75, 0x72, 0x61, + 0x74, 0x65, 0x20, 0x70, 0x6f, 0x72, 0x74, 0x72, 0x61, 0x79, 0x61, 0x6c, 0x20, 0x6f, 0x66, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x74, + 0x69, 0x6d, 0x65, 0x2d, 0x73, 0x65, 0x72, 0x69, 0x65, 0x73, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, + 0x04, 0x07, 0x02, 0x13, 0x06, 0x12, 0x04, 0xf6, 0x01, 0x02, 0x1b, 0x0a, 0x0d, 0x0a, 0x05, 0x04, + 0x07, 0x02, 0x13, 0x01, 0x12, 0x04, 0xf6, 0x01, 0x1c, 0x27, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x07, + 0x02, 0x13, 0x03, 0x12, 0x04, 0xf6, 0x01, 0x2a, 0x2c, 0x0a, 0x5a, 0x0a, 0x04, 0x04, 0x07, 0x02, + 0x14, 0x12, 0x04, 0xf9, 0x01, 0x02, 0x23, 0x1a, 0x4c, 0x20, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, + 0x6e, 0x73, 0x20, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x20, 0x72, 0x65, 0x71, 0x75, + 0x69, 0x72, 0x65, 0x64, 0x20, 0x74, 0x6f, 0x20, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x79, + 0x20, 0x6c, 
0x6f, 0x67, 0x73, 0x20, 0x72, 0x65, 0x6c, 0x61, 0x74, 0x65, 0x64, 0x20, 0x74, 0x6f, + 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, + 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x14, 0x06, 0x12, 0x04, + 0xf9, 0x01, 0x02, 0x11, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x14, 0x01, 0x12, 0x04, 0xf9, + 0x01, 0x12, 0x1d, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x14, 0x03, 0x12, 0x04, 0xf9, 0x01, + 0x20, 0x22, 0x0a, 0x76, 0x0a, 0x02, 0x04, 0x08, 0x12, 0x06, 0xfd, 0x01, 0x00, 0x9e, 0x02, 0x01, + 0x1a, 0x68, 0x20, 0x54, 0x68, 0x69, 0x73, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, + 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, 0x20, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x20, 0x61, 0x62, 0x6f, 0x75, 0x74, 0x20, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x20, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x20, 0x70, 0x72, 0x6f, 0x64, 0x75, + 0x63, 0x65, 0x64, 0x20, 0x6f, 0x72, 0x20, 0x75, 0x73, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x61, + 0x20, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x65, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x0a, 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x08, + 0x01, 0x12, 0x04, 0xfd, 0x01, 0x08, 0x1c, 0x0a, 0x84, 0x01, 0x0a, 0x04, 0x04, 0x08, 0x02, 0x00, + 0x12, 0x04, 0xff, 0x01, 0x02, 0x19, 0x1a, 0x76, 0x20, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, + 0x69, 0x65, 0x72, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x6e, 0x20, 0x65, 0x78, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x20, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x20, 0x63, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x74, 0x61, 0x73, + 0x6b, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x20, 0x66, 0x6f, 0x72, + 0x20, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x20, 0x51, 0x75, 0x62, 0x6f, 0x6c, 0x65, 0x20, + 0x71, 0x75, 0x65, 0x72, 0x79, 0x20, 0x49, 0x44, 
0x20, 0x6f, 0x72, 0x20, 0x70, 0x72, 0x65, 0x73, + 0x74, 0x6f, 0x20, 0x71, 0x75, 0x65, 0x72, 0x79, 0x20, 0x69, 0x64, 0x73, 0x2e, 0x0a, 0x0a, 0x0d, + 0x0a, 0x05, 0x04, 0x08, 0x02, 0x00, 0x05, 0x12, 0x04, 0xff, 0x01, 0x02, 0x08, 0x0a, 0x0d, 0x0a, + 0x05, 0x04, 0x08, 0x02, 0x00, 0x01, 0x12, 0x04, 0xff, 0x01, 0x09, 0x14, 0x0a, 0x0d, 0x0a, 0x05, + 0x04, 0x08, 0x02, 0x00, 0x03, 0x12, 0x04, 0xff, 0x01, 0x17, 0x18, 0x0a, 0x91, 0x02, 0x0a, 0x04, + 0x04, 0x08, 0x02, 0x01, 0x12, 0x04, 0x84, 0x02, 0x02, 0x13, 0x1a, 0x82, 0x02, 0x20, 0x41, 0x20, + 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x20, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x20, 0x66, 0x6f, 0x72, + 0x20, 0x74, 0x68, 0x65, 0x20, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x20, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x20, 0x77, 0x69, 0x74, 0x68, 0x20, 0x72, 0x65, 0x73, 0x70, + 0x65, 0x63, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x61, 0x6c, 0x6c, 0x20, 0x65, 0x78, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x20, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x20, 0x66, 0x6f, + 0x72, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x20, 0x41, 0x6c, 0x74, + 0x68, 0x6f, 0x75, 0x67, 0x68, 0x20, 0x74, 0x68, 0x65, 0x0a, 0x20, 0x69, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x66, 0x69, 0x65, 0x72, 0x20, 0x6d, 0x61, 0x79, 0x20, 0x63, 0x68, 0x61, 0x6e, 0x67, 0x65, + 0x20, 0x62, 0x65, 0x74, 0x77, 0x65, 0x65, 0x6e, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x72, 0x65, + 0x70, 0x6f, 0x72, 0x74, 0x69, 0x6e, 0x67, 0x20, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x20, 0x6f, + 0x72, 0x20, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x2c, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, + 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x72, 0x65, 0x6d, 0x61, 0x69, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, + 0x73, 0x61, 0x6d, 0x65, 0x20, 0x74, 0x6f, 0x20, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x61, + 0x67, 0x67, 0x72, 0x65, 0x67, 0x61, 0x74, 0x69, 0x6e, 0x67, 0x0a, 0x20, 0x69, 0x6e, 0x66, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x66, 0x72, 0x6f, 0x6d, 0x20, 0x6d, 
0x75, 0x6c, + 0x74, 0x69, 0x70, 0x6c, 0x65, 0x20, 0x72, 0x65, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x2e, 0x0a, 0x0a, + 0x0d, 0x0a, 0x05, 0x04, 0x08, 0x02, 0x01, 0x05, 0x12, 0x04, 0x84, 0x02, 0x02, 0x08, 0x0a, 0x0d, + 0x0a, 0x05, 0x04, 0x08, 0x02, 0x01, 0x01, 0x12, 0x04, 0x84, 0x02, 0x09, 0x0e, 0x0a, 0x0d, 0x0a, + 0x05, 0x04, 0x08, 0x02, 0x01, 0x03, 0x12, 0x04, 0x84, 0x02, 0x11, 0x12, 0x0a, 0x5e, 0x0a, 0x04, + 0x04, 0x08, 0x02, 0x02, 0x12, 0x04, 0x87, 0x02, 0x02, 0x1b, 0x1a, 0x50, 0x20, 0x52, 0x65, 0x74, + 0x72, 0x79, 0x20, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x20, 0x6e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x65, 0x78, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x20, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x20, 0x69, 0x65, + 0x2e, 0x2c, 0x20, 0x32, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x65, 0x63, + 0x6f, 0x6e, 0x64, 0x20, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, + 0x04, 0x08, 0x02, 0x02, 0x05, 0x12, 0x04, 0x87, 0x02, 0x02, 0x08, 0x0a, 0x0d, 0x0a, 0x05, 0x04, + 0x08, 0x02, 0x02, 0x01, 0x12, 0x04, 0x87, 0x02, 0x09, 0x16, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x08, + 0x02, 0x02, 0x03, 0x12, 0x04, 0x87, 0x02, 0x19, 0x1a, 0x0a, 0x3b, 0x0a, 0x04, 0x04, 0x08, 0x02, + 0x03, 0x12, 0x04, 0x8a, 0x02, 0x02, 0x25, 0x1a, 0x2d, 0x20, 0x50, 0x68, 0x61, 0x73, 0x65, 0x20, + 0x61, 0x73, 0x73, 0x6f, 0x63, 0x69, 0x61, 0x74, 0x65, 0x64, 0x20, 0x77, 0x69, 0x74, 0x68, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x20, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x08, 0x02, 0x03, 0x06, 0x12, + 0x04, 0x8a, 0x02, 0x02, 0x1a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x08, 0x02, 0x03, 0x01, 0x12, 0x04, + 0x8a, 0x02, 0x1b, 0x20, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x08, 0x02, 0x03, 0x03, 0x12, 0x04, 0x8a, + 0x02, 0x23, 0x24, 0x0a, 0x54, 0x0a, 0x04, 0x04, 0x08, 0x02, 0x04, 0x12, 0x04, 0x8d, 0x02, 0x02, + 0x2b, 0x1a, 0x46, 0x20, 
0x43, 0x61, 0x70, 0x74, 0x75, 0x72, 0x65, 0x73, 0x20, 0x74, 0x68, 0x65, + 0x20, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x20, 0x6f, 0x66, 0x20, 0x63, 0x61, 0x63, 0x68, 0x69, + 0x6e, 0x67, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x65, 0x78, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x20, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x20, 0x65, 0x78, + 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x08, 0x02, + 0x04, 0x06, 0x12, 0x04, 0x8d, 0x02, 0x02, 0x19, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x08, 0x02, 0x04, + 0x01, 0x12, 0x04, 0x8d, 0x02, 0x1a, 0x26, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x08, 0x02, 0x04, 0x03, + 0x12, 0x04, 0x8d, 0x02, 0x29, 0x2a, 0x0a, 0x43, 0x0a, 0x04, 0x04, 0x08, 0x02, 0x05, 0x12, 0x04, + 0x90, 0x02, 0x02, 0x21, 0x1a, 0x35, 0x20, 0x6c, 0x6f, 0x67, 0x20, 0x69, 0x6e, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x65, + 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x20, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, + 0x08, 0x02, 0x05, 0x04, 0x12, 0x04, 0x90, 0x02, 0x02, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x08, + 0x02, 0x05, 0x06, 0x12, 0x04, 0x90, 0x02, 0x0b, 0x17, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x08, 0x02, + 0x05, 0x01, 0x12, 0x04, 0x90, 0x02, 0x18, 0x1c, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x08, 0x02, 0x05, + 0x03, 0x12, 0x04, 0x90, 0x02, 0x1f, 0x20, 0x0a, 0x9c, 0x02, 0x0a, 0x04, 0x04, 0x08, 0x08, 0x00, + 0x12, 0x06, 0x95, 0x02, 0x02, 0x97, 0x02, 0x03, 0x1a, 0x8b, 0x02, 0x20, 0x41, 0x64, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x20, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x20, + 0x74, 0x6f, 0x20, 0x64, 0x6f, 0x20, 0x77, 0x69, 0x74, 0x68, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x27, 0x73, 0x20, 0x6e, 0x6f, 0x64, 0x65, 0x20, 0x74, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x20, 0x62, 0x61, 0x73, 0x65, 0x64, 0x20, 
0x6f, 0x6e, 0x20, 0x74, 0x68, 0x65, + 0x20, 0x6e, 0x6f, 0x64, 0x65, 0x20, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x20, 0x57, 0x65, 0x20, 0x61, + 0x72, 0x65, 0x0a, 0x20, 0x65, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x6c, 0x79, 0x20, 0x6e, + 0x6f, 0x74, 0x20, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x69, 0x6e, 0x67, 0x20, 0x74, 0x68, 0x65, + 0x20, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x20, 0x68, 0x65, 0x72, 0x65, 0x20, 0x62, 0x65, 0x63, 0x61, 0x75, 0x73, 0x65, + 0x20, 0x69, 0x74, 0x20, 0x69, 0x73, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x63, 0x6c, 0x65, 0x61, 0x72, + 0x20, 0x69, 0x66, 0x20, 0x69, 0x74, 0x20, 0x69, 0x73, 0x20, 0x6e, 0x65, 0x65, 0x64, 0x65, 0x64, + 0x2e, 0x0a, 0x20, 0x49, 0x66, 0x20, 0x77, 0x65, 0x20, 0x64, 0x65, 0x63, 0x69, 0x64, 0x65, 0x20, + 0x74, 0x6f, 0x20, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x20, 0x69, 0x6e, 0x20, 0x74, 0x68, + 0x65, 0x20, 0x66, 0x75, 0x74, 0x75, 0x72, 0x65, 0x2c, 0x20, 0x77, 0x65, 0x20, 0x73, 0x68, 0x6f, + 0x75, 0x6c, 0x64, 0x20, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x20, 0x74, 0x68, + 0x65, 0x20, 0x63, 0x61, 0x63, 0x68, 0x65, 0x5f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x20, 0x66, + 0x69, 0x65, 0x6c, 0x64, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x08, 0x08, 0x00, 0x01, 0x12, + 0x04, 0x95, 0x02, 0x08, 0x17, 0x0a, 0x0c, 0x0a, 0x04, 0x04, 0x08, 0x02, 0x06, 0x12, 0x04, 0x96, + 0x02, 0x04, 0x34, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x08, 0x02, 0x06, 0x06, 0x12, 0x04, 0x96, 0x02, + 0x04, 0x18, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x08, 0x02, 0x06, 0x01, 0x12, 0x04, 0x96, 0x02, 0x19, + 0x2f, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x08, 0x02, 0x06, 0x03, 0x12, 0x04, 0x96, 0x02, 0x32, 0x33, + 0x0a, 0x41, 0x0a, 0x04, 0x04, 0x08, 0x02, 0x07, 0x12, 0x04, 0x9a, 0x02, 0x02, 0x29, 0x1a, 0x33, + 0x20, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x62, 0x6c, 0x65, 0x20, 0x66, 0x69, 0x65, 0x6c, + 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x2c, 0x20, 0x70, 0x6c, + 
0x75, 0x67, 0x69, 0x6e, 0x2d, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x20, 0x69, 0x6e, + 0x66, 0x6f, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x08, 0x02, 0x07, 0x06, 0x12, 0x04, 0x9a, 0x02, + 0x02, 0x18, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x08, 0x02, 0x07, 0x01, 0x12, 0x04, 0x9a, 0x02, 0x19, + 0x24, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x08, 0x02, 0x07, 0x03, 0x12, 0x04, 0x9a, 0x02, 0x27, 0x28, + 0x0a, 0x5a, 0x0a, 0x04, 0x04, 0x08, 0x02, 0x08, 0x12, 0x04, 0x9d, 0x02, 0x02, 0x22, 0x1a, 0x4c, + 0x20, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x73, 0x20, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x20, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x20, 0x74, 0x6f, 0x20, 0x69, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x79, 0x20, 0x6c, 0x6f, 0x67, 0x73, 0x20, 0x72, 0x65, 0x6c, + 0x61, 0x74, 0x65, 0x64, 0x20, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x74, 0x61, 0x73, + 0x6b, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, + 0x04, 0x08, 0x02, 0x08, 0x06, 0x12, 0x04, 0x9d, 0x02, 0x02, 0x11, 0x0a, 0x0d, 0x0a, 0x05, 0x04, + 0x08, 0x02, 0x08, 0x01, 0x12, 0x04, 0x9d, 0x02, 0x12, 0x1d, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x08, + 0x02, 0x08, 0x03, 0x12, 0x04, 0x9d, 0x02, 0x20, 0x21, 0x0a, 0x9a, 0x01, 0x0a, 0x02, 0x04, 0x09, + 0x12, 0x06, 0xa2, 0x02, 0x00, 0xa8, 0x02, 0x01, 0x1a, 0x8b, 0x01, 0x20, 0x54, 0x68, 0x69, 0x73, + 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x68, 0x6f, 0x6c, 0x64, 0x73, 0x20, 0x74, + 0x61, 0x73, 0x6b, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x6d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x20, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x20, + 0x74, 0x6f, 0x20, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x20, 0x61, 0x6c, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x75, 0x73, 0x65, 0x64, 0x20, 0x74, 0x6f, 0x20, 0x6d, + 0x61, 0x6e, 0x61, 0x67, 0x65, 0x20, 0x63, 0x6f, 0x6e, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, + 0x0a, 0x20, 0x65, 0x78, 0x65, 0x63, 
0x75, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x20, 0x66, 0x6f, 0x72, + 0x20, 0x61, 0x20, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x2e, 0x0a, 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x09, 0x01, 0x12, 0x04, 0xa2, + 0x02, 0x08, 0x18, 0x0a, 0x5b, 0x0a, 0x04, 0x04, 0x09, 0x02, 0x00, 0x12, 0x04, 0xa4, 0x02, 0x02, + 0x1e, 0x1a, 0x4d, 0x20, 0x55, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x20, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x20, 0x49, 0x44, 0x20, 0x75, 0x73, 0x65, 0x64, 0x20, 0x74, 0x6f, 0x20, 0x69, + 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x79, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x65, 0x78, 0x65, + 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x77, 0x68, 0x65, 0x6e, 0x20, 0x61, 0x6c, 0x6c, 0x6f, + 0x63, 0x61, 0x74, 0x69, 0x6e, 0x67, 0x20, 0x61, 0x20, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x2e, 0x0a, + 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x09, 0x02, 0x00, 0x05, 0x12, 0x04, 0xa4, 0x02, 0x02, 0x08, 0x0a, + 0x0d, 0x0a, 0x05, 0x04, 0x09, 0x02, 0x00, 0x01, 0x12, 0x04, 0xa4, 0x02, 0x09, 0x19, 0x0a, 0x0d, + 0x0a, 0x05, 0x04, 0x09, 0x02, 0x00, 0x03, 0x12, 0x04, 0xa4, 0x02, 0x1c, 0x1d, 0x0a, 0x58, 0x0a, + 0x04, 0x04, 0x09, 0x02, 0x01, 0x12, 0x04, 0xa7, 0x02, 0x02, 0x17, 0x1a, 0x4a, 0x20, 0x4e, 0x61, + 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20, 0x75, 0x6e, 0x64, 0x65, 0x72, 0x20, 0x77, 0x68, + 0x69, 0x63, 0x68, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x65, 0x78, + 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x65, + 0x64, 0x20, 0x61, 0x6e, 0x20, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x09, 0x02, 0x01, 0x05, + 0x12, 0x04, 0xa7, 0x02, 0x02, 0x08, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x09, 0x02, 0x01, 0x01, 0x12, + 0x04, 0xa7, 0x02, 0x09, 0x12, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x09, 0x02, 0x01, 0x03, 0x12, 0x04, + 0xa7, 0x02, 0x15, 0x16, 0x0a, 0x89, 0x03, 0x0a, 0x02, 0x04, 0x0a, 0x12, 
0x06, 0xae, 0x02, 0x00, + 0xc5, 0x02, 0x01, 0x1a, 0xfa, 0x02, 0x20, 0x48, 0x6f, 0x6c, 0x64, 0x73, 0x20, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x20, 0x61, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x20, 0x68, 0x6f, 0x77, + 0x20, 0x61, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x77, 0x61, 0x73, 0x20, 0x65, 0x78, 0x65, 0x63, + 0x75, 0x74, 0x65, 0x64, 0x2e, 0x0a, 0x20, 0x41, 0x73, 0x20, 0x61, 0x20, 0x74, 0x61, 0x73, 0x6b, + 0x20, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x20, 0x61, 0x63, 0x72, + 0x6f, 0x73, 0x73, 0x20, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x20, 0x70, 0x68, 0x61, 0x73, 0x65, 0x73, + 0x20, 0x64, 0x75, 0x72, 0x69, 0x6e, 0x67, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, + 0x6e, 0x20, 0x73, 0x6f, 0x6d, 0x65, 0x20, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x73, 0x2c, 0x20, 0x73, 0x75, 0x63, 0x68, 0x20, 0x69, 0x74, 0x73, 0x20, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x65, 0x64, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x20, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x65, 0x64, 0x20, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x20, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x2c, 0x0a, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x6d, + 0x6f, 0x72, 0x65, 0x20, 0x6d, 0x61, 0x79, 0x20, 0x67, 0x72, 0x6f, 0x77, 0x20, 0x69, 0x6e, 0x20, + 0x73, 0x69, 0x7a, 0x65, 0x20, 0x62, 0x75, 0x74, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x63, 0x68, 0x61, + 0x6e, 0x67, 0x65, 0x20, 0x6e, 0x65, 0x63, 0x65, 0x73, 0x73, 0x61, 0x72, 0x69, 0x6c, 0x79, 0x20, + 0x62, 0x61, 0x73, 0x65, 0x64, 0x20, 0x6f, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x68, 0x61, + 0x73, 0x65, 0x20, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x74, 0x68, + 0x61, 0x74, 0x20, 0x73, 0x70, 0x61, 0x72, 0x6b, 0x65, 0x64, 0x20, 0x74, 0x68, 0x65, 0x20, 0x65, + 0x76, 0x65, 0x6e, 0x74, 0x20, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x0a, 0x20, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x20, 0x69, 0x73, 0x20, 0x61, 0x20, 0x63, 0x6f, 0x6e, 0x74, + 0x61, 0x69, 
0x6e, 0x65, 0x72, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x73, 0x65, 0x20, + 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x20, 0x61, 0x63, 0x72, 0x6f, 0x73, + 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, + 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x6c, 0x69, 0x66, 0x65, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x2e, 0x0a, + 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x0a, 0x01, 0x12, 0x04, 0xae, 0x02, 0x08, 0x1d, 0x0a, 0x53, 0x0a, + 0x04, 0x04, 0x0a, 0x02, 0x00, 0x12, 0x04, 0xb0, 0x02, 0x02, 0x1c, 0x1a, 0x45, 0x20, 0x55, 0x6e, + 0x69, 0x71, 0x75, 0x65, 0x2c, 0x20, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x20, + 0x6e, 0x61, 0x6d, 0x65, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x74, 0x61, + 0x73, 0x6b, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x75, 0x73, 0x65, + 0x64, 0x20, 0x62, 0x79, 0x20, 0x74, 0x68, 0x65, 0x20, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, + 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0a, 0x02, 0x00, 0x05, 0x12, 0x04, 0xb0, 0x02, 0x02, + 0x08, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0a, 0x02, 0x00, 0x01, 0x12, 0x04, 0xb0, 0x02, 0x09, 0x17, + 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0a, 0x02, 0x00, 0x03, 0x12, 0x04, 0xb0, 0x02, 0x1a, 0x1b, 0x0a, + 0x90, 0x01, 0x0a, 0x04, 0x04, 0x0a, 0x02, 0x01, 0x12, 0x04, 0xb3, 0x02, 0x02, 0x37, 0x1a, 0x81, + 0x01, 0x20, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x20, 0x64, 0x61, 0x74, + 0x61, 0x20, 0x6f, 0x6e, 0x20, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x20, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x20, 0x6f, 0x6e, 0x20, 0x6f, 0x74, 0x68, 0x65, 0x72, + 0x20, 0x62, 0x61, 0x63, 0x6b, 0x2d, 0x65, 0x6e, 0x64, 0x73, 0x20, 0x6f, 0x72, 0x20, 0x70, 0x6c, + 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x73, 0x20, 0x28, 0x65, 0x2e, 0x67, 0x2e, 0x20, 0x48, 0x69, + 0x76, 0x65, 0x2c, 0x20, 0x51, 0x75, 0x62, 0x6f, 0x6c, 0x65, 0x2c, 0x20, 0x65, 0x74, 0x63, 0x29, + 0x20, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x65, 
0x64, 0x20, 0x62, 0x79, 0x20, 0x74, 0x68, 0x69, + 0x73, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0a, 0x02, 0x01, 0x04, 0x12, 0x04, 0xb3, 0x02, 0x02, + 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0a, 0x02, 0x01, 0x06, 0x12, 0x04, 0xb3, 0x02, 0x0b, 0x1f, + 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0a, 0x02, 0x01, 0x01, 0x12, 0x04, 0xb3, 0x02, 0x20, 0x32, 0x0a, + 0x0d, 0x0a, 0x05, 0x04, 0x0a, 0x02, 0x01, 0x03, 0x12, 0x04, 0xb3, 0x02, 0x35, 0x36, 0x0a, 0xca, + 0x01, 0x0a, 0x04, 0x04, 0x0a, 0x02, 0x02, 0x12, 0x04, 0xb7, 0x02, 0x02, 0x33, 0x1a, 0xbb, 0x01, + 0x20, 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x73, 0x20, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x61, 0x6c, 0x20, 0x64, 0x61, 0x74, 0x61, 0x20, 0x6f, 0x6e, 0x20, 0x63, 0x6f, 0x6e, + 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x20, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x20, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x20, 0x75, 0x73, 0x65, 0x64, + 0x20, 0x64, 0x75, 0x72, 0x69, 0x6e, 0x67, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x2e, 0x0a, 0x20, 0x54, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20, 0x61, 0x20, 0x72, + 0x65, 0x70, 0x65, 0x61, 0x74, 0x65, 0x64, 0x20, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x20, 0x62, 0x65, + 0x63, 0x61, 0x75, 0x73, 0x65, 0x20, 0x61, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x20, 0x63, + 0x61, 0x6e, 0x20, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x20, 0x6d, 0x75, 0x6c, 0x74, 0x69, + 0x70, 0x6c, 0x65, 0x20, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x20, 0x61, 0x6c, 0x6c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x20, 0x64, 0x75, 0x72, 0x69, 0x6e, 0x67, 0x20, + 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, + 0x0a, 0x02, 0x02, 0x04, 0x12, 0x04, 0xb7, 0x02, 0x02, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0a, + 0x02, 0x02, 0x06, 0x12, 0x04, 0xb7, 0x02, 0x0b, 0x1b, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 
0x0a, 0x02, + 0x02, 0x01, 0x12, 0x04, 0xb7, 0x02, 0x1c, 0x2e, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0a, 0x02, 0x02, + 0x03, 0x12, 0x04, 0xb7, 0x02, 0x31, 0x32, 0x0a, 0x47, 0x0a, 0x04, 0x04, 0x0a, 0x02, 0x03, 0x12, + 0x04, 0xba, 0x02, 0x02, 0x1f, 0x1a, 0x39, 0x20, 0x54, 0x68, 0x65, 0x20, 0x69, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x20, 0x75, 0x73, 0x65, 0x64, 0x20, 0x74, 0x6f, 0x20, 0x65, 0x78, 0x65, + 0x63, 0x75, 0x74, 0x65, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x0a, + 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0a, 0x02, 0x03, 0x05, 0x12, 0x04, 0xba, 0x02, 0x02, 0x08, 0x0a, + 0x0d, 0x0a, 0x05, 0x04, 0x0a, 0x02, 0x03, 0x01, 0x12, 0x04, 0xba, 0x02, 0x09, 0x1a, 0x0a, 0x0d, + 0x0a, 0x05, 0x04, 0x0a, 0x02, 0x03, 0x03, 0x12, 0x04, 0xba, 0x02, 0x1d, 0x1e, 0x0a, 0x5f, 0x0a, + 0x04, 0x04, 0x0a, 0x04, 0x00, 0x12, 0x06, 0xbd, 0x02, 0x02, 0xc3, 0x02, 0x03, 0x1a, 0x4f, 0x20, + 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x62, 0x72, 0x6f, + 0x61, 0x64, 0x20, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x20, 0x6f, 0x66, 0x20, 0x6d, + 0x61, 0x63, 0x68, 0x69, 0x6e, 0x65, 0x20, 0x75, 0x73, 0x65, 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, + 0x74, 0x68, 0x69, 0x73, 0x20, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x20, 0x74, 0x61, + 0x73, 0x6b, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x0a, 0x0a, 0x0d, + 0x0a, 0x05, 0x04, 0x0a, 0x04, 0x00, 0x01, 0x12, 0x04, 0xbd, 0x02, 0x07, 0x14, 0x0a, 0x5b, 0x0a, + 0x06, 0x04, 0x0a, 0x04, 0x00, 0x02, 0x00, 0x12, 0x04, 0xbf, 0x02, 0x04, 0x10, 0x1a, 0x4b, 0x20, + 0x54, 0x68, 0x65, 0x20, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x69, 0x6e, 0x73, 0x74, + 0x61, 0x6e, 0x63, 0x65, 0x20, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x20, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x75, 0x72, 0x65, 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x20, 
0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, + 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x0a, 0x0a, 0x0f, 0x0a, 0x07, 0x04, 0x0a, + 0x04, 0x00, 0x02, 0x00, 0x01, 0x12, 0x04, 0xbf, 0x02, 0x04, 0x0b, 0x0a, 0x0f, 0x0a, 0x07, 0x04, + 0x0a, 0x04, 0x00, 0x02, 0x00, 0x02, 0x12, 0x04, 0xbf, 0x02, 0x0e, 0x0f, 0x0a, 0x48, 0x0a, 0x06, + 0x04, 0x0a, 0x04, 0x00, 0x02, 0x01, 0x12, 0x04, 0xc2, 0x02, 0x04, 0x16, 0x1a, 0x38, 0x20, 0x54, + 0x68, 0x65, 0x20, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x20, 0x63, 0x6c, 0x61, 0x73, + 0x73, 0x20, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x65, 0x64, 0x20, 0x66, 0x6f, 0x72, + 0x20, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x72, 0x75, 0x70, 0x74, 0x69, 0x62, 0x6c, 0x65, 0x20, 0x74, + 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x0a, 0x0a, 0x0f, 0x0a, 0x07, 0x04, 0x0a, 0x04, 0x00, 0x02, 0x01, + 0x01, 0x12, 0x04, 0xc2, 0x02, 0x04, 0x11, 0x0a, 0x0f, 0x0a, 0x07, 0x04, 0x0a, 0x04, 0x00, 0x02, + 0x01, 0x02, 0x12, 0x04, 0xc2, 0x02, 0x14, 0x15, 0x0a, 0x0c, 0x0a, 0x04, 0x04, 0x0a, 0x02, 0x04, + 0x12, 0x04, 0xc4, 0x02, 0x02, 0x24, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0a, 0x02, 0x04, 0x06, 0x12, + 0x04, 0xc4, 0x02, 0x02, 0x0f, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0a, 0x02, 0x04, 0x01, 0x12, 0x04, + 0xc4, 0x02, 0x10, 0x1e, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0a, 0x02, 0x04, 0x03, 0x12, 0x04, 0xc4, + 0x02, 0x21, 0x23, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x0a, 0x84, 0x28, 0x0a, 0x21, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2f, + 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x0f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x1a, 0x20, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, 0x6f, + 0x72, 0x65, 0x2f, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x66, 0x6c, 0x79, 0x74, 
0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, + 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, + 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, + 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0xb8, 0x04, 0x0a, 0x1b, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x44, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x32, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, + 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x08, + 0x72, 0x61, 0x77, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x49, 0x0a, 0x10, 0x6f, 0x75, 0x74, 0x70, + 0x75, 0x74, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, + 0x63, 0x65, 0x52, 0x0f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x66, + 0x61, 0x63, 0x65, 0x12, 0x3d, 0x0a, 0x0c, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x5f, + 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x72, 0x74, 0x69, 0x66, + 0x61, 0x63, 0x74, 0x49, 0x44, 0x52, 0x0b, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x49, + 
0x64, 0x73, 0x12, 0x5c, 0x0a, 0x13, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x5f, + 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x2b, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, + 0x6f, 0x6e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x12, 0x72, 0x65, + 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x12, 0x40, + 0x0a, 0x0e, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x70, 0x6c, 0x61, 0x6e, 0x5f, 0x69, 0x64, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x52, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x50, 0x6c, 0x61, 0x6e, 0x49, 0x64, + 0x12, 0x50, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x38, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x57, 0x6f, 0x72, + 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 
0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x99, 0x04, + 0x0a, 0x17, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4e, 0x6f, 0x64, 0x65, + 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x09, 0x72, 0x61, 0x77, + 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x4e, + 0x6f, 0x64, 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0x52, 0x08, 0x72, 0x61, 0x77, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x49, 0x0a, 0x0c, 0x74, + 0x61, 0x73, 0x6b, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x27, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, + 0x72, 0x65, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, + 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x0a, 0x74, 0x61, 0x73, 0x6b, + 0x45, 0x78, 0x65, 0x63, 0x49, 0x64, 0x12, 0x49, 0x0a, 0x10, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, + 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x1e, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x64, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, 0x65, + 0x52, 0x0f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x66, 0x61, 0x63, + 0x65, 0x12, 0x3d, 0x0a, 0x0c, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x5f, 0x69, 0x64, + 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, + 0x74, 0x49, 0x44, 0x52, 0x0b, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x49, 0x64, 0x73, + 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 
0x6c, 0x18, 0x05, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x72, 0x69, 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x12, 0x40, + 0x0a, 0x0e, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x70, 0x6c, 0x61, 0x6e, 0x5f, 0x69, 0x64, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, + 0x65, 0x72, 0x52, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x50, 0x6c, 0x61, 0x6e, 0x49, 0x64, + 0x12, 0x4c, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x34, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x4e, 0x6f, 0x64, + 0x65, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, + 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xe4, 0x01, 0x0a, 0x17, 0x43, 0x6c, + 0x6f, 0x75, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, + 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x09, 0x72, 0x61, 0x77, 0x5f, 0x65, 0x76, 0x65, + 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x45, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x08, 0x72, + 0x61, 0x77, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x4c, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x18, 
0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x22, 0xf3, 0x02, 0x0a, 0x18, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x45, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x4e, 0x0a, + 0x0c, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, + 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, + 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, + 0x52, 0x0b, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x40, 0x0a, + 0x0e, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x5f, 0x70, 0x6c, 0x61, 0x6e, 0x5f, 0x69, 0x64, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, + 0x72, 0x52, 0x0c, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x50, 0x6c, 0x61, 0x6e, 0x49, 0x64, 0x12, + 0x3b, 0x0a, 0x0b, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, 0x64, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x66, 
0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, + 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, + 0x52, 0x0a, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x64, 0x12, 0x3d, 0x0a, 0x0c, + 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, + 0x6f, 0x72, 0x65, 0x2e, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x49, 0x44, 0x52, 0x0b, + 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x49, 0x64, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x61, + 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x5f, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x65, 0x72, 0x73, + 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, + 0x54, 0x72, 0x61, 0x63, 0x6b, 0x65, 0x72, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x70, 0x72, 0x69, 0x6e, + 0x63, 0x69, 0x70, 0x61, 0x6c, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x72, 0x69, + 0x6e, 0x63, 0x69, 0x70, 0x61, 0x6c, 0x42, 0xb9, 0x01, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x42, 0x10, + 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x76, 0x32, + 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x32, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0xa2, 0x02, 0x03, 0x46, 0x45, 0x58, 0xaa, 0x02, 0x0f, + 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0xca, + 0x02, 0x0f, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x45, 0x76, 0x65, 0x6e, + 0x74, 0xe2, 0x02, 0x1b, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 
0x45, 0x76, + 0x65, 0x6e, 0x74, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, + 0x02, 0x10, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x3a, 0x3a, 0x45, 0x76, 0x65, + 0x6e, 0x74, 0x4a, 0xd5, 0x17, 0x0a, 0x06, 0x12, 0x04, 0x00, 0x00, 0x4e, 0x01, 0x0a, 0x08, 0x0a, + 0x01, 0x0c, 0x12, 0x03, 0x00, 0x00, 0x12, 0x0a, 0x08, 0x0a, 0x01, 0x02, 0x12, 0x03, 0x02, 0x00, + 0x18, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x00, 0x12, 0x03, 0x04, 0x00, 0x2a, 0x0a, 0x09, 0x0a, 0x02, + 0x03, 0x01, 0x12, 0x03, 0x05, 0x00, 0x29, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x02, 0x12, 0x03, 0x06, + 0x00, 0x28, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x03, 0x12, 0x03, 0x07, 0x00, 0x25, 0x0a, 0x08, 0x0a, + 0x01, 0x08, 0x12, 0x03, 0x09, 0x00, 0x4a, 0x0a, 0x09, 0x0a, 0x02, 0x08, 0x0b, 0x12, 0x03, 0x09, + 0x00, 0x4a, 0x0a, 0xb1, 0x01, 0x0a, 0x02, 0x04, 0x00, 0x12, 0x04, 0x0d, 0x00, 0x1f, 0x01, 0x1a, + 0xa4, 0x01, 0x20, 0x54, 0x68, 0x69, 0x73, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x20, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6c, + 0x6c, 0x65, 0x6c, 0x20, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x72, 0x61, 0x77, 0x20, 0x57, + 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x20, 0x49, + 0x74, 0x27, 0x73, 0x20, 0x66, 0x69, 0x6c, 0x6c, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x77, 0x69, + 0x74, 0x68, 0x20, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x0a, 0x20, 0x69, + 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, + 0x64, 0x6f, 0x77, 0x6e, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x75, + 0x6d, 0x65, 0x72, 0x73, 0x20, 0x6d, 0x61, 0x79, 0x20, 0x66, 0x69, 0x6e, 0x64, 0x20, 0x75, 0x73, + 0x65, 0x66, 0x75, 0x6c, 0x2e, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x00, 0x01, 0x12, 0x03, 0x0d, + 0x08, 0x23, 0x0a, 0x0b, 
0x0a, 0x04, 0x04, 0x00, 0x02, 0x00, 0x12, 0x03, 0x0e, 0x02, 0x2d, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x06, 0x12, 0x03, 0x0e, 0x02, 0x1e, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x00, 0x02, 0x00, 0x01, 0x12, 0x03, 0x0e, 0x1f, 0x28, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x00, 0x02, 0x00, 0x03, 0x12, 0x03, 0x0e, 0x2b, 0x2c, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x00, 0x02, + 0x01, 0x12, 0x03, 0x10, 0x02, 0x2b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x06, 0x12, + 0x03, 0x10, 0x02, 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x01, 0x12, 0x03, 0x10, + 0x16, 0x26, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x03, 0x12, 0x03, 0x10, 0x29, 0x2a, + 0x0a, 0x86, 0x01, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x02, 0x12, 0x03, 0x14, 0x02, 0x2c, 0x1a, 0x79, + 0x20, 0x54, 0x68, 0x65, 0x20, 0x66, 0x6f, 0x6c, 0x6c, 0x6f, 0x77, 0x69, 0x6e, 0x67, 0x20, 0x61, + 0x72, 0x65, 0x20, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x20, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x73, 0x0a, 0x20, 0x57, 0x65, 0x20, + 0x63, 0x61, 0x6e, 0x27, 0x74, 0x20, 0x68, 0x61, 0x76, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x45, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x20, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x20, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6c, 0x79, + 0x20, 0x62, 0x65, 0x63, 0x61, 0x75, 0x73, 0x65, 0x20, 0x6f, 0x66, 0x20, 0x69, 0x6d, 0x70, 0x6f, + 0x72, 0x74, 0x20, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, + 0x02, 0x04, 0x12, 0x03, 0x14, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x02, 0x06, + 0x12, 0x03, 0x14, 0x0b, 0x1a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x02, 0x01, 0x12, 0x03, + 0x14, 0x1b, 0x27, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x02, 0x03, 0x12, 0x03, 0x14, 0x2a, + 0x2b, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x03, 0x12, 0x03, 0x15, 0x02, 0x3b, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x00, 0x02, 0x03, 0x06, 0x12, 0x03, 0x15, 
0x02, 0x22, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x00, 0x02, 0x03, 0x01, 0x12, 0x03, 0x15, 0x23, 0x36, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, + 0x02, 0x03, 0x03, 0x12, 0x03, 0x15, 0x39, 0x3a, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x04, + 0x12, 0x03, 0x16, 0x02, 0x17, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x04, 0x05, 0x12, 0x03, + 0x16, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x04, 0x01, 0x12, 0x03, 0x16, 0x09, + 0x12, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x04, 0x03, 0x12, 0x03, 0x16, 0x15, 0x16, 0x0a, + 0xcd, 0x01, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x05, 0x12, 0x03, 0x1b, 0x02, 0x25, 0x1a, 0xbf, 0x01, + 0x20, 0x54, 0x68, 0x65, 0x20, 0x49, 0x44, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x4c, + 0x50, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, + 0x20, 0x74, 0x68, 0x65, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x74, + 0x68, 0x61, 0x74, 0x20, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x20, 0x74, 0x68, + 0x65, 0x20, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x2e, 0x0a, 0x20, 0x48, 0x65, 0x72, + 0x65, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x6e, 0x61, 0x6e, 0x63, 0x65, + 0x20, 0x69, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x0a, 0x20, 0x4c, + 0x61, 0x75, 0x6e, 0x63, 0x68, 0x20, 0x70, 0x6c, 0x61, 0x6e, 0x20, 0x49, 0x44, 0x73, 0x20, 0x61, + 0x72, 0x65, 0x20, 0x65, 0x61, 0x73, 0x69, 0x65, 0x72, 0x20, 0x74, 0x6f, 0x20, 0x67, 0x65, 0x74, + 0x20, 0x74, 0x68, 0x61, 0x6e, 0x20, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x20, 0x49, + 0x44, 0x73, 0x20, 0x73, 0x6f, 0x20, 0x77, 0x65, 0x27, 0x6c, 0x6c, 0x20, 0x75, 0x73, 0x65, 0x20, + 0x74, 0x68, 0x65, 0x73, 0x65, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x6e, 0x6f, 0x77, 0x2e, 0x0a, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x05, 0x06, 0x12, 0x03, 0x1b, 0x02, 0x11, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x00, 0x02, 0x05, 0x01, 0x12, 0x03, 0x1b, 0x12, 0x20, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 
0x00, 0x02, 0x05, 0x03, 0x12, 0x03, 0x1b, 0x23, 0x24, 0x0a, 0x5a, 0x0a, 0x04, 0x04, 0x00, 0x02, + 0x06, 0x12, 0x03, 0x1e, 0x02, 0x21, 0x1a, 0x4d, 0x20, 0x57, 0x65, 0x20, 0x63, 0x61, 0x6e, 0x27, + 0x74, 0x20, 0x68, 0x61, 0x76, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x45, 0x78, 0x65, 0x63, 0x75, + 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x20, 0x6f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x20, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6c, 0x79, 0x20, 0x62, 0x65, 0x63, + 0x61, 0x75, 0x73, 0x65, 0x20, 0x6f, 0x66, 0x20, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x20, 0x63, + 0x79, 0x63, 0x6c, 0x65, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x06, 0x06, 0x12, 0x03, + 0x1e, 0x02, 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x06, 0x01, 0x12, 0x03, 0x1e, 0x16, + 0x1c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x06, 0x03, 0x12, 0x03, 0x1e, 0x1f, 0x20, 0x0a, + 0x0a, 0x0a, 0x02, 0x04, 0x01, 0x12, 0x04, 0x21, 0x00, 0x36, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, + 0x01, 0x01, 0x12, 0x03, 0x21, 0x08, 0x1f, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x00, 0x12, + 0x03, 0x22, 0x02, 0x29, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x06, 0x12, 0x03, 0x22, + 0x02, 0x1a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x01, 0x12, 0x03, 0x22, 0x1b, 0x24, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x03, 0x12, 0x03, 0x22, 0x27, 0x28, 0x0a, 0x38, + 0x0a, 0x04, 0x04, 0x01, 0x02, 0x01, 0x12, 0x03, 0x25, 0x02, 0x30, 0x1a, 0x2b, 0x20, 0x54, 0x68, + 0x65, 0x20, 0x72, 0x65, 0x6c, 0x65, 0x76, 0x61, 0x6e, 0x74, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, + 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x69, 0x66, 0x20, 0x61, 0x70, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x62, 0x6c, 0x65, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x01, + 0x06, 0x12, 0x03, 0x25, 0x02, 0x1e, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x01, 0x01, 0x12, + 0x03, 0x25, 0x1f, 0x2b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x01, 0x03, 0x12, 0x03, 0x25, + 0x2e, 0x2f, 0x0a, 0x48, 0x0a, 0x04, 
0x04, 0x01, 0x02, 0x02, 0x12, 0x03, 0x28, 0x02, 0x2b, 0x1a, + 0x3b, 0x20, 0x54, 0x68, 0x65, 0x20, 0x74, 0x79, 0x70, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x74, 0x65, + 0x72, 0x66, 0x61, 0x63, 0x65, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, + 0x73, 0x6b, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x64, + 0x20, 0x74, 0x68, 0x65, 0x20, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x01, 0x02, 0x02, 0x06, 0x12, 0x03, 0x28, 0x02, 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, + 0x02, 0x02, 0x01, 0x12, 0x03, 0x28, 0x16, 0x26, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x02, + 0x03, 0x12, 0x03, 0x28, 0x29, 0x2a, 0x0a, 0x86, 0x01, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x03, 0x12, + 0x03, 0x2c, 0x02, 0x2c, 0x1a, 0x79, 0x20, 0x54, 0x68, 0x65, 0x20, 0x66, 0x6f, 0x6c, 0x6c, 0x6f, + 0x77, 0x69, 0x6e, 0x67, 0x20, 0x61, 0x72, 0x65, 0x20, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, + 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x20, 0x66, 0x69, 0x65, 0x6c, 0x64, + 0x73, 0x0a, 0x20, 0x57, 0x65, 0x20, 0x63, 0x61, 0x6e, 0x27, 0x74, 0x20, 0x68, 0x61, 0x76, 0x65, + 0x20, 0x74, 0x68, 0x65, 0x20, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x20, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x20, 0x64, 0x69, + 0x72, 0x65, 0x63, 0x74, 0x6c, 0x79, 0x20, 0x62, 0x65, 0x63, 0x61, 0x75, 0x73, 0x65, 0x20, 0x6f, + 0x66, 0x20, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x20, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x0a, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x03, 0x04, 0x12, 0x03, 0x2c, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x01, 0x02, 0x03, 0x06, 0x12, 0x03, 0x2c, 0x0b, 0x1a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x01, 0x02, 0x03, 0x01, 0x12, 0x03, 0x2c, 0x1b, 0x27, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, + 0x03, 0x03, 0x12, 0x03, 0x2c, 0x2a, 0x2b, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x04, 0x12, + 0x03, 0x2d, 0x02, 0x17, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x04, 
0x05, 0x12, 0x03, 0x2d, + 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x04, 0x01, 0x12, 0x03, 0x2d, 0x09, 0x12, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x04, 0x03, 0x12, 0x03, 0x2d, 0x15, 0x16, 0x0a, 0xcd, + 0x01, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x05, 0x12, 0x03, 0x32, 0x02, 0x25, 0x1a, 0xbf, 0x01, 0x20, + 0x54, 0x68, 0x65, 0x20, 0x49, 0x44, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x4c, 0x50, + 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x74, 0x68, + 0x61, 0x74, 0x20, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x20, 0x74, 0x68, 0x65, + 0x20, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x2e, 0x0a, 0x20, 0x48, 0x65, 0x72, 0x65, + 0x20, 0x66, 0x6f, 0x72, 0x20, 0x70, 0x72, 0x6f, 0x76, 0x65, 0x6e, 0x61, 0x6e, 0x63, 0x65, 0x20, + 0x69, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x0a, 0x20, 0x4c, 0x61, + 0x75, 0x6e, 0x63, 0x68, 0x20, 0x70, 0x6c, 0x61, 0x6e, 0x20, 0x49, 0x44, 0x73, 0x20, 0x61, 0x72, + 0x65, 0x20, 0x65, 0x61, 0x73, 0x69, 0x65, 0x72, 0x20, 0x74, 0x6f, 0x20, 0x67, 0x65, 0x74, 0x20, + 0x74, 0x68, 0x61, 0x6e, 0x20, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x20, 0x49, 0x44, + 0x73, 0x20, 0x73, 0x6f, 0x20, 0x77, 0x65, 0x27, 0x6c, 0x6c, 0x20, 0x75, 0x73, 0x65, 0x20, 0x74, + 0x68, 0x65, 0x73, 0x65, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x6e, 0x6f, 0x77, 0x2e, 0x0a, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x01, 0x02, 0x05, 0x06, 0x12, 0x03, 0x32, 0x02, 0x11, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x01, 0x02, 0x05, 0x01, 0x12, 0x03, 0x32, 0x12, 0x20, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, + 0x02, 0x05, 0x03, 0x12, 0x03, 0x32, 0x23, 0x24, 0x0a, 0x5a, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x06, + 0x12, 0x03, 0x35, 0x02, 0x21, 0x1a, 0x4d, 0x20, 0x57, 0x65, 0x20, 0x63, 0x61, 0x6e, 0x27, 0x74, + 0x20, 0x68, 0x61, 0x76, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x69, 0x6f, 
0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x20, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x20, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6c, 0x79, 0x20, 0x62, 0x65, 0x63, 0x61, + 0x75, 0x73, 0x65, 0x20, 0x6f, 0x66, 0x20, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x20, 0x63, 0x79, + 0x63, 0x6c, 0x65, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x06, 0x06, 0x12, 0x03, 0x35, + 0x02, 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x06, 0x01, 0x12, 0x03, 0x35, 0x16, 0x1c, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x06, 0x03, 0x12, 0x03, 0x35, 0x1f, 0x20, 0x0a, 0x0a, + 0x0a, 0x02, 0x04, 0x02, 0x12, 0x04, 0x38, 0x00, 0x3c, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x02, + 0x01, 0x12, 0x03, 0x38, 0x08, 0x1f, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x02, 0x02, 0x00, 0x12, 0x03, + 0x39, 0x02, 0x29, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, 0x06, 0x12, 0x03, 0x39, 0x02, + 0x1a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, 0x01, 0x12, 0x03, 0x39, 0x1b, 0x24, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, 0x03, 0x12, 0x03, 0x39, 0x27, 0x28, 0x0a, 0x5a, 0x0a, + 0x04, 0x04, 0x02, 0x02, 0x01, 0x12, 0x03, 0x3b, 0x02, 0x21, 0x1a, 0x4d, 0x20, 0x57, 0x65, 0x20, + 0x63, 0x61, 0x6e, 0x27, 0x74, 0x20, 0x68, 0x61, 0x76, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x45, + 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x20, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x20, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6c, 0x79, + 0x20, 0x62, 0x65, 0x63, 0x61, 0x75, 0x73, 0x65, 0x20, 0x6f, 0x66, 0x20, 0x69, 0x6d, 0x70, 0x6f, + 0x72, 0x74, 0x20, 0x63, 0x79, 0x63, 0x6c, 0x65, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, + 0x01, 0x06, 0x12, 0x03, 0x3b, 0x02, 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x01, 0x01, + 0x12, 0x03, 0x3b, 0x16, 0x1c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x01, 0x03, 0x12, 0x03, + 0x3b, 0x1f, 0x20, 0x0a, 0x4e, 0x0a, 0x02, 0x04, 0x03, 0x12, 0x04, 0x3f, 0x00, 0x4e, 0x01, 0x1a, + 0x42, 0x20, 0x54, 0x68, 0x69, 0x73, 0x20, 0x65, 
0x76, 0x65, 0x6e, 0x74, 0x20, 0x69, 0x73, 0x20, + 0x74, 0x6f, 0x20, 0x62, 0x65, 0x20, 0x73, 0x65, 0x6e, 0x74, 0x20, 0x62, 0x79, 0x20, 0x41, 0x64, + 0x6d, 0x69, 0x6e, 0x20, 0x61, 0x66, 0x74, 0x65, 0x72, 0x20, 0x69, 0x74, 0x20, 0x63, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x73, 0x20, 0x61, 0x6e, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x03, 0x01, 0x12, 0x03, 0x3f, 0x08, 0x20, 0x0a, + 0x25, 0x0a, 0x04, 0x04, 0x03, 0x02, 0x00, 0x12, 0x03, 0x41, 0x02, 0x34, 0x1a, 0x18, 0x20, 0x54, + 0x68, 0x65, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x63, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x64, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x00, 0x06, 0x12, + 0x03, 0x41, 0x02, 0x22, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x00, 0x01, 0x12, 0x03, 0x41, + 0x23, 0x2f, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x00, 0x03, 0x12, 0x03, 0x41, 0x32, 0x33, + 0x0a, 0x24, 0x0a, 0x04, 0x04, 0x03, 0x02, 0x01, 0x12, 0x03, 0x43, 0x02, 0x25, 0x1a, 0x17, 0x20, + 0x54, 0x68, 0x65, 0x20, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x20, 0x70, 0x6c, 0x61, 0x6e, 0x20, + 0x75, 0x73, 0x65, 0x64, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x01, 0x06, 0x12, + 0x03, 0x43, 0x02, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x01, 0x01, 0x12, 0x03, 0x43, + 0x12, 0x20, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x01, 0x03, 0x12, 0x03, 0x43, 0x23, 0x24, + 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x03, 0x02, 0x02, 0x12, 0x03, 0x45, 0x02, 0x22, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x03, 0x02, 0x02, 0x06, 0x12, 0x03, 0x45, 0x02, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x03, 0x02, 0x02, 0x01, 0x12, 0x03, 0x45, 0x12, 0x1d, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, + 0x02, 0x03, 0x12, 0x03, 0x45, 0x20, 0x21, 0x0a, 0x9f, 0x01, 0x0a, 0x04, 0x04, 0x03, 0x02, 0x03, + 0x12, 0x03, 0x48, 0x02, 0x2c, 0x1a, 0x91, 0x01, 0x20, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, + 0x74, 0x20, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x74, 0x68, 
0x65, 0x20, + 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, + 0x6f, 0x6e, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x77, 0x68, 0x69, 0x63, 0x68, 0x20, 0x77, 0x65, 0x20, + 0x68, 0x61, 0x76, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x66, 0x75, 0x6c, 0x6c, 0x20, 0x41, 0x72, + 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x20, 0x49, 0x44, 0x2e, 0x20, 0x54, 0x68, 0x65, 0x73, 0x65, + 0x20, 0x61, 0x72, 0x65, 0x20, 0x6c, 0x69, 0x6b, 0x65, 0x6c, 0x79, 0x20, 0x74, 0x68, 0x65, 0x20, + 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x20, 0x6f, 0x66, 0x20, 0x61, 0x72, 0x74, 0x69, 0x66, 0x61, + 0x63, 0x74, 0x20, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, + 0x61, 0x72, 0x65, 0x20, 0x72, 0x75, 0x6e, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, + 0x03, 0x04, 0x12, 0x03, 0x48, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x03, 0x06, + 0x12, 0x03, 0x48, 0x0b, 0x1a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x03, 0x01, 0x12, 0x03, + 0x48, 0x1b, 0x27, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x03, 0x03, 0x12, 0x03, 0x48, 0x2a, + 0x2b, 0x0a, 0xa7, 0x01, 0x0a, 0x04, 0x04, 0x03, 0x02, 0x04, 0x12, 0x03, 0x4b, 0x02, 0x28, 0x1a, + 0x99, 0x01, 0x20, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, 0x74, 0x20, 0x69, 0x6e, 0x70, 0x75, + 0x74, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, + 0x6f, 0x77, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x66, 0x6f, 0x72, + 0x20, 0x77, 0x68, 0x69, 0x63, 0x68, 0x20, 0x77, 0x65, 0x20, 0x6f, 0x6e, 0x6c, 0x79, 0x20, 0x68, + 0x61, 0x76, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x72, 0x61, 0x63, 0x6b, 0x69, 0x6e, 0x67, + 0x20, 0x62, 0x69, 0x74, 0x20, 0x74, 0x68, 0x61, 0x74, 0x27, 0x73, 0x20, 0x69, 0x6e, 0x73, 0x74, + 0x61, 0x6c, 0x6c, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x4c, + 0x69, 0x74, 0x65, 0x72, 0x61, 0x6c, 0x27, 0x73, 0x20, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x20, 0x62, 0x79, 
0x20, 0x74, 0x68, 0x65, 0x20, 0x41, 0x72, 0x74, 0x69, 0x66, 0x61, 0x63, + 0x74, 0x20, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x03, 0x02, 0x04, 0x04, 0x12, 0x03, 0x4b, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, + 0x04, 0x05, 0x12, 0x03, 0x4b, 0x0b, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x04, 0x01, + 0x12, 0x03, 0x4b, 0x12, 0x23, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x04, 0x03, 0x12, 0x03, + 0x4b, 0x26, 0x27, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x03, 0x02, 0x05, 0x12, 0x03, 0x4d, 0x02, 0x17, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x05, 0x05, 0x12, 0x03, 0x4d, 0x02, 0x08, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x03, 0x02, 0x05, 0x01, 0x12, 0x03, 0x4d, 0x09, 0x12, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x03, 0x02, 0x05, 0x03, 0x12, 0x03, 0x4d, 0x15, 0x16, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +]; +// @@protoc_insertion_point(module) \ No newline at end of file diff --git a/gen/rust/src/flyteidl2.plugins.kubeflow.rs b/gen/rust/src/flyteidl2.plugins.kubeflow.rs new file mode 100644 index 0000000000..23cf078360 --- /dev/null +++ b/gen/rust/src/flyteidl2.plugins.kubeflow.rs @@ -0,0 +1,836 @@ +// @generated +// This file is @generated by prost-build. +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct RunPolicy { + /// Defines the policy to kill pods after the job completes. Default to None. + #[prost(enumeration="CleanPodPolicy", tag="1")] + pub clean_pod_policy: i32, + /// TTL to clean up jobs. Default to infinite. + #[prost(int32, tag="2")] + pub ttl_seconds_after_finished: i32, + /// Specifies the duration in seconds relative to the startTime that the job may be active + /// before the system tries to terminate it; value must be positive integer. + #[prost(int32, tag="3")] + pub active_deadline_seconds: i32, + /// Number of retries before marking this job failed. 
+ #[prost(int32, tag="4")] + pub backoff_limit: i32, +} +#[pyo3::pyclass(dict, get_all, set_all)] +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum CleanPodPolicy { + CleanpodPolicyNone = 0, + CleanpodPolicyRunning = 1, + CleanpodPolicyAll = 2, +} +impl CleanPodPolicy { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + CleanPodPolicy::CleanpodPolicyNone => "CLEANPOD_POLICY_NONE", + CleanPodPolicy::CleanpodPolicyRunning => "CLEANPOD_POLICY_RUNNING", + CleanPodPolicy::CleanpodPolicyAll => "CLEANPOD_POLICY_ALL", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "CLEANPOD_POLICY_NONE" => Some(Self::CleanpodPolicyNone), + "CLEANPOD_POLICY_RUNNING" => Some(Self::CleanpodPolicyRunning), + "CLEANPOD_POLICY_ALL" => Some(Self::CleanpodPolicyAll), + _ => None, + } + } +} +/// Proto for plugin that enables distributed training using +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DistributedMpiTrainingTask { + /// Worker replicas spec + #[prost(message, optional, tag="1")] + pub worker_replicas: ::core::option::Option, + /// Master replicas spec + #[prost(message, optional, tag="2")] + pub launcher_replicas: ::core::option::Option, + /// RunPolicy encapsulates various runtime policies of the distributed training + /// job, for example how to clean up resources and how long the job can stay + /// active. 
+ #[prost(message, optional, tag="3")] + pub run_policy: ::core::option::Option, + /// Number of slots per worker + #[prost(int32, tag="4")] + pub slots: i32, +} +/// Replica specification for distributed MPI training +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DistributedMpiTrainingReplicaSpec { + /// 1~4 deprecated. Use common instead. + /// Number of replicas + #[deprecated] + #[prost(int32, tag="1")] + pub replicas: i32, + /// Image used for the replica group + #[deprecated] + #[prost(string, tag="2")] + pub image: ::prost::alloc::string::String, + /// Resources required for the replica group + #[deprecated] + #[prost(message, optional, tag="3")] + pub resources: ::core::option::Option, + /// Restart policy determines whether pods will be restarted when they exit + #[deprecated] + #[prost(enumeration="super::RestartPolicy", tag="4")] + pub restart_policy: i32, + /// MPI sometimes requires different command set for different replica groups + #[prost(string, repeated, tag="5")] + pub command: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + /// The common replica spec + #[prost(message, optional, tag="6")] + pub common: ::core::option::Option, +} +/// Custom proto for torch elastic config for distributed training using +/// +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ElasticConfig { + #[prost(string, tag="1")] + pub rdzv_backend: ::prost::alloc::string::String, + #[prost(int32, tag="2")] + pub min_replicas: i32, + #[prost(int32, tag="3")] + pub max_replicas: i32, + #[prost(int32, tag="4")] + pub nproc_per_node: i32, + #[prost(int32, tag="5")] + pub max_restarts: i32, +} +/// Proto for plugin that enables distributed training using +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, 
::prost::Message)] +pub struct DistributedPyTorchTrainingTask { + /// Worker replicas spec + #[prost(message, optional, tag="1")] + pub worker_replicas: ::core::option::Option, + /// Master replicas spec, master replicas can only have 1 replica + #[prost(message, optional, tag="2")] + pub master_replicas: ::core::option::Option, + /// RunPolicy encapsulates various runtime policies of the distributed training + /// job, for example how to clean up resources and how long the job can stay + /// active. + #[prost(message, optional, tag="3")] + pub run_policy: ::core::option::Option, + /// config for an elastic pytorch job + #[prost(message, optional, tag="4")] + pub elastic_config: ::core::option::Option, +} +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DistributedPyTorchTrainingReplicaSpec { + /// 1~4 deprecated. Use common instead. + /// Number of replicas + #[deprecated] + #[prost(int32, tag="1")] + pub replicas: i32, + /// Image used for the replica group + #[deprecated] + #[prost(string, tag="2")] + pub image: ::prost::alloc::string::String, + /// Resources required for the replica group + #[deprecated] + #[prost(message, optional, tag="3")] + pub resources: ::core::option::Option, + /// Restart policy determines whether pods will be restarted when they exit + #[deprecated] + #[prost(enumeration="super::RestartPolicy", tag="4")] + pub restart_policy: i32, + /// The common replica spec + #[prost(message, optional, tag="5")] + pub common: ::core::option::Option, +} +/// Proto for plugin that enables distributed training using +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DistributedTensorflowTrainingTask { + /// Worker replicas spec + #[prost(message, optional, tag="1")] + pub worker_replicas: ::core::option::Option, + /// Parameter server replicas spec + 
#[prost(message, optional, tag="2")] + pub ps_replicas: ::core::option::Option, + /// Chief replicas spec + #[prost(message, optional, tag="3")] + pub chief_replicas: ::core::option::Option, + /// RunPolicy encapsulates various runtime policies of the distributed training + /// job, for example how to clean up resources and how long the job can stay + /// active. + #[prost(message, optional, tag="4")] + pub run_policy: ::core::option::Option, + /// Evaluator replicas spec + #[prost(message, optional, tag="5")] + pub evaluator_replicas: ::core::option::Option, +} +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DistributedTensorflowTrainingReplicaSpec { + /// 1~4 deprecated. Use common instead. + /// Number of replicas + #[deprecated] + #[prost(int32, tag="1")] + pub replicas: i32, + /// Image used for the replica group + #[deprecated] + #[prost(string, tag="2")] + pub image: ::prost::alloc::string::String, + /// Resources required for the replica group + #[deprecated] + #[prost(message, optional, tag="3")] + pub resources: ::core::option::Option, + /// Restart policy determines whether pods will be restarted when they exit + #[deprecated] + #[prost(enumeration="super::RestartPolicy", tag="4")] + pub restart_policy: i32, + /// The common replica spec + #[prost(message, optional, tag="5")] + pub common: ::core::option::Option, +} +/// Encoded file descriptor set for the `flyteidl2.plugins.kubeflow` package +pub const FILE_DESCRIPTOR_SET: &[u8] = &[ + 0x0a, 0xb8, 0x0b, 0x0a, 0x27, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2f, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2f, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1a, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, + 0x6b, 0x75, 0x62, 0x65, 0x66, 
0x6c, 0x6f, 0x77, 0x22, 0xfb, 0x01, 0x0a, 0x09, 0x52, 0x75, 0x6e, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x54, 0x0a, 0x10, 0x63, 0x6c, 0x65, 0x61, 0x6e, 0x5f, + 0x70, 0x6f, 0x64, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x2a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x43, 0x6c, + 0x65, 0x61, 0x6e, 0x50, 0x6f, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0e, 0x63, 0x6c, + 0x65, 0x61, 0x6e, 0x50, 0x6f, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x3b, 0x0a, 0x1a, + 0x74, 0x74, 0x6c, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x5f, 0x61, 0x66, 0x74, 0x65, + 0x72, 0x5f, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x17, 0x74, 0x74, 0x6c, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x41, 0x66, 0x74, 0x65, + 0x72, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x12, 0x36, 0x0a, 0x17, 0x61, 0x63, 0x74, + 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x73, 0x65, 0x63, + 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x15, 0x61, 0x63, 0x74, 0x69, + 0x76, 0x65, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, + 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x5f, 0x6c, 0x69, 0x6d, + 0x69, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, + 0x66, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x2a, 0x60, 0x0a, 0x0e, 0x43, 0x6c, 0x65, 0x61, 0x6e, 0x50, + 0x6f, 0x64, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x18, 0x0a, 0x14, 0x43, 0x4c, 0x45, 0x41, + 0x4e, 0x50, 0x4f, 0x44, 0x5f, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x4e, 0x4f, 0x4e, 0x45, + 0x10, 0x00, 0x12, 0x1b, 0x0a, 0x17, 0x43, 0x4c, 0x45, 0x41, 0x4e, 0x50, 0x4f, 0x44, 0x5f, 0x50, + 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x52, 0x55, 0x4e, 0x4e, 0x49, 
0x4e, 0x47, 0x10, 0x01, 0x12, + 0x17, 0x0a, 0x13, 0x43, 0x4c, 0x45, 0x41, 0x4e, 0x50, 0x4f, 0x44, 0x5f, 0x50, 0x4f, 0x4c, 0x49, + 0x43, 0x59, 0x5f, 0x41, 0x4c, 0x4c, 0x10, 0x02, 0x42, 0xf7, 0x01, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, + 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x73, 0x2e, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x42, 0x0b, 0x43, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x73, 0x2f, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0xa2, 0x02, 0x03, 0x46, 0x50, 0x4b, + 0xaa, 0x02, 0x1a, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x50, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x4b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0xca, 0x02, 0x1a, + 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x73, 0x5c, 0x4b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0xe2, 0x02, 0x26, 0x46, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x5c, 0x4b, + 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0xea, 0x02, 0x1c, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x3a, + 0x3a, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x3a, 0x3a, 0x4b, 0x75, 0x62, 0x65, 0x66, 0x6c, + 0x6f, 0x77, 0x4a, 0x8e, 0x06, 0x0a, 0x06, 0x12, 0x04, 0x00, 0x00, 0x19, 0x01, 0x0a, 0x08, 0x0a, + 0x01, 0x0c, 0x12, 0x03, 0x00, 0x00, 0x12, 0x0a, 0x08, 0x0a, 0x01, 0x02, 0x12, 0x03, 0x02, 0x00, + 0x23, 0x0a, 0x08, 0x0a, 0x01, 0x08, 0x12, 0x03, 0x04, 0x00, 0x55, 0x0a, 0x09, 0x0a, 0x02, 0x08, + 0x0b, 
0x12, 0x03, 0x04, 0x00, 0x55, 0x0a, 0x0a, 0x0a, 0x02, 0x05, 0x00, 0x12, 0x04, 0x06, 0x00, + 0x0a, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x05, 0x00, 0x01, 0x12, 0x03, 0x06, 0x05, 0x13, 0x0a, 0x0b, + 0x0a, 0x04, 0x05, 0x00, 0x02, 0x00, 0x12, 0x03, 0x07, 0x02, 0x1b, 0x0a, 0x0c, 0x0a, 0x05, 0x05, + 0x00, 0x02, 0x00, 0x01, 0x12, 0x03, 0x07, 0x02, 0x16, 0x0a, 0x0c, 0x0a, 0x05, 0x05, 0x00, 0x02, + 0x00, 0x02, 0x12, 0x03, 0x07, 0x19, 0x1a, 0x0a, 0x0b, 0x0a, 0x04, 0x05, 0x00, 0x02, 0x01, 0x12, + 0x03, 0x08, 0x02, 0x1e, 0x0a, 0x0c, 0x0a, 0x05, 0x05, 0x00, 0x02, 0x01, 0x01, 0x12, 0x03, 0x08, + 0x02, 0x19, 0x0a, 0x0c, 0x0a, 0x05, 0x05, 0x00, 0x02, 0x01, 0x02, 0x12, 0x03, 0x08, 0x1c, 0x1d, + 0x0a, 0x0b, 0x0a, 0x04, 0x05, 0x00, 0x02, 0x02, 0x12, 0x03, 0x09, 0x02, 0x1a, 0x0a, 0x0c, 0x0a, + 0x05, 0x05, 0x00, 0x02, 0x02, 0x01, 0x12, 0x03, 0x09, 0x02, 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x05, + 0x00, 0x02, 0x02, 0x02, 0x12, 0x03, 0x09, 0x18, 0x19, 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x00, 0x12, + 0x04, 0x0c, 0x00, 0x19, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x00, 0x01, 0x12, 0x03, 0x0c, 0x08, + 0x11, 0x0a, 0x58, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x00, 0x12, 0x03, 0x0e, 0x02, 0x26, 0x1a, 0x4b, + 0x20, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x20, 0x74, 0x6f, 0x20, 0x6b, 0x69, 0x6c, 0x6c, 0x20, 0x70, 0x6f, 0x64, 0x73, + 0x20, 0x61, 0x66, 0x74, 0x65, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6a, 0x6f, 0x62, 0x20, 0x63, + 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x73, 0x2e, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x20, 0x74, 0x6f, 0x20, 0x4e, 0x6f, 0x6e, 0x65, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x00, 0x02, 0x00, 0x06, 0x12, 0x03, 0x0e, 0x02, 0x10, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, + 0x00, 0x01, 0x12, 0x03, 0x0e, 0x11, 0x21, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x03, + 0x12, 0x03, 0x0e, 0x24, 0x25, 0x0a, 0x39, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x01, 0x12, 0x03, 0x11, + 0x02, 0x27, 0x1a, 0x2c, 0x20, 0x54, 0x54, 
0x4c, 0x20, 0x74, 0x6f, 0x20, 0x63, 0x6c, 0x65, 0x61, + 0x6e, 0x20, 0x75, 0x70, 0x20, 0x6a, 0x6f, 0x62, 0x73, 0x2e, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x69, 0x6e, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x65, 0x2e, 0x0a, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x05, 0x12, 0x03, 0x11, 0x02, 0x07, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x01, 0x12, 0x03, 0x11, 0x08, 0x22, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x00, 0x02, 0x01, 0x03, 0x12, 0x03, 0x11, 0x25, 0x26, 0x0a, 0xb0, 0x01, 0x0a, 0x04, 0x04, + 0x00, 0x02, 0x02, 0x12, 0x03, 0x15, 0x02, 0x24, 0x1a, 0xa2, 0x01, 0x20, 0x53, 0x70, 0x65, 0x63, + 0x69, 0x66, 0x69, 0x65, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x20, 0x69, 0x6e, 0x20, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x20, 0x72, 0x65, + 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x20, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x74, 0x68, 0x65, + 0x20, 0x6a, 0x6f, 0x62, 0x20, 0x6d, 0x61, 0x79, 0x20, 0x62, 0x65, 0x20, 0x61, 0x63, 0x74, 0x69, + 0x76, 0x65, 0x0a, 0x20, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, + 0x79, 0x73, 0x74, 0x65, 0x6d, 0x20, 0x74, 0x72, 0x69, 0x65, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x74, + 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x65, 0x20, 0x69, 0x74, 0x3b, 0x20, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x20, 0x6d, 0x75, 0x73, 0x74, 0x20, 0x62, 0x65, 0x20, 0x70, 0x6f, 0x73, 0x69, 0x74, + 0x69, 0x76, 0x65, 0x20, 0x69, 0x6e, 0x74, 0x65, 0x67, 0x65, 0x72, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x00, 0x02, 0x02, 0x05, 0x12, 0x03, 0x15, 0x02, 0x07, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x00, 0x02, 0x02, 0x01, 0x12, 0x03, 0x15, 0x08, 0x1f, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, + 0x02, 0x03, 0x12, 0x03, 0x15, 0x22, 0x23, 0x0a, 0x40, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x03, 0x12, + 0x03, 0x18, 0x02, 0x1a, 0x1a, 0x33, 0x20, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 
0x20, 0x6f, 0x66, + 0x20, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x73, 0x20, 0x62, 0x65, 0x66, 0x6f, 0x72, 0x65, 0x20, + 0x6d, 0x61, 0x72, 0x6b, 0x69, 0x6e, 0x67, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x6a, 0x6f, 0x62, + 0x20, 0x66, 0x61, 0x69, 0x6c, 0x65, 0x64, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, + 0x03, 0x05, 0x12, 0x03, 0x18, 0x02, 0x07, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x03, 0x01, + 0x12, 0x03, 0x18, 0x08, 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x03, 0x03, 0x12, 0x03, + 0x18, 0x18, 0x19, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x0a, 0xaf, 0x14, 0x0a, 0x24, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x73, 0x2f, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x6d, 0x70, 0x69, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1a, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, + 0x1a, 0x1a, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, + 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2f, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2f, + 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xcc, 0x02, 0x0a, 0x1a, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, + 0x62, 0x75, 0x74, 0x65, 0x64, 0x4d, 0x50, 0x49, 0x54, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, + 0x54, 0x61, 0x73, 0x6b, 0x12, 0x66, 0x0a, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x72, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3d, 0x2e, + 0x66, 0x6c, 0x79, 
0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x73, 0x2e, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x69, 0x73, 0x74, 0x72, + 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x4d, 0x50, 0x49, 0x54, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, + 0x67, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, 0x70, 0x65, 0x63, 0x52, 0x0e, 0x77, 0x6f, + 0x72, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x6a, 0x0a, 0x11, + 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x6b, 0x75, 0x62, 0x65, + 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, + 0x4d, 0x50, 0x49, 0x54, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x53, 0x70, 0x65, 0x63, 0x52, 0x10, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x65, 0x72, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x44, 0x0a, 0x0a, 0x72, 0x75, 0x6e, 0x5f, + 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, + 0x2e, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x52, 0x75, 0x6e, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x52, 0x09, 0x72, 0x75, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x14, + 0x0a, 0x05, 0x73, 0x6c, 0x6f, 0x74, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x73, + 0x6c, 0x6f, 0x74, 0x73, 0x22, 0xbf, 0x02, 0x0a, 0x21, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, + 0x75, 0x74, 0x65, 0x64, 0x4d, 0x50, 0x49, 0x54, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, 0x70, 0x65, 0x63, 0x12, 0x1e, 0x0a, 0x08, 0x72, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x01, 0x20, 
0x01, 0x28, 0x05, 0x42, 0x02, 0x18, 0x01, + 0x52, 0x08, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x18, 0x0a, 0x05, 0x69, 0x6d, + 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x05, 0x69, + 0x6d, 0x61, 0x67, 0x65, 0x12, 0x3b, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x73, 0x42, 0x02, 0x18, 0x01, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x73, 0x12, 0x4b, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x52, 0x65, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x02, 0x18, 0x01, 0x52, + 0x0d, 0x72, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x18, + 0x0a, 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, + 0x07, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x12, 0x3c, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x43, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, 0x70, 0x65, 0x63, 0x52, 0x06, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x42, 0xf4, 0x01, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, 0x2e, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, + 0x2e, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x42, 0x08, 0x4d, 0x70, 0x69, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 
0x6f, + 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2f, 0x6b, 0x75, 0x62, + 0x65, 0x66, 0x6c, 0x6f, 0x77, 0xa2, 0x02, 0x03, 0x46, 0x50, 0x4b, 0xaa, 0x02, 0x1a, 0x46, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, + 0x4b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0xca, 0x02, 0x1a, 0x46, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x5c, 0x4b, 0x75, 0x62, + 0x65, 0x66, 0x6c, 0x6f, 0x77, 0xe2, 0x02, 0x26, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x32, 0x5c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x5c, 0x4b, 0x75, 0x62, 0x65, 0x66, 0x6c, + 0x6f, 0x77, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, + 0x1c, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x3a, 0x3a, 0x50, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x73, 0x3a, 0x3a, 0x4b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x4a, 0xf5, 0x0b, + 0x0a, 0x06, 0x12, 0x04, 0x00, 0x00, 0x2f, 0x01, 0x0a, 0x08, 0x0a, 0x01, 0x0c, 0x12, 0x03, 0x00, + 0x00, 0x12, 0x0a, 0x08, 0x0a, 0x01, 0x02, 0x12, 0x03, 0x02, 0x00, 0x23, 0x0a, 0x09, 0x0a, 0x02, + 0x03, 0x00, 0x12, 0x03, 0x04, 0x00, 0x24, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x01, 0x12, 0x03, 0x05, + 0x00, 0x28, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x02, 0x12, 0x03, 0x06, 0x00, 0x31, 0x0a, 0x08, 0x0a, + 0x01, 0x08, 0x12, 0x03, 0x08, 0x00, 0x55, 0x0a, 0x09, 0x0a, 0x02, 0x08, 0x0b, 0x12, 0x03, 0x08, + 0x00, 0x55, 0x0a, 0x6f, 0x0a, 0x02, 0x04, 0x00, 0x12, 0x04, 0x0b, 0x00, 0x19, 0x01, 0x1a, 0x63, + 0x20, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x20, 0x64, + 0x69, 0x73, 0x74, 0x72, 0x69, 
0x62, 0x75, 0x74, 0x65, 0x64, 0x20, 0x74, 0x72, 0x61, 0x69, 0x6e, + 0x69, 0x6e, 0x67, 0x20, 0x75, 0x73, 0x69, 0x6e, 0x67, 0x20, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, + 0x2f, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6b, 0x75, 0x62, + 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x6d, 0x70, 0x69, 0x2d, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x6f, 0x72, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x00, 0x01, 0x12, 0x03, 0x0b, 0x08, 0x22, 0x0a, + 0x23, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x00, 0x12, 0x03, 0x0d, 0x02, 0x38, 0x1a, 0x16, 0x20, 0x57, + 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x20, 0x73, + 0x70, 0x65, 0x63, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x06, 0x12, 0x03, 0x0d, + 0x02, 0x23, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x01, 0x12, 0x03, 0x0d, 0x24, 0x33, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x03, 0x12, 0x03, 0x0d, 0x36, 0x37, 0x0a, 0x23, + 0x0a, 0x04, 0x04, 0x00, 0x02, 0x01, 0x12, 0x03, 0x10, 0x02, 0x3a, 0x1a, 0x16, 0x20, 0x4d, 0x61, + 0x73, 0x74, 0x65, 0x72, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x20, 0x73, 0x70, + 0x65, 0x63, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x06, 0x12, 0x03, 0x10, 0x02, + 0x23, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x01, 0x12, 0x03, 0x10, 0x24, 0x35, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x03, 0x12, 0x03, 0x10, 0x38, 0x39, 0x0a, 0xae, 0x01, + 0x0a, 0x04, 0x04, 0x00, 0x02, 0x02, 0x12, 0x03, 0x15, 0x02, 0x1b, 0x1a, 0xa0, 0x01, 0x20, 0x52, + 0x75, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x20, 0x65, 0x6e, 0x63, 0x61, 0x70, 0x73, 0x75, + 0x6c, 0x61, 0x74, 0x65, 0x73, 0x20, 0x76, 0x61, 0x72, 0x69, 0x6f, 0x75, 0x73, 0x20, 0x72, 0x75, + 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x20, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x20, 0x6f, + 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x64, 0x20, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x0a, 
0x20, 0x6a, 0x6f, 0x62, 0x2c, + 0x20, 0x66, 0x6f, 0x72, 0x20, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x20, 0x68, 0x6f, 0x77, + 0x20, 0x74, 0x6f, 0x20, 0x63, 0x6c, 0x65, 0x61, 0x6e, 0x20, 0x75, 0x70, 0x20, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x68, 0x6f, 0x77, 0x20, 0x6c, + 0x6f, 0x6e, 0x67, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6a, 0x6f, 0x62, 0x20, 0x63, 0x61, 0x6e, 0x20, + 0x73, 0x74, 0x61, 0x79, 0x0a, 0x20, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x2e, 0x0a, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x00, 0x02, 0x02, 0x06, 0x12, 0x03, 0x15, 0x02, 0x0b, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x00, 0x02, 0x02, 0x01, 0x12, 0x03, 0x15, 0x0c, 0x16, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, + 0x02, 0x02, 0x03, 0x12, 0x03, 0x15, 0x19, 0x1a, 0x0a, 0x29, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x03, + 0x12, 0x03, 0x18, 0x02, 0x12, 0x1a, 0x1c, 0x20, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x20, 0x6f, + 0x66, 0x20, 0x73, 0x6c, 0x6f, 0x74, 0x73, 0x20, 0x70, 0x65, 0x72, 0x20, 0x77, 0x6f, 0x72, 0x6b, + 0x65, 0x72, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x03, 0x05, 0x12, 0x03, 0x18, 0x02, + 0x07, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x03, 0x01, 0x12, 0x03, 0x18, 0x08, 0x0d, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x03, 0x03, 0x12, 0x03, 0x18, 0x10, 0x11, 0x0a, 0x40, 0x0a, + 0x02, 0x04, 0x01, 0x12, 0x04, 0x1c, 0x00, 0x2f, 0x01, 0x1a, 0x34, 0x20, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x20, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x64, 0x20, 0x4d, 0x50, 0x49, 0x20, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x0a, 0x0a, + 0x0a, 0x0a, 0x03, 0x04, 0x01, 0x01, 0x12, 0x03, 0x1c, 0x08, 0x29, 0x0a, 0x46, 0x0a, 0x04, 0x04, + 0x01, 0x02, 0x00, 0x12, 0x03, 0x1f, 0x02, 0x29, 0x1a, 0x39, 0x20, 0x31, 0x7e, 0x34, 0x20, 0x64, + 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x20, 0x55, 0x73, 0x65, 0x20, 0x63, + 0x6f, 
0x6d, 0x6d, 0x6f, 0x6e, 0x20, 0x69, 0x6e, 0x73, 0x74, 0x65, 0x61, 0x64, 0x2e, 0x0a, 0x20, + 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x20, 0x6f, 0x66, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x73, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x05, 0x12, 0x03, 0x1f, 0x02, + 0x07, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x01, 0x12, 0x03, 0x1f, 0x08, 0x10, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x03, 0x12, 0x03, 0x1f, 0x13, 0x14, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x01, 0x02, 0x00, 0x08, 0x12, 0x03, 0x1f, 0x15, 0x28, 0x0a, 0x0d, 0x0a, 0x06, 0x04, + 0x01, 0x02, 0x00, 0x08, 0x03, 0x12, 0x03, 0x1f, 0x16, 0x27, 0x0a, 0x2f, 0x0a, 0x04, 0x04, 0x01, + 0x02, 0x01, 0x12, 0x03, 0x22, 0x02, 0x27, 0x1a, 0x22, 0x20, 0x49, 0x6d, 0x61, 0x67, 0x65, 0x20, + 0x75, 0x73, 0x65, 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x72, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x20, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x01, 0x02, 0x01, 0x05, 0x12, 0x03, 0x22, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, + 0x01, 0x01, 0x12, 0x03, 0x22, 0x09, 0x0e, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x01, 0x03, + 0x12, 0x03, 0x22, 0x11, 0x12, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x01, 0x08, 0x12, 0x03, + 0x22, 0x13, 0x26, 0x0a, 0x0d, 0x0a, 0x06, 0x04, 0x01, 0x02, 0x01, 0x08, 0x03, 0x12, 0x03, 0x22, + 0x14, 0x25, 0x0a, 0x37, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x02, 0x12, 0x03, 0x25, 0x02, 0x33, 0x1a, + 0x2a, 0x20, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x20, 0x72, 0x65, 0x71, 0x75, + 0x69, 0x72, 0x65, 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x72, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x20, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x01, 0x02, 0x02, 0x06, 0x12, 0x03, 0x25, 0x02, 0x10, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, + 0x02, 0x01, 0x12, 0x03, 0x25, 0x11, 0x1a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x02, 0x03, + 0x12, 0x03, 0x25, 0x1d, 0x1e, 0x0a, 0x0c, 
0x0a, 0x05, 0x04, 0x01, 0x02, 0x02, 0x08, 0x12, 0x03, + 0x25, 0x1f, 0x32, 0x0a, 0x0d, 0x0a, 0x06, 0x04, 0x01, 0x02, 0x02, 0x08, 0x03, 0x12, 0x03, 0x25, + 0x20, 0x31, 0x0a, 0x56, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x03, 0x12, 0x03, 0x28, 0x02, 0x37, 0x1a, + 0x49, 0x20, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x20, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x20, 0x64, 0x65, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x65, 0x73, 0x20, 0x77, 0x68, 0x65, 0x74, + 0x68, 0x65, 0x72, 0x20, 0x70, 0x6f, 0x64, 0x73, 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x62, 0x65, + 0x20, 0x72, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x20, 0x77, 0x68, 0x65, 0x6e, 0x20, + 0x74, 0x68, 0x65, 0x79, 0x20, 0x65, 0x78, 0x69, 0x74, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, + 0x02, 0x03, 0x06, 0x12, 0x03, 0x28, 0x02, 0x0f, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x03, + 0x01, 0x12, 0x03, 0x28, 0x10, 0x1e, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x03, 0x03, 0x12, + 0x03, 0x28, 0x21, 0x22, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x03, 0x08, 0x12, 0x03, 0x28, + 0x23, 0x36, 0x0a, 0x0d, 0x0a, 0x06, 0x04, 0x01, 0x02, 0x03, 0x08, 0x03, 0x12, 0x03, 0x28, 0x24, + 0x35, 0x0a, 0x58, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x04, 0x12, 0x03, 0x2b, 0x02, 0x1e, 0x1a, 0x4b, + 0x20, 0x4d, 0x50, 0x49, 0x20, 0x73, 0x6f, 0x6d, 0x65, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x20, 0x72, + 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x73, 0x20, 0x64, 0x69, 0x66, 0x66, 0x65, 0x72, 0x65, 0x6e, + 0x74, 0x20, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x20, 0x73, 0x65, 0x74, 0x20, 0x66, 0x6f, + 0x72, 0x20, 0x64, 0x69, 0x66, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x74, 0x20, 0x72, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x20, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x01, 0x02, 0x04, 0x04, 0x12, 0x03, 0x2b, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, + 0x04, 0x05, 0x12, 0x03, 0x2b, 0x0b, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x04, 0x01, + 0x12, 0x03, 0x2b, 0x12, 0x19, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x04, 
0x03, 0x12, 0x03, + 0x2b, 0x1c, 0x1d, 0x0a, 0x26, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x05, 0x12, 0x03, 0x2e, 0x02, 0x1f, + 0x1a, 0x19, 0x20, 0x54, 0x68, 0x65, 0x20, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x20, 0x72, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x20, 0x73, 0x70, 0x65, 0x63, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x01, 0x02, 0x05, 0x06, 0x12, 0x03, 0x2e, 0x02, 0x13, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, + 0x05, 0x01, 0x12, 0x03, 0x2e, 0x14, 0x1a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x05, 0x03, + 0x12, 0x03, 0x2e, 0x1d, 0x1e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x0a, 0xef, 0x18, + 0x0a, 0x28, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x73, 0x2f, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x70, 0x79, 0x74, + 0x6f, 0x72, 0x63, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1a, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x6b, 0x75, + 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x1a, 0x1a, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x27, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2f, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x63, + 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xc1, 0x01, 0x0a, 0x0d, + 0x45, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x21, 0x0a, + 0x0c, 0x72, 0x64, 0x7a, 0x76, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x64, 0x7a, 0x76, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, + 0x12, 0x21, 0x0a, 
0x0c, 0x6d, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x6d, 0x69, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6e, 0x70, 0x72, 0x6f, 0x63, 0x5f, + 0x70, 0x65, 0x72, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, + 0x6e, 0x70, 0x72, 0x6f, 0x63, 0x50, 0x65, 0x72, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x73, 0x22, + 0x90, 0x03, 0x0a, 0x1e, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x50, + 0x79, 0x54, 0x6f, 0x72, 0x63, 0x68, 0x54, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x54, 0x61, + 0x73, 0x6b, 0x12, 0x6a, 0x0a, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, + 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, + 0x75, 0x74, 0x65, 0x64, 0x50, 0x79, 0x54, 0x6f, 0x72, 0x63, 0x68, 0x54, 0x72, 0x61, 0x69, 0x6e, + 0x69, 0x6e, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, 0x70, 0x65, 0x63, 0x52, 0x0e, + 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x6a, + 0x0a, 0x0f, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x41, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 
0x6e, 0x73, 0x2e, 0x6b, 0x75, 0x62, 0x65, + 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, + 0x50, 0x79, 0x54, 0x6f, 0x72, 0x63, 0x68, 0x54, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, 0x70, 0x65, 0x63, 0x52, 0x0e, 0x6d, 0x61, 0x73, 0x74, + 0x65, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x44, 0x0a, 0x0a, 0x72, 0x75, + 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, + 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x73, 0x2e, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x52, 0x75, 0x6e, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x09, 0x72, 0x75, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x12, 0x50, 0x0a, 0x0e, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x5f, 0x63, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x6b, 0x75, 0x62, + 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x45, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x52, 0x0d, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x22, 0xa9, 0x02, 0x0a, 0x25, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x64, 0x50, 0x79, 0x54, 0x6f, 0x72, 0x63, 0x68, 0x54, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, + 0x67, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, 0x70, 0x65, 0x63, 0x12, 0x1e, 0x0a, 0x08, + 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x42, 0x02, + 0x18, 0x01, 0x52, 0x08, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x18, 0x0a, 0x05, + 0x69, 0x6d, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, + 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x12, 0x3b, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 
0x72, + 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x73, 0x42, 0x02, 0x18, 0x01, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x73, 0x12, 0x4b, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, + 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x42, 0x02, 0x18, + 0x01, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x12, 0x3c, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x24, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x53, 0x70, 0x65, 0x63, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x42, 0xf8, + 0x01, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, + 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, + 0x77, 0x42, 0x0c, 0x50, 0x79, 0x74, 0x6f, 0x72, 0x63, 0x68, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, + 0x01, 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x76, 0x32, 0x2f, + 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, + 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2f, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, + 0x77, 0xa2, 0x02, 0x03, 0x46, 0x50, 0x4b, 0xaa, 0x02, 0x1a, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x2e, 0x50, 
0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x4b, 0x75, 0x62, 0x65, + 0x66, 0x6c, 0x6f, 0x77, 0xca, 0x02, 0x1a, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, + 0x5c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x5c, 0x4b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, + 0x77, 0xe2, 0x02, 0x26, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x50, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x73, 0x5c, 0x4b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x5c, 0x47, + 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x1c, 0x46, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x3a, 0x3a, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x3a, + 0x3a, 0x4b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x4a, 0xbb, 0x0e, 0x0a, 0x06, 0x12, 0x04, + 0x00, 0x00, 0x35, 0x01, 0x0a, 0x08, 0x0a, 0x01, 0x0c, 0x12, 0x03, 0x00, 0x00, 0x12, 0x0a, 0x08, + 0x0a, 0x01, 0x02, 0x12, 0x03, 0x02, 0x00, 0x23, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x00, 0x12, 0x03, + 0x04, 0x00, 0x24, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x01, 0x12, 0x03, 0x05, 0x00, 0x28, 0x0a, 0x09, + 0x0a, 0x02, 0x03, 0x02, 0x12, 0x03, 0x06, 0x00, 0x31, 0x0a, 0x08, 0x0a, 0x01, 0x08, 0x12, 0x03, + 0x08, 0x00, 0x55, 0x0a, 0x09, 0x0a, 0x02, 0x08, 0x0b, 0x12, 0x03, 0x08, 0x00, 0x55, 0x0a, 0xb8, + 0x01, 0x0a, 0x02, 0x04, 0x00, 0x12, 0x04, 0x0c, 0x00, 0x12, 0x01, 0x1a, 0xab, 0x01, 0x20, 0x43, + 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x20, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x20, 0x66, 0x6f, 0x72, 0x20, + 0x74, 0x6f, 0x72, 0x63, 0x68, 0x20, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x20, 0x63, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, + 0x75, 0x74, 0x65, 0x64, 0x20, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x20, 0x75, 0x73, + 0x69, 0x6e, 0x67, 0x0a, 0x20, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, + 0x2f, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x2d, 0x6f, 
0x70, 0x65, 0x72, 0x61, 0x74, + 0x6f, 0x72, 0x2f, 0x62, 0x6c, 0x6f, 0x62, 0x2f, 0x6d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x2f, 0x70, + 0x6b, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, + 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x79, 0x74, 0x6f, 0x72, 0x63, 0x68, 0x5f, + 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x67, 0x6f, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x00, 0x01, + 0x12, 0x03, 0x0c, 0x08, 0x15, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x00, 0x12, 0x03, 0x0d, + 0x02, 0x1a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x05, 0x12, 0x03, 0x0d, 0x02, 0x08, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x01, 0x12, 0x03, 0x0d, 0x09, 0x15, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x03, 0x12, 0x03, 0x0d, 0x18, 0x19, 0x0a, 0x0b, 0x0a, 0x04, + 0x04, 0x00, 0x02, 0x01, 0x12, 0x03, 0x0e, 0x02, 0x19, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, + 0x01, 0x05, 0x12, 0x03, 0x0e, 0x02, 0x07, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x01, + 0x12, 0x03, 0x0e, 0x08, 0x14, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x03, 0x12, 0x03, + 0x0e, 0x17, 0x18, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x02, 0x12, 0x03, 0x0f, 0x02, 0x19, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x02, 0x05, 0x12, 0x03, 0x0f, 0x02, 0x07, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x00, 0x02, 0x02, 0x01, 0x12, 0x03, 0x0f, 0x08, 0x14, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x00, 0x02, 0x02, 0x03, 0x12, 0x03, 0x0f, 0x17, 0x18, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x00, + 0x02, 0x03, 0x12, 0x03, 0x10, 0x02, 0x1b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x03, 0x05, + 0x12, 0x03, 0x10, 0x02, 0x07, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x03, 0x01, 0x12, 0x03, + 0x10, 0x08, 0x16, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x03, 0x03, 0x12, 0x03, 0x10, 0x19, + 0x1a, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x04, 0x12, 0x03, 0x11, 0x02, 0x19, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x00, 0x02, 0x04, 0x05, 0x12, 0x03, 0x11, 0x02, 0x07, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 
0x00, 0x02, 0x04, 0x01, 0x12, 0x03, 0x11, 0x08, 0x14, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, + 0x02, 0x04, 0x03, 0x12, 0x03, 0x11, 0x17, 0x18, 0x0a, 0x73, 0x0a, 0x02, 0x04, 0x01, 0x12, 0x04, + 0x15, 0x00, 0x23, 0x01, 0x1a, 0x67, 0x20, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x20, 0x66, 0x6f, 0x72, + 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x65, 0x6e, 0x61, + 0x62, 0x6c, 0x65, 0x73, 0x20, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, + 0x20, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x20, 0x75, 0x73, 0x69, 0x6e, 0x67, 0x20, + 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x70, 0x79, 0x74, 0x6f, + 0x72, 0x63, 0x68, 0x2d, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x0a, 0x0a, 0x0a, 0x0a, + 0x03, 0x04, 0x01, 0x01, 0x12, 0x03, 0x15, 0x08, 0x26, 0x0a, 0x23, 0x0a, 0x04, 0x04, 0x01, 0x02, + 0x00, 0x12, 0x03, 0x17, 0x02, 0x3c, 0x1a, 0x16, 0x20, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x20, + 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x20, 0x73, 0x70, 0x65, 0x63, 0x0a, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x06, 0x12, 0x03, 0x17, 0x02, 0x27, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x01, 0x02, 0x00, 0x01, 0x12, 0x03, 0x17, 0x28, 0x37, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, + 0x02, 0x00, 0x03, 0x12, 0x03, 0x17, 0x3a, 0x3b, 0x0a, 0x4c, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x01, + 0x12, 0x03, 0x1a, 0x02, 0x3c, 0x1a, 0x3f, 0x20, 0x4d, 0x61, 0x73, 0x74, 0x65, 0x72, 0x20, 0x72, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x20, 0x73, 0x70, 0x65, 0x63, 0x2c, 0x20, 0x6d, 0x61, + 0x73, 0x74, 0x65, 0x72, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x20, 0x63, 0x61, + 0x6e, 0x20, 0x6f, 0x6e, 0x6c, 0x79, 0x20, 0x68, 0x61, 0x76, 0x65, 0x20, 0x31, 0x20, 0x72, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x01, 0x06, 0x12, + 0x03, 0x1a, 0x02, 0x27, 0x0a, 0x0c, 0x0a, 
0x05, 0x04, 0x01, 0x02, 0x01, 0x01, 0x12, 0x03, 0x1a, + 0x28, 0x37, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x01, 0x03, 0x12, 0x03, 0x1a, 0x3a, 0x3b, + 0x0a, 0xae, 0x01, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x02, 0x12, 0x03, 0x1f, 0x02, 0x1b, 0x1a, 0xa0, + 0x01, 0x20, 0x52, 0x75, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x20, 0x65, 0x6e, 0x63, 0x61, + 0x70, 0x73, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x20, 0x76, 0x61, 0x72, 0x69, 0x6f, 0x75, 0x73, + 0x20, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x20, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, + 0x73, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, + 0x75, 0x74, 0x65, 0x64, 0x20, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x0a, 0x20, 0x6a, + 0x6f, 0x62, 0x2c, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x20, + 0x68, 0x6f, 0x77, 0x20, 0x74, 0x6f, 0x20, 0x63, 0x6c, 0x65, 0x61, 0x6e, 0x20, 0x75, 0x70, 0x20, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x68, 0x6f, + 0x77, 0x20, 0x6c, 0x6f, 0x6e, 0x67, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6a, 0x6f, 0x62, 0x20, 0x63, + 0x61, 0x6e, 0x20, 0x73, 0x74, 0x61, 0x79, 0x0a, 0x20, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x2e, + 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x02, 0x06, 0x12, 0x03, 0x1f, 0x02, 0x0b, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x02, 0x01, 0x12, 0x03, 0x1f, 0x0c, 0x16, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x01, 0x02, 0x02, 0x03, 0x12, 0x03, 0x1f, 0x19, 0x1a, 0x0a, 0x30, 0x0a, 0x04, 0x04, + 0x01, 0x02, 0x03, 0x12, 0x03, 0x22, 0x02, 0x23, 0x1a, 0x23, 0x20, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x6e, 0x20, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, + 0x20, 0x70, 0x79, 0x74, 0x6f, 0x72, 0x63, 0x68, 0x20, 0x6a, 0x6f, 0x62, 0x0a, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x01, 0x02, 0x03, 0x06, 0x12, 0x03, 0x22, 0x02, 0x0f, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x01, 0x02, 0x03, 0x01, 0x12, 0x03, 0x22, 0x10, 0x1e, 0x0a, 0x0c, 0x0a, 0x05, 
0x04, 0x01, 0x02, + 0x03, 0x03, 0x12, 0x03, 0x22, 0x21, 0x22, 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x02, 0x12, 0x04, 0x25, + 0x00, 0x35, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x02, 0x01, 0x12, 0x03, 0x25, 0x08, 0x2d, 0x0a, + 0x46, 0x0a, 0x04, 0x04, 0x02, 0x02, 0x00, 0x12, 0x03, 0x28, 0x02, 0x29, 0x1a, 0x39, 0x20, 0x31, + 0x7e, 0x34, 0x20, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x20, 0x55, + 0x73, 0x65, 0x20, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x20, 0x69, 0x6e, 0x73, 0x74, 0x65, 0x61, + 0x64, 0x2e, 0x0a, 0x20, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x20, 0x6f, 0x66, 0x20, 0x72, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, 0x05, + 0x12, 0x03, 0x28, 0x02, 0x07, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, 0x01, 0x12, 0x03, + 0x28, 0x08, 0x10, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, 0x03, 0x12, 0x03, 0x28, 0x13, + 0x14, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, 0x08, 0x12, 0x03, 0x28, 0x15, 0x28, 0x0a, + 0x0d, 0x0a, 0x06, 0x04, 0x02, 0x02, 0x00, 0x08, 0x03, 0x12, 0x03, 0x28, 0x16, 0x27, 0x0a, 0x2f, + 0x0a, 0x04, 0x04, 0x02, 0x02, 0x01, 0x12, 0x03, 0x2b, 0x02, 0x27, 0x1a, 0x22, 0x20, 0x49, 0x6d, + 0x61, 0x67, 0x65, 0x20, 0x75, 0x73, 0x65, 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, + 0x20, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x20, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x0a, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x01, 0x05, 0x12, 0x03, 0x2b, 0x02, 0x08, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x02, 0x02, 0x01, 0x01, 0x12, 0x03, 0x2b, 0x09, 0x0e, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x02, 0x02, 0x01, 0x03, 0x12, 0x03, 0x2b, 0x11, 0x12, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, + 0x01, 0x08, 0x12, 0x03, 0x2b, 0x13, 0x26, 0x0a, 0x0d, 0x0a, 0x06, 0x04, 0x02, 0x02, 0x01, 0x08, + 0x03, 0x12, 0x03, 0x2b, 0x14, 0x25, 0x0a, 0x37, 0x0a, 0x04, 0x04, 0x02, 0x02, 0x02, 0x12, 0x03, + 0x2e, 0x02, 0x33, 0x1a, 0x2a, 0x20, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x20, + 0x72, 0x65, 0x71, 
0x75, 0x69, 0x72, 0x65, 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, + 0x20, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x20, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x0a, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x02, 0x06, 0x12, 0x03, 0x2e, 0x02, 0x10, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x02, 0x02, 0x02, 0x01, 0x12, 0x03, 0x2e, 0x11, 0x1a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x02, 0x02, 0x02, 0x03, 0x12, 0x03, 0x2e, 0x1d, 0x1e, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, + 0x02, 0x08, 0x12, 0x03, 0x2e, 0x1f, 0x32, 0x0a, 0x0d, 0x0a, 0x06, 0x04, 0x02, 0x02, 0x02, 0x08, + 0x03, 0x12, 0x03, 0x2e, 0x20, 0x31, 0x0a, 0x56, 0x0a, 0x04, 0x04, 0x02, 0x02, 0x03, 0x12, 0x03, + 0x31, 0x02, 0x37, 0x1a, 0x49, 0x20, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x20, 0x70, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x20, 0x64, 0x65, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x65, 0x73, 0x20, + 0x77, 0x68, 0x65, 0x74, 0x68, 0x65, 0x72, 0x20, 0x70, 0x6f, 0x64, 0x73, 0x20, 0x77, 0x69, 0x6c, + 0x6c, 0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x20, 0x77, + 0x68, 0x65, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x79, 0x20, 0x65, 0x78, 0x69, 0x74, 0x0a, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x02, 0x02, 0x03, 0x06, 0x12, 0x03, 0x31, 0x02, 0x0f, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x02, 0x02, 0x03, 0x01, 0x12, 0x03, 0x31, 0x10, 0x1e, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, + 0x02, 0x03, 0x03, 0x12, 0x03, 0x31, 0x21, 0x22, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x03, + 0x08, 0x12, 0x03, 0x31, 0x23, 0x36, 0x0a, 0x0d, 0x0a, 0x06, 0x04, 0x02, 0x02, 0x03, 0x08, 0x03, + 0x12, 0x03, 0x31, 0x24, 0x35, 0x0a, 0x26, 0x0a, 0x04, 0x04, 0x02, 0x02, 0x04, 0x12, 0x03, 0x34, + 0x02, 0x1f, 0x1a, 0x19, 0x20, 0x54, 0x68, 0x65, 0x20, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x20, + 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x20, 0x73, 0x70, 0x65, 0x63, 0x0a, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x02, 0x02, 0x04, 0x06, 0x12, 0x03, 0x34, 0x02, 0x13, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x02, 0x02, 0x04, 0x01, 0x12, 0x03, 0x34, 0x14, 0x1a, 
0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, + 0x04, 0x03, 0x12, 0x03, 0x34, 0x1d, 0x1e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x0a, + 0x8b, 0x15, 0x0a, 0x2b, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2f, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x74, + 0x65, 0x6e, 0x73, 0x6f, 0x72, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x1a, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x73, 0x2e, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x1a, 0x1a, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x74, 0x61, 0x73, 0x6b, + 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, + 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x27, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2f, 0x6b, 0x75, 0x62, 0x65, 0x66, + 0x6c, 0x6f, 0x77, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0xa1, 0x04, 0x0a, 0x21, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, + 0x54, 0x65, 0x6e, 0x73, 0x6f, 0x72, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x72, 0x61, 0x69, 0x6e, 0x69, + 0x6e, 0x67, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x6d, 0x0a, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, + 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x44, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x73, 0x2e, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x69, 0x73, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x54, 0x65, 0x6e, 0x73, 0x6f, 0x72, 0x66, 0x6c, + 0x6f, 0x77, 0x54, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x69, 
0x63, + 0x61, 0x53, 0x70, 0x65, 0x63, 0x52, 0x0e, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x65, 0x0a, 0x0b, 0x70, 0x73, 0x5f, 0x72, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x6b, + 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x65, 0x64, 0x54, 0x65, 0x6e, 0x73, 0x6f, 0x72, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x72, 0x61, + 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, 0x70, 0x65, 0x63, + 0x52, 0x0a, 0x70, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x6b, 0x0a, 0x0e, + 0x63, 0x68, 0x69, 0x65, 0x66, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, + 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, + 0x77, 0x2e, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x54, 0x65, 0x6e, + 0x73, 0x6f, 0x72, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, 0x70, 0x65, 0x63, 0x52, 0x0d, 0x63, 0x68, 0x69, 0x65, + 0x66, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x44, 0x0a, 0x0a, 0x72, 0x75, 0x6e, + 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x73, 0x2e, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x52, 0x75, 0x6e, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x52, 0x09, 0x72, 0x75, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, + 0x73, 0x0a, 0x12, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x73, 
0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x44, 0x2e, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, + 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, + 0x75, 0x74, 0x65, 0x64, 0x54, 0x65, 0x6e, 0x73, 0x6f, 0x72, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x72, + 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, 0x70, 0x65, + 0x63, 0x52, 0x11, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x73, 0x22, 0xac, 0x02, 0x0a, 0x28, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, + 0x75, 0x74, 0x65, 0x64, 0x54, 0x65, 0x6e, 0x73, 0x6f, 0x72, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x72, + 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, 0x70, 0x65, + 0x63, 0x12, 0x1e, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x05, 0x42, 0x02, 0x18, 0x01, 0x52, 0x08, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x73, 0x12, 0x18, 0x0a, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x02, 0x18, 0x01, 0x52, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x12, 0x3b, 0x0a, 0x09, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x42, 0x02, 0x18, 0x01, 0x52, 0x09, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x12, 0x4b, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x74, + 0x61, 0x72, 0x74, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x20, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0d, 0x72, 0x65, 0x73, 
0x74, 0x61, 0x72, 0x74, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x3c, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x53, 0x70, 0x65, 0x63, 0x52, 0x06, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x42, 0xfb, 0x01, 0x0a, 0x1e, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x6b, 0x75, + 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x42, 0x0f, 0x54, 0x65, 0x6e, 0x73, 0x6f, 0x72, 0x66, 0x6c, + 0x6f, 0x77, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3e, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, + 0x2f, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0xa2, 0x02, 0x03, 0x46, 0x50, 0x4b, 0xaa, + 0x02, 0x1a, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x50, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x73, 0x2e, 0x4b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0xca, 0x02, 0x1a, 0x46, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, + 0x5c, 0x4b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0xe2, 0x02, 0x26, 0x46, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x5c, 0x4b, 0x75, + 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0xea, 0x02, 0x1c, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x3a, 0x3a, + 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x3a, 0x3a, 0x4b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, + 0x77, 
0x4a, 0x81, 0x0b, 0x0a, 0x06, 0x12, 0x04, 0x00, 0x00, 0x2e, 0x01, 0x0a, 0x08, 0x0a, 0x01, + 0x0c, 0x12, 0x03, 0x00, 0x00, 0x12, 0x0a, 0x08, 0x0a, 0x01, 0x02, 0x12, 0x03, 0x02, 0x00, 0x23, + 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x00, 0x12, 0x03, 0x04, 0x00, 0x24, 0x0a, 0x09, 0x0a, 0x02, 0x03, + 0x01, 0x12, 0x03, 0x05, 0x00, 0x28, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x02, 0x12, 0x03, 0x06, 0x00, + 0x31, 0x0a, 0x08, 0x0a, 0x01, 0x08, 0x12, 0x03, 0x08, 0x00, 0x55, 0x0a, 0x09, 0x0a, 0x02, 0x08, + 0x0b, 0x12, 0x03, 0x08, 0x00, 0x55, 0x0a, 0x6e, 0x0a, 0x02, 0x04, 0x00, 0x12, 0x04, 0x0b, 0x00, + 0x1c, 0x01, 0x1a, 0x62, 0x20, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x65, 0x6e, 0x61, 0x62, 0x6c, + 0x65, 0x73, 0x20, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x20, 0x74, + 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x20, 0x75, 0x73, 0x69, 0x6e, 0x67, 0x20, 0x68, 0x74, + 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x74, 0x66, 0x2d, 0x6f, 0x70, 0x65, + 0x72, 0x61, 0x74, 0x6f, 0x72, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x00, 0x01, 0x12, 0x03, 0x0b, + 0x08, 0x29, 0x0a, 0x23, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x00, 0x12, 0x03, 0x0d, 0x02, 0x3f, 0x1a, + 0x16, 0x20, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x73, 0x20, 0x73, 0x70, 0x65, 0x63, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x06, + 0x12, 0x03, 0x0d, 0x02, 0x2a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x01, 0x12, 0x03, + 0x0d, 0x2b, 0x3a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x03, 0x12, 0x03, 0x0d, 0x3d, + 0x3e, 0x0a, 0x2d, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x01, 0x12, 0x03, 0x10, 0x02, 0x3b, 0x1a, 0x20, + 0x20, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x20, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x69, 
0x63, 0x61, 0x73, 0x20, 0x73, 0x70, 0x65, 0x63, 0x0a, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x06, 0x12, 0x03, 0x10, 0x02, 0x2a, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x01, 0x12, 0x03, 0x10, 0x2b, 0x36, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x00, 0x02, 0x01, 0x03, 0x12, 0x03, 0x10, 0x39, 0x3a, 0x0a, 0x22, 0x0a, 0x04, 0x04, 0x00, + 0x02, 0x02, 0x12, 0x03, 0x13, 0x02, 0x3e, 0x1a, 0x15, 0x20, 0x43, 0x68, 0x69, 0x65, 0x66, 0x20, + 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x20, 0x73, 0x70, 0x65, 0x63, 0x0a, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x00, 0x02, 0x02, 0x06, 0x12, 0x03, 0x13, 0x02, 0x2a, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x00, 0x02, 0x02, 0x01, 0x12, 0x03, 0x13, 0x2b, 0x39, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, + 0x02, 0x02, 0x03, 0x12, 0x03, 0x13, 0x3c, 0x3d, 0x0a, 0xae, 0x01, 0x0a, 0x04, 0x04, 0x00, 0x02, + 0x03, 0x12, 0x03, 0x18, 0x02, 0x1b, 0x1a, 0xa0, 0x01, 0x20, 0x52, 0x75, 0x6e, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x20, 0x65, 0x6e, 0x63, 0x61, 0x70, 0x73, 0x75, 0x6c, 0x61, 0x74, 0x65, 0x73, + 0x20, 0x76, 0x61, 0x72, 0x69, 0x6f, 0x75, 0x73, 0x20, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, + 0x20, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x69, 0x65, 0x73, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, + 0x20, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x20, 0x74, 0x72, 0x61, + 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x0a, 0x20, 0x6a, 0x6f, 0x62, 0x2c, 0x20, 0x66, 0x6f, 0x72, 0x20, + 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x20, 0x68, 0x6f, 0x77, 0x20, 0x74, 0x6f, 0x20, 0x63, + 0x6c, 0x65, 0x61, 0x6e, 0x20, 0x75, 0x70, 0x20, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x73, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x68, 0x6f, 0x77, 0x20, 0x6c, 0x6f, 0x6e, 0x67, 0x20, 0x74, + 0x68, 0x65, 0x20, 0x6a, 0x6f, 0x62, 0x20, 0x63, 0x61, 0x6e, 0x20, 0x73, 0x74, 0x61, 0x79, 0x0a, + 0x20, 0x61, 0x63, 0x74, 0x69, 0x76, 0x65, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, + 0x03, 0x06, 0x12, 0x03, 0x18, 0x02, 0x0b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 
0x02, 0x03, 0x01, + 0x12, 0x03, 0x18, 0x0c, 0x16, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x03, 0x03, 0x12, 0x03, + 0x18, 0x19, 0x1a, 0x0a, 0x26, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x04, 0x12, 0x03, 0x1b, 0x02, 0x42, + 0x1a, 0x19, 0x20, 0x45, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x6f, 0x72, 0x20, 0x72, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x73, 0x20, 0x73, 0x70, 0x65, 0x63, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x00, 0x02, 0x04, 0x06, 0x12, 0x03, 0x1b, 0x02, 0x2a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, + 0x04, 0x01, 0x12, 0x03, 0x1b, 0x2b, 0x3d, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x04, 0x03, + 0x12, 0x03, 0x1b, 0x40, 0x41, 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x01, 0x12, 0x04, 0x1e, 0x00, 0x2e, + 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x01, 0x01, 0x12, 0x03, 0x1e, 0x08, 0x30, 0x0a, 0x46, 0x0a, + 0x04, 0x04, 0x01, 0x02, 0x00, 0x12, 0x03, 0x21, 0x02, 0x29, 0x1a, 0x39, 0x20, 0x31, 0x7e, 0x34, + 0x20, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x20, 0x55, 0x73, 0x65, + 0x20, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x20, 0x69, 0x6e, 0x73, 0x74, 0x65, 0x61, 0x64, 0x2e, + 0x0a, 0x20, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x20, 0x6f, 0x66, 0x20, 0x72, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x73, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x05, 0x12, 0x03, + 0x21, 0x02, 0x07, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x01, 0x12, 0x03, 0x21, 0x08, + 0x10, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x03, 0x12, 0x03, 0x21, 0x13, 0x14, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x08, 0x12, 0x03, 0x21, 0x15, 0x28, 0x0a, 0x0d, 0x0a, + 0x06, 0x04, 0x01, 0x02, 0x00, 0x08, 0x03, 0x12, 0x03, 0x21, 0x16, 0x27, 0x0a, 0x2f, 0x0a, 0x04, + 0x04, 0x01, 0x02, 0x01, 0x12, 0x03, 0x24, 0x02, 0x27, 0x1a, 0x22, 0x20, 0x49, 0x6d, 0x61, 0x67, + 0x65, 0x20, 0x75, 0x73, 0x65, 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x72, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x20, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x0a, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x01, 
0x02, 0x01, 0x05, 0x12, 0x03, 0x24, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x01, 0x02, 0x01, 0x01, 0x12, 0x03, 0x24, 0x09, 0x0e, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, + 0x01, 0x03, 0x12, 0x03, 0x24, 0x11, 0x12, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x01, 0x08, + 0x12, 0x03, 0x24, 0x13, 0x26, 0x0a, 0x0d, 0x0a, 0x06, 0x04, 0x01, 0x02, 0x01, 0x08, 0x03, 0x12, + 0x03, 0x24, 0x14, 0x25, 0x0a, 0x37, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x02, 0x12, 0x03, 0x27, 0x02, + 0x33, 0x1a, 0x2a, 0x20, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x20, 0x72, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x72, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x20, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x0a, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x01, 0x02, 0x02, 0x06, 0x12, 0x03, 0x27, 0x02, 0x10, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x01, 0x02, 0x02, 0x01, 0x12, 0x03, 0x27, 0x11, 0x1a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, + 0x02, 0x03, 0x12, 0x03, 0x27, 0x1d, 0x1e, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x02, 0x08, + 0x12, 0x03, 0x27, 0x1f, 0x32, 0x0a, 0x0d, 0x0a, 0x06, 0x04, 0x01, 0x02, 0x02, 0x08, 0x03, 0x12, + 0x03, 0x27, 0x20, 0x31, 0x0a, 0x56, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x03, 0x12, 0x03, 0x2a, 0x02, + 0x37, 0x1a, 0x49, 0x20, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x20, 0x70, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x20, 0x64, 0x65, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x65, 0x73, 0x20, 0x77, 0x68, + 0x65, 0x74, 0x68, 0x65, 0x72, 0x20, 0x70, 0x6f, 0x64, 0x73, 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, + 0x62, 0x65, 0x20, 0x72, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x20, 0x77, 0x68, 0x65, + 0x6e, 0x20, 0x74, 0x68, 0x65, 0x79, 0x20, 0x65, 0x78, 0x69, 0x74, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x01, 0x02, 0x03, 0x06, 0x12, 0x03, 0x2a, 0x02, 0x0f, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, + 0x02, 0x03, 0x01, 0x12, 0x03, 0x2a, 0x10, 0x1e, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x03, + 0x03, 0x12, 0x03, 0x2a, 0x21, 0x22, 0x0a, 0x0c, 0x0a, 
0x05, 0x04, 0x01, 0x02, 0x03, 0x08, 0x12, + 0x03, 0x2a, 0x23, 0x36, 0x0a, 0x0d, 0x0a, 0x06, 0x04, 0x01, 0x02, 0x03, 0x08, 0x03, 0x12, 0x03, + 0x2a, 0x24, 0x35, 0x0a, 0x26, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x04, 0x12, 0x03, 0x2d, 0x02, 0x1f, + 0x1a, 0x19, 0x20, 0x54, 0x68, 0x65, 0x20, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x20, 0x72, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x20, 0x73, 0x70, 0x65, 0x63, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x01, 0x02, 0x04, 0x06, 0x12, 0x03, 0x2d, 0x02, 0x13, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, + 0x04, 0x01, 0x12, 0x03, 0x2d, 0x14, 0x1a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x04, 0x03, + 0x12, 0x03, 0x2d, 0x1d, 0x1e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +]; +// @@protoc_insertion_point(module) \ No newline at end of file diff --git a/gen/rust/src/flyteidl2.plugins.rs b/gen/rust/src/flyteidl2.plugins.rs index 9e57d6d6a6..7c6f502052 100644 --- a/gen/rust/src/flyteidl2.plugins.rs +++ b/gen/rust/src/flyteidl2.plugins.rs @@ -1,5 +1,52 @@ // @generated // This file is @generated by prost-build. 
+#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CommonReplicaSpec { + /// Number of replicas + #[prost(int32, tag="1")] + pub replicas: i32, + /// Image used for the replica group + #[prost(string, tag="2")] + pub image: ::prost::alloc::string::String, + /// Resources required for the replica group + #[prost(message, optional, tag="3")] + pub resources: ::core::option::Option, + /// RestartPolicy determines whether pods will be restarted when they exit + #[prost(enumeration="RestartPolicy", tag="4")] + pub restart_policy: i32, +} +#[pyo3::pyclass(dict, get_all, set_all)] +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum RestartPolicy { + Never = 0, + OnFailure = 1, + Always = 2, +} +impl RestartPolicy { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + RestartPolicy::Never => "RESTART_POLICY_NEVER", + RestartPolicy::OnFailure => "RESTART_POLICY_ON_FAILURE", + RestartPolicy::Always => "RESTART_POLICY_ALWAYS", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "RESTART_POLICY_NEVER" => Some(Self::Never), + "RESTART_POLICY_ON_FAILURE" => Some(Self::OnFailure), + "RESTART_POLICY_ALWAYS" => Some(Self::Always), + _ => None, + } + } +} /// Represents a subset of runtime task execution metadata that are relevant to external plugins. 
/// /// ID of the task execution @@ -356,6 +403,39 @@ pub struct DaskWorkerGroup { #[prost(message, optional, tag="3")] pub resources: ::core::option::Option, } +/// MPI operator proposal +/// Custom proto for plugin that enables distributed training using +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct DistributedMpiTrainingTask { + /// number of worker spawned in the cluster for this job + #[prost(int32, tag="1")] + pub num_workers: i32, + /// number of launcher replicas spawned in the cluster for this job + /// The launcher pod invokes mpirun and communicates with worker pods through MPI. + #[prost(int32, tag="2")] + pub num_launcher_replicas: i32, + /// number of slots per worker used in hostfile. + /// The available slots (GPUs) in each pod. + #[prost(int32, tag="3")] + pub slots: i32, +} +/// This message works with the 'presto' task type in the SDK and is the object that will be in the 'custom' field +/// of a Presto task's TaskTemplate +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct PrestoQuery { + #[prost(string, tag="1")] + pub routing_group: ::prost::alloc::string::String, + #[prost(string, tag="2")] + pub catalog: ::prost::alloc::string::String, + #[prost(string, tag="3")] + pub schema: ::prost::alloc::string::String, + #[prost(string, tag="4")] + pub statement: ::prost::alloc::string::String, +} /// Custom proto for torch elastic config for distributed training using /// #[pyo3::pyclass(dict, get_all, set_all)] @@ -385,6 +465,42 @@ pub struct DistributedPyTorchTrainingTask { #[prost(message, optional, tag="2")] pub elastic_config: ::core::option::Option, } +/// Defines a query to execute on a hive cluster. 
+#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct HiveQuery { + #[prost(string, tag="1")] + pub query: ::prost::alloc::string::String, + #[prost(uint32, tag="2")] + pub timeout_sec: u32, + #[prost(uint32, tag="3")] + pub retry_count: u32, +} +/// Defines a collection of hive queries. +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct HiveQueryCollection { + #[prost(message, repeated, tag="2")] + pub queries: ::prost::alloc::vec::Vec, +} +/// This message works with the 'hive' task type in the SDK and is the object that will be in the 'custom' field +/// of a hive task's TaskTemplate +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct QuboleHiveJob { + #[prost(string, tag="1")] + pub cluster_label: ::prost::alloc::string::String, + #[deprecated] + #[prost(message, optional, tag="2")] + pub query_collection: ::core::option::Option, + #[prost(string, repeated, tag="3")] + pub tags: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, + #[prost(message, optional, tag="4")] + pub query: ::core::option::Option, +} /// RayJobSpec defines the desired state of RayJob #[pyo3::pyclass(dict, get_all, set_all)] #[allow(clippy::derive_partial_eq_without_eq)] @@ -540,413 +656,695 @@ pub struct SparkJob { #[prost(message, optional, tag="11")] pub executor_pod: ::core::option::Option, } +/// Custom proto for plugin that enables distributed training using +#[pyo3::pyclass(dict, get_all, set_all)] +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, Copy, PartialEq, ::prost::Message)] +pub struct DistributedTensorflowTrainingTask { + /// number of worker replicas spawned in the cluster for this job + #[prost(int32, tag="1")] + pub workers: i32, + /// PS -> Parameter server + /// 
number of ps replicas spawned in the cluster for this job + #[prost(int32, tag="2")] + pub ps_replicas: i32, + /// number of chief replicas spawned in the cluster for this job + #[prost(int32, tag="3")] + pub chief_replicas: i32, + /// number of evaluator replicas spawned in the cluster for this job + #[prost(int32, tag="4")] + pub evaluator_replicas: i32, +} /// Encoded file descriptor set for the `flyteidl2.plugins` package pub const FILE_DESCRIPTOR_SET: &[u8] = &[ - 0x0a, 0xf6, 0x66, 0x0a, 0x21, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, - 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, - 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x1a, 0x1e, 0x66, 0x6c, 0x79, 0x74, 0x65, - 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x66, 0x6c, 0x79, 0x74, 0x65, - 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, - 0x66, 0x69, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x66, 0x6c, 0x79, 0x74, - 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x6d, 0x65, 0x74, 0x72, 0x69, - 0x63, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, - 0x64, 0x6c, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, - 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1a, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, - 0x6c, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x74, - 0x61, 0x73, 0x6b, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, - 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, - 0xe9, 0x06, 0x0a, 0x15, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x53, 0x0a, 0x11, 0x74, 0x61, 0x73, - 0x6b, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, - 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, 0x0f, 0x74, - 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1c, - 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x4c, 0x0a, 0x06, - 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x66, - 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, - 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x5b, 0x0a, 0x0b, 0x61, 0x6e, - 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 
0x32, - 0x39, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x73, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x6b, 0x38, 0x73, 0x5f, 0x73, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x6b, 0x38, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x77, 0x0a, 0x15, 0x65, 0x6e, 0x76, 0x69, 0x72, - 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, - 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, - 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x45, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x2e, 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x61, 0x72, 0x69, - 0x61, 0x62, 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x65, 0x6e, 0x76, 0x69, - 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, - 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, - 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x41, 0x74, 0x74, 0x65, 0x6d, - 0x70, 0x74, 0x73, 0x12, 0x24, 0x0a, 0x0d, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x72, 0x75, 0x70, 0x74, - 0x69, 0x62, 0x6c, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x69, 0x6e, 0x74, 0x65, - 0x72, 0x72, 0x75, 0x70, 0x74, 0x69, 0x62, 0x6c, 0x65, 0x12, 0x46, 0x0a, 0x1f, 0x69, 0x6e, 0x74, - 0x65, 0x72, 0x72, 0x75, 0x70, 
0x74, 0x69, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x61, 0x69, 0x6c, 0x75, - 0x72, 0x65, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x09, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x1d, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x72, 0x75, 0x70, 0x74, 0x69, 0x62, 0x6c, - 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, - 0x64, 0x12, 0x34, 0x0a, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, 0x0b, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, - 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, 0x08, 0x69, - 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x1a, 0x3e, 0x0a, 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, - 0x38, 0x01, 0x1a, 0x47, 0x0a, 0x19, 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, - 0x74, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc0, 0x02, 0x0a, 0x11, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 
0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x2e, 0x0a, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x16, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x74, 0x61, - 0x73, 0x6b, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x52, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, - 0x73, 0x12, 0x38, 0x0a, 0x08, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, - 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, - 0x65, 0x52, 0x08, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x6f, - 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0c, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, - 0x12, 0x60, 0x0a, 0x17, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x28, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x15, 0x74, 0x61, 0x73, - 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x12, 0x3a, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, - 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x39, - 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, - 0x6f, 
0x6e, 0x73, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x22, 0xc7, 0x02, 0x0a, 0x13, 0x43, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, - 0x72, 0x12, 0x38, 0x0a, 0x08, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, - 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, - 0x65, 0x52, 0x08, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x6f, - 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0c, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, - 0x12, 0x60, 0x0a, 0x17, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, - 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x28, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, + 0x0a, 0xbf, 0x09, 0x0a, 0x1e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x1a, 0x1a, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0xc7, 0x01, 0x0a, 0x11, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x53, 0x70, 0x65, 0x63, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x73, 0x18, 0x01, 0x20, 
0x01, 0x28, 0x05, 0x52, 0x08, 0x72, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x09, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x73, 0x12, 0x47, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, + 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0d, 0x72, + 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x2a, 0x63, 0x0a, 0x0d, + 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x18, 0x0a, + 0x14, 0x52, 0x45, 0x53, 0x54, 0x41, 0x52, 0x54, 0x5f, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, + 0x4e, 0x45, 0x56, 0x45, 0x52, 0x10, 0x00, 0x12, 0x1d, 0x0a, 0x19, 0x52, 0x45, 0x53, 0x54, 0x41, + 0x52, 0x54, 0x5f, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x4f, 0x4e, 0x5f, 0x46, 0x41, 0x49, + 0x4c, 0x55, 0x52, 0x45, 0x10, 0x01, 0x12, 0x19, 0x0a, 0x15, 0x52, 0x45, 0x53, 0x54, 0x41, 0x52, + 0x54, 0x5f, 0x50, 0x4f, 0x4c, 0x49, 0x43, 0x59, 0x5f, 0x41, 0x4c, 0x57, 0x41, 0x59, 0x53, 0x10, + 0x02, 0x42, 0xc0, 0x01, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x42, 0x0b, 0x43, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, 
0x72, 0x67, 0x2f, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x73, 0xa2, 0x02, 0x03, 0x46, 0x50, 0x58, 0xaa, 0x02, 0x11, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0xca, 0x02, 0x11, 0x46, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0xe2, + 0x02, 0x1d, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x50, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x73, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, + 0x02, 0x12, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x3a, 0x3a, 0x50, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x73, 0x4a, 0xf3, 0x04, 0x0a, 0x06, 0x12, 0x04, 0x00, 0x00, 0x1a, 0x01, 0x0a, + 0x08, 0x0a, 0x01, 0x0c, 0x12, 0x03, 0x00, 0x00, 0x12, 0x0a, 0x08, 0x0a, 0x01, 0x02, 0x12, 0x03, + 0x02, 0x00, 0x1a, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x00, 0x12, 0x03, 0x04, 0x00, 0x24, 0x0a, 0x08, + 0x0a, 0x01, 0x08, 0x12, 0x03, 0x06, 0x00, 0x4c, 0x0a, 0x09, 0x0a, 0x02, 0x08, 0x0b, 0x12, 0x03, + 0x06, 0x00, 0x4c, 0x0a, 0x0a, 0x0a, 0x02, 0x05, 0x00, 0x12, 0x04, 0x08, 0x00, 0x0c, 0x01, 0x0a, + 0x0a, 0x0a, 0x03, 0x05, 0x00, 0x01, 0x12, 0x03, 0x08, 0x05, 0x12, 0x0a, 0x0b, 0x0a, 0x04, 0x05, + 0x00, 0x02, 0x00, 0x12, 0x03, 0x09, 0x02, 0x1b, 0x0a, 0x0c, 0x0a, 0x05, 0x05, 0x00, 0x02, 0x00, + 0x01, 0x12, 0x03, 0x09, 0x02, 0x16, 0x0a, 0x0c, 0x0a, 0x05, 0x05, 0x00, 0x02, 0x00, 0x02, 0x12, + 0x03, 0x09, 0x19, 0x1a, 0x0a, 0x0b, 0x0a, 0x04, 0x05, 0x00, 0x02, 0x01, 0x12, 0x03, 0x0a, 0x02, + 0x20, 0x0a, 0x0c, 0x0a, 0x05, 0x05, 0x00, 0x02, 0x01, 0x01, 0x12, 0x03, 0x0a, 0x02, 0x1b, 0x0a, + 0x0c, 0x0a, 0x05, 0x05, 0x00, 0x02, 0x01, 0x02, 0x12, 0x03, 0x0a, 0x1e, 0x1f, 0x0a, 0x0b, 0x0a, + 0x04, 0x05, 0x00, 0x02, 0x02, 0x12, 0x03, 0x0b, 0x02, 0x1c, 0x0a, 0x0c, 0x0a, 0x05, 0x05, 0x00, + 0x02, 0x02, 0x01, 
0x12, 0x03, 0x0b, 0x02, 0x17, 0x0a, 0x0c, 0x0a, 0x05, 0x05, 0x00, 0x02, 0x02, + 0x02, 0x12, 0x03, 0x0b, 0x1a, 0x1b, 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x00, 0x12, 0x04, 0x0e, 0x00, + 0x1a, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x00, 0x01, 0x12, 0x03, 0x0e, 0x08, 0x19, 0x0a, 0x21, + 0x0a, 0x04, 0x04, 0x00, 0x02, 0x00, 0x12, 0x03, 0x10, 0x02, 0x15, 0x1a, 0x14, 0x20, 0x4e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x20, 0x6f, 0x66, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, + 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x05, 0x12, 0x03, 0x10, 0x02, 0x07, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x01, 0x12, 0x03, 0x10, 0x08, 0x10, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x00, 0x02, 0x00, 0x03, 0x12, 0x03, 0x10, 0x13, 0x14, 0x0a, 0x2f, 0x0a, 0x04, 0x04, + 0x00, 0x02, 0x01, 0x12, 0x03, 0x13, 0x02, 0x13, 0x1a, 0x22, 0x20, 0x49, 0x6d, 0x61, 0x67, 0x65, + 0x20, 0x75, 0x73, 0x65, 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x72, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x20, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x00, 0x02, 0x01, 0x05, 0x12, 0x03, 0x13, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, + 0x02, 0x01, 0x01, 0x12, 0x03, 0x13, 0x09, 0x0e, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, + 0x03, 0x12, 0x03, 0x13, 0x11, 0x12, 0x0a, 0x37, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x02, 0x12, 0x03, + 0x16, 0x02, 0x1f, 0x1a, 0x2a, 0x20, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x20, + 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, + 0x20, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x20, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x0a, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x02, 0x06, 0x12, 0x03, 0x16, 0x02, 0x10, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x00, 0x02, 0x02, 0x01, 0x12, 0x03, 0x16, 0x11, 0x1a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x00, 0x02, 0x02, 0x03, 0x12, 0x03, 0x16, 0x1d, 0x1e, 0x0a, 0x55, 0x0a, 0x04, 0x04, 0x00, 0x02, + 0x03, 0x12, 0x03, 0x19, 0x02, 0x23, 0x1a, 0x48, 0x20, 
0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x20, 0x64, 0x65, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, 0x65, + 0x73, 0x20, 0x77, 0x68, 0x65, 0x74, 0x68, 0x65, 0x72, 0x20, 0x70, 0x6f, 0x64, 0x73, 0x20, 0x77, + 0x69, 0x6c, 0x6c, 0x20, 0x62, 0x65, 0x20, 0x72, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, + 0x20, 0x77, 0x68, 0x65, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x79, 0x20, 0x65, 0x78, 0x69, 0x74, 0x0a, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x03, 0x06, 0x12, 0x03, 0x19, 0x02, 0x0f, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x00, 0x02, 0x03, 0x01, 0x12, 0x03, 0x19, 0x10, 0x1e, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x00, 0x02, 0x03, 0x03, 0x12, 0x03, 0x19, 0x21, 0x22, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, 0x0a, 0xf6, 0x66, 0x0a, 0x21, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, + 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x1a, 0x1e, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x65, 0x78, 0x65, 0x63, + 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x69, 0x64, 0x65, 0x6e, + 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x6d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1d, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x73, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1a, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x73, 
0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, + 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x22, 0xe9, 0x06, 0x0a, 0x15, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x53, 0x0a, 0x11, 0x74, + 0x61, 0x73, 0x6b, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, + 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, 0x52, + 0x0f, 0x74, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, + 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x4c, + 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, + 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x73, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, + 0x4d, 0x65, 0x74, 0x61, 0x64, 
0x61, 0x74, 0x61, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x5b, 0x0a, 0x0b, + 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x39, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x15, 0x74, 0x61, 0x73, + 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x6b, 0x38, 0x73, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x6b, 0x38, 0x73, 0x53, 0x65, 0x72, 0x76, 0x69, + 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x77, 0x0a, 0x15, 0x65, 0x6e, 0x76, + 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, + 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x12, 0x33, 0x0a, 0x16, 0x6d, 0x61, 0x78, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, - 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x13, 0x6d, 0x61, 0x78, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x53, 0x69, - 0x7a, 0x65, 0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 
0x1a, 0x2e, 0x66, 0x6c, - 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x22, 0xdc, 0x01, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x72, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x44, 0x0a, 0x0d, 0x74, - 0x61, 0x73, 0x6b, 0x5f, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, - 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x61, 0x74, 0x65, 0x67, - 0x6f, 0x72, 0x79, 0x52, 0x0c, 0x74, 0x61, 0x73, 0x6b, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, - 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, - 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, - 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x3a, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x61, 0x2e, 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x61, + 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x14, 0x65, 0x6e, + 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, + 0x65, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, + 0x74, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x41, 0x74, 0x74, + 0x65, 0x6d, 0x70, 0x74, 0x73, 0x12, 0x24, 0x0a, 0x0d, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x72, 0x75, + 0x70, 0x74, 0x69, 0x62, 0x6c, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x69, 0x6e, + 0x74, 0x65, 
0x72, 0x72, 0x75, 0x70, 0x74, 0x69, 0x62, 0x6c, 0x65, 0x12, 0x46, 0x0a, 0x1f, 0x69, + 0x6e, 0x74, 0x65, 0x72, 0x72, 0x75, 0x70, 0x74, 0x69, 0x62, 0x6c, 0x65, 0x5f, 0x66, 0x61, 0x69, + 0x6c, 0x75, 0x72, 0x65, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x1d, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x72, 0x75, 0x70, 0x74, 0x69, + 0x62, 0x6c, 0x65, 0x46, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, + 0x6f, 0x6c, 0x64, 0x12, 0x34, 0x0a, 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x52, + 0x08, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, + 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3e, 0x0a, 0x10, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x47, 0x0a, 0x19, 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, + 0x65, 0x6e, 0x74, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, + 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 
0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xc0, 0x02, + 0x0a, 0x11, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x2e, 0x0a, 0x06, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, + 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x49, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x52, 0x06, 0x69, 0x6e, 0x70, + 0x75, 0x74, 0x73, 0x12, 0x38, 0x0a, 0x08, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x65, 0x6d, 0x70, 0x6c, + 0x61, 0x74, 0x65, 0x52, 0x08, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x12, 0x23, 0x0a, + 0x0d, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x50, 0x72, 0x65, 0x66, + 0x69, 0x78, 0x12, 0x60, 0x0a, 0x17, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, + 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x15, 0x74, + 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x12, 0x3a, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 
0x6f, 0x6e, + 0x22, 0x39, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x22, 0xc7, 0x02, 0x0a, 0x13, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x12, 0x38, 0x0a, 0x08, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x65, 0x6d, 0x70, 0x6c, + 0x61, 0x74, 0x65, 0x52, 0x08, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x12, 0x23, 0x0a, + 0x0d, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x50, 0x72, 0x65, 0x66, + 0x69, 0x78, 0x12, 0x60, 0x0a, 0x17, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x65, 0x78, 0x65, 0x63, 0x75, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, + 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x15, 0x74, + 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x12, 0x33, 0x0a, 0x16, 0x6d, 0x61, 0x78, 0x5f, 0x64, 0x61, 0x74, 0x61, + 0x73, 0x65, 0x74, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62, 0x79, 0x74, 0x65, 0x73, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x13, 0x6d, 0x61, 0x78, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, + 0x53, 0x69, 0x7a, 0x65, 
0x42, 0x79, 0x74, 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xdc, 0x01, 0x0a, 0x0e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, + 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x44, 0x0a, + 0x0d, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, + 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x61, 0x74, + 0x65, 0x67, 0x6f, 0x72, 0x79, 0x52, 0x0c, 0x74, 0x61, 0x73, 0x6b, 0x43, 0x61, 0x74, 0x65, 0x67, + 0x6f, 0x72, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x70, 0x72, + 0x65, 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x6f, 0x75, 0x74, 0x70, + 0x75, 0x74, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x3a, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x4a, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 
0x1b, 0x2e, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x52, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x22, 0x82, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x31, 0x0a, + 0x07, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x2e, + 0x4f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x52, 0x07, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, + 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x34, 0x0a, 0x09, 0x6c, 0x6f, + 0x67, 0x5f, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x54, + 0x61, 0x73, 0x6b, 0x4c, 0x6f, 0x67, 0x52, 0x08, 0x6c, 0x6f, 0x67, 0x4c, 0x69, 0x6e, 0x6b, 0x73, + 0x12, 0x39, 0x0a, 0x05, 0x70, 0x68, 0x61, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x23, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, + 0x68, 0x61, 0x73, 0x65, 0x52, 0x05, 0x70, 0x68, 0x61, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x0b, 0x63, + 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, + 0x6d, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0xba, 0x01, 0x0a, 0x11, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x72, + 
0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, + 0x12, 0x44, 0x0a, 0x0d, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, + 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x54, 0x61, 0x73, 0x6b, + 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x52, 0x0c, 0x74, 0x61, 0x73, 0x6b, 0x43, 0x61, + 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x12, 0x3a, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x22, 0x4a, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, - 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x52, 0x65, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x22, 0x82, - 0x02, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x31, 0x0a, 0x07, 0x6f, - 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x66, - 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x4f, 0x75, - 0x74, 0x70, 0x75, 0x74, 0x73, 0x52, 0x07, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, 0x12, 0x18, - 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 
0x65, 0x12, 0x34, 0x0a, 0x09, 0x6c, 0x6f, 0x67, 0x5f, - 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x66, 0x6c, - 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x54, 0x61, 0x73, - 0x6b, 0x4c, 0x6f, 0x67, 0x52, 0x08, 0x6c, 0x6f, 0x67, 0x4c, 0x69, 0x6e, 0x6b, 0x73, 0x12, 0x39, - 0x0a, 0x05, 0x70, 0x68, 0x61, 0x73, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x23, 0x2e, - 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x54, - 0x61, 0x73, 0x6b, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x50, 0x68, 0x61, - 0x73, 0x65, 0x52, 0x05, 0x70, 0x68, 0x61, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x0b, 0x63, 0x75, 0x73, - 0x74, 0x6f, 0x6d, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x0a, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x49, - 0x6e, 0x66, 0x6f, 0x22, 0xba, 0x01, 0x0a, 0x11, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, - 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x44, + 0x6f, 0x6e, 0x22, 0x14, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x7c, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x5b, 0x0a, 0x19, 0x73, 0x75, 0x70, + 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x63, 0x61, 0x74, 0x65, + 0x67, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 
0x1f, 0x2e, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, + 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x52, 0x17, 0x73, + 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x61, 0x74, 0x65, + 0x67, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x22, 0x3c, 0x0a, 0x0c, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x61, + 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x29, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, + 0x52, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x22, 0x17, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x56, 0x0a, 0x16, + 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 
0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x73, 0x22, 0xbd, 0x02, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, + 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, + 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, + 0x65, 0x74, 0x61, 0x12, 0x18, 0x0a, 0x07, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x03, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x39, 0x0a, + 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, + 0x74, 0x69, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, + 0x2d, 0x0a, 0x04, 0x73, 0x74, 0x65, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x73, 0x74, 0x65, 0x70, 0x12, 0x44, 0x0a, 0x0d, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x66, 0x6c, 
0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x52, 0x0c, 0x74, 0x61, 0x73, 0x6b, 0x43, 0x61, 0x74, 0x65, - 0x67, 0x6f, 0x72, 0x79, 0x12, 0x3a, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, - 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x22, 0x14, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x7c, 0x0a, 0x09, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x6f, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x5b, 0x0a, 0x19, 0x73, 0x75, 0x70, 0x70, 0x6f, - 0x72, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, - 0x72, 0x69, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x66, 0x6c, 0x79, - 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x54, - 0x61, 0x73, 0x6b, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x52, 0x17, 0x73, 0x75, 0x70, - 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, - 0x72, 0x69, 0x65, 0x73, 0x22, 0x3c, 0x0a, 0x0c, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x61, 0x74, 0x65, - 0x67, 0x6f, 0x72, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x22, 0x29, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x6f, 
0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x52, 0x0a, - 0x14, 0x47, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x09, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, - 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x43, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x09, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x22, 0x17, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x56, 0x0a, 0x16, 0x4c, 0x69, - 0x73, 0x74, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3c, 0x0a, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, - 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x43, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x0a, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x73, 0x22, 0xbd, 0x02, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x4d, 0x65, - 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, 0x0d, - 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, - 0x61, 0x12, 0x18, 0x0a, 0x07, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x07, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x0a, 0x73, - 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 
0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x35, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, - 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x2d, 0x0a, - 0x04, 0x73, 0x74, 0x65, 0x70, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x73, 0x74, 0x65, 0x70, 0x12, 0x44, 0x0a, 0x0d, - 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, - 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x61, 0x74, 0x65, - 0x67, 0x6f, 0x72, 0x79, 0x52, 0x0c, 0x74, 0x61, 0x73, 0x6b, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, - 0x72, 0x79, 0x22, 0x59, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x4d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x07, - 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x25, 0x2e, - 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x45, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0xab, 0x01, - 0x0a, 0x12, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x4c, 0x6f, 0x67, 0x73, 
0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6e, - 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x65, 0x73, 0x12, - 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x44, 0x0a, 0x0d, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x63, 0x61, - 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x66, - 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, - 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x52, 0x0c, 0x74, - 0x61, 0x73, 0x6b, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x22, 0x31, 0x0a, 0x19, 0x47, - 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x4c, 0x6f, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, - 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x33, - 0x0a, 0x17, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x4c, 0x6f, 0x67, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x73, - 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, - 0x6c, 0x74, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x4c, - 0x6f, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x46, 0x0a, 0x06, 0x68, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x66, 0x6c, - 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, - 0x47, 0x65, 0x74, 
0x54, 0x61, 0x73, 0x6b, 0x4c, 0x6f, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, 0x06, 0x68, 0x65, 0x61, - 0x64, 0x65, 0x72, 0x12, 0x40, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x2a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x4c, 0x6f, 0x67, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x6f, 0x64, 0x79, 0x48, 0x00, 0x52, - 0x04, 0x62, 0x6f, 0x64, 0x79, 0x42, 0x06, 0x0a, 0x04, 0x70, 0x61, 0x72, 0x74, 0x42, 0xc3, 0x01, - 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, - 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x42, 0x0e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, - 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, - 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x66, - 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, - 0xa2, 0x02, 0x03, 0x46, 0x50, 0x58, 0xaa, 0x02, 0x11, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, - 0x6c, 0x32, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0xca, 0x02, 0x11, 0x46, 0x6c, 0x79, - 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0xe2, 0x02, - 0x1d, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x50, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x73, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, - 0x12, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x3a, 0x3a, 0x50, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x73, 0x4a, 0x9b, 0x46, 0x0a, 0x07, 0x12, 0x05, 0x00, 0x00, 0xdc, 0x01, 0x01, 0x0a, - 0x08, 0x0a, 0x01, 0x0c, 0x12, 0x03, 0x00, 0x00, 0x12, 
0x0a, 0x08, 0x0a, 0x01, 0x02, 0x12, 0x03, - 0x02, 0x00, 0x1a, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x00, 0x12, 0x03, 0x04, 0x00, 0x28, 0x0a, 0x09, - 0x0a, 0x02, 0x03, 0x01, 0x12, 0x03, 0x05, 0x00, 0x29, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x02, 0x12, - 0x03, 0x06, 0x00, 0x26, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x03, 0x12, 0x03, 0x07, 0x00, 0x27, 0x0a, - 0x09, 0x0a, 0x02, 0x03, 0x04, 0x12, 0x03, 0x08, 0x00, 0x24, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x05, - 0x12, 0x03, 0x09, 0x00, 0x25, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x06, 0x12, 0x03, 0x0a, 0x00, 0x28, - 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x07, 0x12, 0x03, 0x0b, 0x00, 0x26, 0x0a, 0x09, 0x0a, 0x02, 0x03, - 0x08, 0x12, 0x03, 0x0c, 0x00, 0x29, 0x0a, 0x08, 0x0a, 0x01, 0x08, 0x12, 0x03, 0x0e, 0x00, 0x4c, - 0x0a, 0x09, 0x0a, 0x02, 0x08, 0x0b, 0x12, 0x03, 0x0e, 0x00, 0x4c, 0x0a, 0x87, 0x01, 0x0a, 0x02, - 0x04, 0x00, 0x12, 0x04, 0x11, 0x00, 0x2b, 0x01, 0x1a, 0x5f, 0x20, 0x52, 0x65, 0x70, 0x72, 0x65, - 0x73, 0x65, 0x6e, 0x74, 0x73, 0x20, 0x61, 0x20, 0x73, 0x75, 0x62, 0x73, 0x65, 0x74, 0x20, 0x6f, - 0x66, 0x20, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x65, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x61, 0x72, 0x65, 0x20, 0x72, 0x65, 0x6c, 0x65, 0x76, - 0x61, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x20, - 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x0a, 0x22, 0x1a, 0x20, 0x49, 0x44, 0x20, 0x6f, - 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, - 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x00, 0x01, 0x12, 0x03, 0x11, 0x08, - 0x1d, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x00, 0x12, 0x03, 0x14, 0x02, 0x35, 0x0a, 0x0c, - 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x06, 0x12, 0x03, 0x14, 0x02, 0x1e, 0x0a, 0x0c, 0x0a, 0x05, - 0x04, 0x00, 0x02, 0x00, 0x01, 0x12, 0x03, 0x14, 0x1f, 0x30, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 
0x00, - 0x02, 0x00, 0x03, 0x12, 0x03, 0x14, 0x33, 0x34, 0x0a, 0x3a, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x01, - 0x12, 0x03, 0x16, 0x02, 0x17, 0x1a, 0x2d, 0x20, 0x6b, 0x38, 0x73, 0x20, 0x6e, 0x61, 0x6d, 0x65, - 0x73, 0x70, 0x61, 0x63, 0x65, 0x20, 0x77, 0x68, 0x65, 0x72, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, - 0x74, 0x61, 0x73, 0x6b, 0x20, 0x69, 0x73, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x64, - 0x20, 0x69, 0x6e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x05, 0x12, 0x03, 0x16, - 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x01, 0x12, 0x03, 0x16, 0x09, 0x12, - 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x03, 0x12, 0x03, 0x16, 0x15, 0x16, 0x0a, 0x34, - 0x0a, 0x04, 0x04, 0x00, 0x02, 0x02, 0x12, 0x03, 0x18, 0x02, 0x21, 0x1a, 0x27, 0x20, 0x4c, 0x61, - 0x62, 0x65, 0x6c, 0x73, 0x20, 0x61, 0x74, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x20, 0x74, 0x6f, - 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, - 0x69, 0x6f, 0x6e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x02, 0x06, 0x12, 0x03, 0x18, - 0x02, 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x02, 0x01, 0x12, 0x03, 0x18, 0x16, 0x1c, - 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x02, 0x03, 0x12, 0x03, 0x18, 0x1f, 0x20, 0x0a, 0x39, - 0x0a, 0x04, 0x04, 0x00, 0x02, 0x03, 0x12, 0x03, 0x1a, 0x02, 0x26, 0x1a, 0x2c, 0x20, 0x41, 0x6e, - 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x20, 0x61, 0x74, 0x74, 0x61, 0x63, 0x68, - 0x65, 0x64, 0x20, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x65, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, - 0x03, 0x06, 0x12, 0x03, 0x1a, 0x02, 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x03, 0x01, - 0x12, 0x03, 0x1a, 0x16, 0x21, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x03, 0x03, 0x12, 0x03, - 0x1a, 0x24, 0x25, 0x0a, 0x45, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x04, 0x12, 0x03, 0x1c, 0x02, 0x21, - 0x1a, 0x38, 0x20, 0x6b, 0x38, 
0x73, 0x20, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x20, 0x61, - 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x20, 0x61, 0x73, 0x73, 0x6f, 0x63, 0x69, 0x61, 0x74, 0x65, - 0x64, 0x20, 0x77, 0x69, 0x74, 0x68, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, - 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, - 0x02, 0x04, 0x05, 0x12, 0x03, 0x1c, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x04, - 0x01, 0x12, 0x03, 0x1c, 0x09, 0x1c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x04, 0x03, 0x12, - 0x03, 0x1c, 0x1f, 0x20, 0x0a, 0x43, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x05, 0x12, 0x03, 0x1e, 0x02, - 0x30, 0x1a, 0x36, 0x20, 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x20, - 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x20, 0x61, 0x74, 0x74, 0x61, 0x63, 0x68, - 0x65, 0x64, 0x20, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x65, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, - 0x05, 0x06, 0x12, 0x03, 0x1e, 0x02, 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x05, 0x01, - 0x12, 0x03, 0x1e, 0x16, 0x2b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x05, 0x03, 0x12, 0x03, - 0x1e, 0x2e, 0x2f, 0x0a, 0x98, 0x01, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x06, 0x12, 0x03, 0x21, 0x02, - 0x19, 0x1a, 0x8a, 0x01, 0x20, 0x52, 0x65, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x73, 0x20, - 0x74, 0x68, 0x65, 0x20, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x20, 0x6e, 0x75, 0x6d, 0x62, - 0x65, 0x72, 0x20, 0x6f, 0x66, 0x20, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, 0x20, 0x61, - 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x20, 0x74, 0x61, 0x73, - 0x6b, 0x2e, 0x0a, 0x20, 0x49, 0x66, 0x20, 0x61, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x66, 0x61, - 0x69, 0x6c, 0x73, 0x2c, 0x20, 0x69, 0x74, 0x20, 0x63, 0x61, 0x6e, 0x20, 0x62, 0x65, 0x20, 0x72, - 0x65, 0x74, 0x72, 0x69, 0x65, 0x64, 0x20, 0x75, 0x70, 0x20, 0x74, 
0x6f, 0x20, 0x74, 0x68, 0x69, - 0x73, 0x20, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x20, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x20, 0x6f, 0x66, 0x20, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, 0x2e, 0x0a, 0x0a, 0x0c, - 0x0a, 0x05, 0x04, 0x00, 0x02, 0x06, 0x05, 0x12, 0x03, 0x21, 0x02, 0x07, 0x0a, 0x0c, 0x0a, 0x05, - 0x04, 0x00, 0x02, 0x06, 0x01, 0x12, 0x03, 0x21, 0x08, 0x14, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, - 0x02, 0x06, 0x03, 0x12, 0x03, 0x21, 0x17, 0x18, 0x0a, 0x83, 0x01, 0x0a, 0x04, 0x04, 0x00, 0x02, - 0x07, 0x12, 0x03, 0x24, 0x02, 0x19, 0x1a, 0x76, 0x20, 0x49, 0x6e, 0x64, 0x69, 0x63, 0x61, 0x74, - 0x65, 0x73, 0x20, 0x77, 0x68, 0x65, 0x74, 0x68, 0x65, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, - 0x61, 0x73, 0x6b, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x63, 0x61, - 0x6e, 0x20, 0x62, 0x65, 0x20, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x72, 0x75, 0x70, 0x74, 0x65, 0x64, - 0x2e, 0x0a, 0x20, 0x49, 0x66, 0x20, 0x73, 0x65, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x74, 0x72, 0x75, - 0x65, 0x2c, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x63, 0x61, 0x6e, 0x20, - 0x62, 0x65, 0x20, 0x73, 0x74, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x20, 0x62, 0x65, 0x66, 0x6f, 0x72, - 0x65, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x0a, 0x0a, 0x0c, - 0x0a, 0x05, 0x04, 0x00, 0x02, 0x07, 0x05, 0x12, 0x03, 0x24, 0x02, 0x06, 0x0a, 0x0c, 0x0a, 0x05, - 0x04, 0x00, 0x02, 0x07, 0x01, 0x12, 0x03, 0x24, 0x07, 0x14, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, - 0x02, 0x07, 0x03, 0x12, 0x03, 0x24, 0x17, 0x18, 0x0a, 0xde, 0x01, 0x0a, 0x04, 0x04, 0x00, 0x02, - 0x08, 0x12, 0x03, 0x28, 0x02, 0x2c, 0x1a, 0xd0, 0x01, 0x20, 0x53, 0x70, 0x65, 0x63, 0x69, 0x66, - 0x69, 0x65, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, - 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x20, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x20, 0x61, 0x74, 0x20, 0x77, 0x68, 0x69, 0x63, 0x68, 0x20, 0x74, 0x68, 0x65, - 0x20, 
0x69, 0x6e, 0x74, 0x65, 0x72, 0x72, 0x75, 0x70, 0x74, 0x69, 0x62, 0x6c, 0x65, 0x20, 0x70, - 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x0a, 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x74, 0x61, - 0x6b, 0x65, 0x20, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x2e, 0x20, 0x49, 0x66, 0x20, 0x74, 0x68, - 0x65, 0x20, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x20, 0x6f, 0x66, 0x20, 0x63, 0x6f, 0x6e, 0x73, - 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x66, 0x61, 0x69, - 0x6c, 0x75, 0x72, 0x65, 0x73, 0x20, 0x65, 0x78, 0x63, 0x65, 0x65, 0x64, 0x73, 0x20, 0x74, 0x68, - 0x69, 0x73, 0x20, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x2c, 0x0a, 0x20, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x72, 0x75, 0x70, 0x74, 0x69, 0x62, 0x6c, 0x65, 0x20, 0x62, 0x65, 0x68, - 0x61, 0x76, 0x69, 0x6f, 0x72, 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x62, 0x65, 0x20, 0x61, 0x63, - 0x74, 0x69, 0x76, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, - 0x08, 0x05, 0x12, 0x03, 0x28, 0x02, 0x07, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x08, 0x01, - 0x12, 0x03, 0x28, 0x08, 0x27, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x08, 0x03, 0x12, 0x03, - 0x28, 0x2a, 0x2b, 0x0a, 0x3b, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x09, 0x12, 0x03, 0x2a, 0x02, 0x1e, - 0x1a, 0x2e, 0x20, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x20, 0x6f, 0x66, 0x20, 0x75, - 0x73, 0x65, 0x72, 0x20, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x20, 0x74, 0x68, 0x69, 0x73, - 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x0a, - 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x09, 0x06, 0x12, 0x03, 0x2a, 0x02, 0x0f, 0x0a, 0x0c, - 0x0a, 0x05, 0x04, 0x00, 0x02, 0x09, 0x01, 0x12, 0x03, 0x2a, 0x10, 0x18, 0x0a, 0x0c, 0x0a, 0x05, - 0x04, 0x00, 0x02, 0x09, 0x03, 0x12, 0x03, 0x2a, 0x1b, 0x1d, 0x0a, 0x3c, 0x0a, 0x02, 0x04, 0x01, - 0x12, 0x04, 0x2e, 0x00, 0x3d, 0x01, 0x1a, 0x30, 0x20, 0x52, 0x65, 0x70, 0x72, 0x65, 0x73, 0x65, - 0x6e, 0x74, 0x73, 0x20, 0x61, 0x20, 0x72, 
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x20, 0x73, 0x74, - 0x72, 0x75, 0x63, 0x74, 0x75, 0x72, 0x65, 0x20, 0x74, 0x6f, 0x20, 0x63, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x01, 0x01, 0x12, - 0x03, 0x2e, 0x08, 0x19, 0x0a, 0xab, 0x01, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x00, 0x12, 0x03, 0x32, - 0x02, 0x19, 0x1a, 0x9d, 0x01, 0x20, 0x54, 0x68, 0x65, 0x20, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, - 0x20, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x20, 0x74, 0x6f, 0x20, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x20, 0x74, 0x68, 0x65, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, - 0x2e, 0x20, 0x41, 0x6c, 0x6c, 0x20, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x20, 0x69, - 0x6e, 0x70, 0x75, 0x74, 0x73, 0x20, 0x6d, 0x75, 0x73, 0x74, 0x20, 0x62, 0x65, 0x0a, 0x20, 0x69, - 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, - 0x6d, 0x61, 0x70, 0x2e, 0x20, 0x49, 0x66, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x72, 0x65, 0x71, 0x75, - 0x69, 0x72, 0x65, 0x64, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x70, 0x72, 0x6f, - 0x76, 0x69, 0x64, 0x65, 0x64, 0x2c, 0x20, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x20, - 0x61, 0x70, 0x70, 0x6c, 0x79, 0x2e, 0x0a, 0x20, 0x2b, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, - 0x6c, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x06, 0x12, 0x03, 0x32, 0x02, 0x0d, - 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x01, 0x12, 0x03, 0x32, 0x0e, 0x14, 0x0a, 0x0c, - 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x03, 0x12, 0x03, 0x32, 0x17, 0x18, 0x0a, 0x53, 0x0a, 0x04, - 0x04, 0x01, 0x02, 0x01, 0x12, 0x03, 0x34, 0x02, 0x21, 0x1a, 0x46, 0x20, 0x54, 0x65, 0x6d, 0x70, - 0x6c, 0x61, 0x74, 0x65, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, - 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x65, 0x6e, 0x63, 0x61, 0x70, 0x73, 0x75, 0x6c, 0x61, 0x74, - 0x65, 0x73, 0x20, 0x61, 0x6c, 0x6c, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6d, 0x65, 
0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x2e, - 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x01, 0x06, 0x12, 0x03, 0x34, 0x02, 0x13, 0x0a, - 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x01, 0x01, 0x12, 0x03, 0x34, 0x14, 0x1c, 0x0a, 0x0c, 0x0a, - 0x05, 0x04, 0x01, 0x02, 0x01, 0x03, 0x12, 0x03, 0x34, 0x1f, 0x20, 0x0a, 0x64, 0x0a, 0x04, 0x04, - 0x01, 0x02, 0x02, 0x12, 0x03, 0x36, 0x02, 0x1b, 0x1a, 0x57, 0x20, 0x50, 0x72, 0x65, 0x66, 0x69, - 0x78, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x77, 0x68, 0x65, 0x72, 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, - 0x20, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x20, 0x64, 0x61, 0x74, 0x61, 0x20, 0x77, 0x69, 0x6c, - 0x6c, 0x20, 0x62, 0x65, 0x20, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x2e, 0x20, 0x28, 0x65, - 0x2e, 0x67, 0x2e, 0x20, 0x73, 0x33, 0x3a, 0x2f, 0x2f, 0x6d, 0x79, 0x2d, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x2f, 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x29, - 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x02, 0x05, 0x12, 0x03, 0x36, 0x02, 0x08, 0x0a, - 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x02, 0x01, 0x12, 0x03, 0x36, 0x09, 0x16, 0x0a, 0x0c, 0x0a, - 0x05, 0x04, 0x01, 0x02, 0x02, 0x03, 0x12, 0x03, 0x36, 0x19, 0x1a, 0x0a, 0x39, 0x0a, 0x04, 0x04, - 0x01, 0x02, 0x03, 0x12, 0x03, 0x38, 0x02, 0x34, 0x1a, 0x2c, 0x20, 0x73, 0x75, 0x62, 0x73, 0x65, - 0x74, 0x20, 0x6f, 0x66, 0x20, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x20, 0x74, 0x61, 0x73, - 0x6b, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x6d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x03, 0x06, 0x12, - 0x03, 0x38, 0x02, 0x17, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x03, 0x01, 0x12, 0x03, 0x38, - 0x18, 0x2f, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x03, 0x03, 0x12, 0x03, 0x38, 0x32, 0x33, - 0x0a, 0xa0, 0x01, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x04, 0x12, 0x03, 0x3c, 0x02, 0x21, 0x1a, 0x92, + 0x67, 0x6f, 0x72, 
0x79, 0x22, 0x59, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x4d, + 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3f, + 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x25, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, + 0xab, 0x01, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x4c, 0x6f, 0x67, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x5f, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x14, 0x0a, 0x05, 0x6c, + 0x69, 0x6e, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x04, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x65, + 0x73, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x44, 0x0a, 0x0d, 0x74, 0x61, 0x73, 0x6b, 0x5f, + 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, + 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x73, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x52, + 0x0c, 0x74, 0x61, 0x73, 0x6b, 0x43, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x79, 0x22, 0x31, 0x0a, + 0x19, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x4c, 0x6f, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x22, 0x33, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x54, 0x61, 
0x73, 0x6b, 0x4c, 0x6f, 0x67, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x72, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x72, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0xa7, 0x01, 0x0a, 0x13, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, + 0x6b, 0x4c, 0x6f, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x46, 0x0a, + 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x73, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x4c, 0x6f, 0x67, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x48, 0x00, 0x52, 0x06, 0x68, + 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x40, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x73, 0x6b, 0x4c, + 0x6f, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x6f, 0x64, 0x79, 0x48, + 0x00, 0x52, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x42, 0x06, 0x0a, 0x04, 0x70, 0x61, 0x72, 0x74, 0x42, + 0xc3, 0x01, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x42, 0x0e, 0x43, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x6f, 0x72, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x35, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, + 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, + 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x73, 0xa2, 0x02, 0x03, 0x46, 0x50, 0x58, 0xaa, 0x02, 0x11, 0x46, 0x6c, 0x79, 0x74, 
0x65, + 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0xca, 0x02, 0x11, 0x46, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, + 0xe2, 0x02, 0x1d, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x50, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x73, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0xea, 0x02, 0x12, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x3a, 0x3a, 0x50, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x73, 0x4a, 0x9b, 0x46, 0x0a, 0x07, 0x12, 0x05, 0x00, 0x00, 0xdc, 0x01, + 0x01, 0x0a, 0x08, 0x0a, 0x01, 0x0c, 0x12, 0x03, 0x00, 0x00, 0x12, 0x0a, 0x08, 0x0a, 0x01, 0x02, + 0x12, 0x03, 0x02, 0x00, 0x1a, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x00, 0x12, 0x03, 0x04, 0x00, 0x28, + 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x01, 0x12, 0x03, 0x05, 0x00, 0x29, 0x0a, 0x09, 0x0a, 0x02, 0x03, + 0x02, 0x12, 0x03, 0x06, 0x00, 0x26, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x03, 0x12, 0x03, 0x07, 0x00, + 0x27, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x04, 0x12, 0x03, 0x08, 0x00, 0x24, 0x0a, 0x09, 0x0a, 0x02, + 0x03, 0x05, 0x12, 0x03, 0x09, 0x00, 0x25, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x06, 0x12, 0x03, 0x0a, + 0x00, 0x28, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x07, 0x12, 0x03, 0x0b, 0x00, 0x26, 0x0a, 0x09, 0x0a, + 0x02, 0x03, 0x08, 0x12, 0x03, 0x0c, 0x00, 0x29, 0x0a, 0x08, 0x0a, 0x01, 0x08, 0x12, 0x03, 0x0e, + 0x00, 0x4c, 0x0a, 0x09, 0x0a, 0x02, 0x08, 0x0b, 0x12, 0x03, 0x0e, 0x00, 0x4c, 0x0a, 0x87, 0x01, + 0x0a, 0x02, 0x04, 0x00, 0x12, 0x04, 0x11, 0x00, 0x2b, 0x01, 0x1a, 0x5f, 0x20, 0x52, 0x65, 0x70, + 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x73, 0x20, 0x61, 0x20, 0x73, 0x75, 0x62, 0x73, 0x65, 0x74, + 0x20, 0x6f, 0x66, 0x20, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, + 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x6d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x61, 0x72, 0x65, 0x20, 0x72, 0x65, 0x6c, + 0x65, 0x76, 0x61, 0x6e, 0x74, 
0x20, 0x74, 0x6f, 0x20, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x0a, 0x22, 0x1a, 0x20, 0x49, 0x44, + 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x65, 0x78, 0x65, + 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x00, 0x01, 0x12, 0x03, + 0x11, 0x08, 0x1d, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x00, 0x12, 0x03, 0x14, 0x02, 0x35, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x06, 0x12, 0x03, 0x14, 0x02, 0x1e, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x01, 0x12, 0x03, 0x14, 0x1f, 0x30, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x00, 0x02, 0x00, 0x03, 0x12, 0x03, 0x14, 0x33, 0x34, 0x0a, 0x3a, 0x0a, 0x04, 0x04, 0x00, + 0x02, 0x01, 0x12, 0x03, 0x16, 0x02, 0x17, 0x1a, 0x2d, 0x20, 0x6b, 0x38, 0x73, 0x20, 0x6e, 0x61, + 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20, 0x77, 0x68, 0x65, 0x72, 0x65, 0x20, 0x74, 0x68, + 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x69, 0x73, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x65, 0x64, 0x20, 0x69, 0x6e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x05, 0x12, + 0x03, 0x16, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x01, 0x12, 0x03, 0x16, + 0x09, 0x12, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x03, 0x12, 0x03, 0x16, 0x15, 0x16, + 0x0a, 0x34, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x02, 0x12, 0x03, 0x18, 0x02, 0x21, 0x1a, 0x27, 0x20, + 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x20, 0x61, 0x74, 0x74, 0x61, 0x63, 0x68, 0x65, 0x64, 0x20, + 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x65, 0x78, 0x65, 0x63, + 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x02, 0x06, 0x12, + 0x03, 0x18, 0x02, 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x02, 0x01, 0x12, 0x03, 0x18, + 0x16, 0x1c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x02, 0x03, 0x12, 0x03, 0x18, 0x1f, 0x20, + 0x0a, 0x39, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x03, 0x12, 0x03, 0x1a, 
0x02, 0x26, 0x1a, 0x2c, 0x20, + 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x20, 0x61, 0x74, 0x74, 0x61, + 0x63, 0x68, 0x65, 0x64, 0x20, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, + 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x00, 0x02, 0x03, 0x06, 0x12, 0x03, 0x1a, 0x02, 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, + 0x03, 0x01, 0x12, 0x03, 0x1a, 0x16, 0x21, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x03, 0x03, + 0x12, 0x03, 0x1a, 0x24, 0x25, 0x0a, 0x45, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x04, 0x12, 0x03, 0x1c, + 0x02, 0x21, 0x1a, 0x38, 0x20, 0x6b, 0x38, 0x73, 0x20, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, + 0x20, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x20, 0x61, 0x73, 0x73, 0x6f, 0x63, 0x69, 0x61, + 0x74, 0x65, 0x64, 0x20, 0x77, 0x69, 0x74, 0x68, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, 0x73, + 0x6b, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x00, 0x02, 0x04, 0x05, 0x12, 0x03, 0x1c, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, + 0x02, 0x04, 0x01, 0x12, 0x03, 0x1c, 0x09, 0x1c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x04, + 0x03, 0x12, 0x03, 0x1c, 0x1f, 0x20, 0x0a, 0x43, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x05, 0x12, 0x03, + 0x1e, 0x02, 0x30, 0x1a, 0x36, 0x20, 0x45, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, + 0x74, 0x20, 0x76, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x20, 0x61, 0x74, 0x74, 0x61, + 0x63, 0x68, 0x65, 0x64, 0x20, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, + 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x00, 0x02, 0x05, 0x06, 0x12, 0x03, 0x1e, 0x02, 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, + 0x05, 0x01, 0x12, 0x03, 0x1e, 0x16, 0x2b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x05, 0x03, + 0x12, 0x03, 0x1e, 0x2e, 0x2f, 0x0a, 0x98, 0x01, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x06, 0x12, 0x03, + 0x21, 
0x02, 0x19, 0x1a, 0x8a, 0x01, 0x20, 0x52, 0x65, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, + 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x20, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x20, 0x6f, 0x66, 0x20, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, + 0x20, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x20, 0x74, + 0x61, 0x73, 0x6b, 0x2e, 0x0a, 0x20, 0x49, 0x66, 0x20, 0x61, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, + 0x66, 0x61, 0x69, 0x6c, 0x73, 0x2c, 0x20, 0x69, 0x74, 0x20, 0x63, 0x61, 0x6e, 0x20, 0x62, 0x65, + 0x20, 0x72, 0x65, 0x74, 0x72, 0x69, 0x65, 0x64, 0x20, 0x75, 0x70, 0x20, 0x74, 0x6f, 0x20, 0x74, + 0x68, 0x69, 0x73, 0x20, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x20, 0x6e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x20, 0x6f, 0x66, 0x20, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, 0x2e, 0x0a, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x06, 0x05, 0x12, 0x03, 0x21, 0x02, 0x07, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x00, 0x02, 0x06, 0x01, 0x12, 0x03, 0x21, 0x08, 0x14, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x00, 0x02, 0x06, 0x03, 0x12, 0x03, 0x21, 0x17, 0x18, 0x0a, 0x83, 0x01, 0x0a, 0x04, 0x04, + 0x00, 0x02, 0x07, 0x12, 0x03, 0x24, 0x02, 0x19, 0x1a, 0x76, 0x20, 0x49, 0x6e, 0x64, 0x69, 0x63, + 0x61, 0x74, 0x65, 0x73, 0x20, 0x77, 0x68, 0x65, 0x74, 0x68, 0x65, 0x72, 0x20, 0x74, 0x68, 0x65, + 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x20, + 0x63, 0x61, 0x6e, 0x20, 0x62, 0x65, 0x20, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x72, 0x75, 0x70, 0x74, + 0x65, 0x64, 0x2e, 0x0a, 0x20, 0x49, 0x66, 0x20, 0x73, 0x65, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x74, + 0x72, 0x75, 0x65, 0x2c, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x63, 0x61, + 0x6e, 0x20, 0x62, 0x65, 0x20, 0x73, 0x74, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x20, 0x62, 0x65, 0x66, + 0x6f, 0x72, 0x65, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x0a, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 
0x07, 0x05, 0x12, 0x03, 0x24, 0x02, 0x06, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x00, 0x02, 0x07, 0x01, 0x12, 0x03, 0x24, 0x07, 0x14, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x00, 0x02, 0x07, 0x03, 0x12, 0x03, 0x24, 0x17, 0x18, 0x0a, 0xde, 0x01, 0x0a, 0x04, 0x04, + 0x00, 0x02, 0x08, 0x12, 0x03, 0x28, 0x02, 0x2c, 0x1a, 0xd0, 0x01, 0x20, 0x53, 0x70, 0x65, 0x63, + 0x69, 0x66, 0x69, 0x65, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, + 0x6f, 0x6c, 0x64, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x66, 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x20, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x20, 0x61, 0x74, 0x20, 0x77, 0x68, 0x69, 0x63, 0x68, 0x20, 0x74, + 0x68, 0x65, 0x20, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x72, 0x75, 0x70, 0x74, 0x69, 0x62, 0x6c, 0x65, + 0x20, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x0a, 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, + 0x74, 0x61, 0x6b, 0x65, 0x20, 0x65, 0x66, 0x66, 0x65, 0x63, 0x74, 0x2e, 0x20, 0x49, 0x66, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x20, 0x6f, 0x66, 0x20, 0x63, 0x6f, + 0x6e, 0x73, 0x65, 0x63, 0x75, 0x74, 0x69, 0x76, 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x66, + 0x61, 0x69, 0x6c, 0x75, 0x72, 0x65, 0x73, 0x20, 0x65, 0x78, 0x63, 0x65, 0x65, 0x64, 0x73, 0x20, + 0x74, 0x68, 0x69, 0x73, 0x20, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x2c, 0x0a, + 0x20, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x72, 0x75, 0x70, 0x74, 0x69, 0x62, 0x6c, 0x65, 0x20, 0x62, + 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x62, 0x65, 0x20, + 0x61, 0x63, 0x74, 0x69, 0x76, 0x61, 0x74, 0x65, 0x64, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x00, 0x02, 0x08, 0x05, 0x12, 0x03, 0x28, 0x02, 0x07, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, + 0x08, 0x01, 0x12, 0x03, 0x28, 0x08, 0x27, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x08, 0x03, + 0x12, 0x03, 0x28, 0x2a, 0x2b, 0x0a, 0x3b, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x09, 0x12, 0x03, 0x2a, + 0x02, 0x1e, 0x1a, 0x2e, 0x20, 0x49, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 
0x20, 0x6f, 0x66, + 0x20, 0x75, 0x73, 0x65, 0x72, 0x20, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x20, 0x74, 0x68, + 0x69, 0x73, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, + 0x6e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x09, 0x06, 0x12, 0x03, 0x2a, 0x02, 0x0f, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x09, 0x01, 0x12, 0x03, 0x2a, 0x10, 0x18, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x00, 0x02, 0x09, 0x03, 0x12, 0x03, 0x2a, 0x1b, 0x1d, 0x0a, 0x3c, 0x0a, 0x02, + 0x04, 0x01, 0x12, 0x04, 0x2e, 0x00, 0x3d, 0x01, 0x1a, 0x30, 0x20, 0x52, 0x65, 0x70, 0x72, 0x65, + 0x73, 0x65, 0x6e, 0x74, 0x73, 0x20, 0x61, 0x20, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x20, + 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x75, 0x72, 0x65, 0x20, 0x74, 0x6f, 0x20, 0x63, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x01, + 0x01, 0x12, 0x03, 0x2e, 0x08, 0x19, 0x0a, 0xab, 0x01, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x00, 0x12, + 0x03, 0x32, 0x02, 0x19, 0x1a, 0x9d, 0x01, 0x20, 0x54, 0x68, 0x65, 0x20, 0x69, 0x6e, 0x70, 0x75, + 0x74, 0x73, 0x20, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x20, 0x74, 0x6f, 0x20, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x20, 0x74, 0x68, 0x65, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, + 0x6f, 0x6e, 0x2e, 0x20, 0x41, 0x6c, 0x6c, 0x20, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, + 0x20, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x73, 0x20, 0x6d, 0x75, 0x73, 0x74, 0x20, 0x62, 0x65, 0x0a, + 0x20, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x74, 0x68, 0x69, + 0x73, 0x20, 0x6d, 0x61, 0x70, 0x2e, 0x20, 0x49, 0x66, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x72, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x6e, 0x6f, 0x74, 0x20, 0x70, + 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x64, 0x2c, 0x20, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, + 0x73, 0x20, 0x61, 0x70, 0x70, 0x6c, 0x79, 0x2e, 0x0a, 0x20, 0x2b, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 
0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x06, 0x12, 0x03, 0x32, + 0x02, 0x0d, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x01, 0x12, 0x03, 0x32, 0x0e, 0x14, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x03, 0x12, 0x03, 0x32, 0x17, 0x18, 0x0a, 0x53, + 0x0a, 0x04, 0x04, 0x01, 0x02, 0x01, 0x12, 0x03, 0x34, 0x02, 0x21, 0x1a, 0x46, 0x20, 0x54, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, + 0x73, 0x6b, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x65, 0x6e, 0x63, 0x61, 0x70, 0x73, 0x75, 0x6c, + 0x61, 0x74, 0x65, 0x73, 0x20, 0x61, 0x6c, 0x6c, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, 0x73, + 0x6b, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x01, 0x06, 0x12, 0x03, 0x34, 0x02, + 0x13, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x01, 0x01, 0x12, 0x03, 0x34, 0x14, 0x1c, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x01, 0x03, 0x12, 0x03, 0x34, 0x1f, 0x20, 0x0a, 0x64, 0x0a, + 0x04, 0x04, 0x01, 0x02, 0x02, 0x12, 0x03, 0x36, 0x02, 0x1b, 0x1a, 0x57, 0x20, 0x50, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x77, 0x68, 0x65, 0x72, 0x65, 0x20, 0x74, 0x61, + 0x73, 0x6b, 0x20, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x20, 0x64, 0x61, 0x74, 0x61, 0x20, 0x77, + 0x69, 0x6c, 0x6c, 0x20, 0x62, 0x65, 0x20, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x2e, 0x20, + 0x28, 0x65, 0x2e, 0x67, 0x2e, 0x20, 0x73, 0x33, 0x3a, 0x2f, 0x2f, 0x6d, 0x79, 0x2d, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x2f, 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x73, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x29, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x02, 0x05, 0x12, 0x03, 0x36, 0x02, + 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x02, 0x01, 0x12, 0x03, 0x36, 0x09, 0x16, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x02, 0x03, 0x12, 0x03, 0x36, 0x19, 0x1a, 0x0a, 0x39, 0x0a, + 0x04, 0x04, 0x01, 0x02, 0x03, 0x12, 0x03, 0x38, 0x02, 
0x34, 0x1a, 0x2c, 0x20, 0x73, 0x75, 0x62, + 0x73, 0x65, 0x74, 0x20, 0x6f, 0x66, 0x20, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x20, 0x74, + 0x61, 0x73, 0x6b, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x6d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x03, + 0x06, 0x12, 0x03, 0x38, 0x02, 0x17, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x03, 0x01, 0x12, + 0x03, 0x38, 0x18, 0x2f, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x03, 0x03, 0x12, 0x03, 0x38, + 0x32, 0x33, 0x0a, 0xa0, 0x01, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x04, 0x12, 0x03, 0x3c, 0x02, 0x21, + 0x1a, 0x92, 0x01, 0x20, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x28, + 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x29, 0x20, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x74, + 0x68, 0x65, 0x20, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x0a, 0x20, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x75, 0x73, + 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x20, 0x61, 0x6e, 0x64, + 0x20, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x20, 0x69, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, + 0x61, 0x73, 0x6b, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x20, 0x69, 0x66, 0x20, 0x69, + 0x74, 0x27, 0x73, 0x20, 0x4e, 0x6f, 0x6e, 0x65, 0x2e, 0x0a, 0x20, 0x2b, 0x6f, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x61, 0x6c, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x04, 0x06, 0x12, 0x03, + 0x3c, 0x02, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x04, 0x01, 0x12, 0x03, 0x3c, 0x12, + 0x1c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x04, 0x03, 0x12, 0x03, 0x3c, 0x1f, 0x20, 0x0a, + 0x35, 0x0a, 0x02, 0x04, 0x02, 0x12, 0x04, 0x40, 0x00, 0x43, 0x01, 0x1a, 0x29, 0x20, 0x52, 0x65, + 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x73, 0x20, 0x61, 0x20, 0x63, 0x72, 0x65, 0x61, 
0x74, + 0x65, 0x20, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x20, 0x73, 0x74, 0x72, 0x75, 0x63, + 0x74, 0x75, 0x72, 0x65, 0x2e, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x02, 0x01, 0x12, 0x03, 0x40, + 0x08, 0x1a, 0x0a, 0x78, 0x0a, 0x04, 0x04, 0x02, 0x02, 0x00, 0x12, 0x03, 0x42, 0x02, 0x1a, 0x1a, + 0x6b, 0x20, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x20, 0x69, + 0x73, 0x20, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x74, 0x68, 0x65, + 0x20, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x20, 0x49, 0x74, 0x20, 0x63, + 0x6f, 0x75, 0x6c, 0x64, 0x20, 0x62, 0x65, 0x20, 0x61, 0x20, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, + 0x20, 0x28, 0x6a, 0x6f, 0x62, 0x49, 0x64, 0x29, 0x20, 0x6f, 0x72, 0x20, 0x61, 0x20, 0x64, 0x69, + 0x63, 0x74, 0x20, 0x28, 0x6d, 0x6f, 0x72, 0x65, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x78, + 0x20, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x29, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x02, 0x02, 0x00, 0x05, 0x12, 0x03, 0x42, 0x02, 0x07, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, + 0x02, 0x00, 0x01, 0x12, 0x03, 0x42, 0x08, 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, + 0x03, 0x12, 0x03, 0x42, 0x18, 0x19, 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x03, 0x12, 0x04, 0x45, 0x00, + 0x52, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x03, 0x01, 0x12, 0x03, 0x45, 0x08, 0x1b, 0x0a, 0x53, + 0x0a, 0x04, 0x04, 0x03, 0x02, 0x00, 0x12, 0x03, 0x47, 0x02, 0x21, 0x1a, 0x46, 0x20, 0x54, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, + 0x73, 0x6b, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x65, 0x6e, 0x63, 0x61, 0x70, 0x73, 0x75, 0x6c, + 0x61, 0x74, 0x65, 0x73, 0x20, 0x61, 0x6c, 0x6c, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, 0x73, + 0x6b, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x00, 0x06, 0x12, 0x03, 0x47, 0x02, + 0x13, 0x0a, 0x0c, 0x0a, 0x05, 
0x04, 0x03, 0x02, 0x00, 0x01, 0x12, 0x03, 0x47, 0x14, 0x1c, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x00, 0x03, 0x12, 0x03, 0x47, 0x1f, 0x20, 0x0a, 0x64, 0x0a, + 0x04, 0x04, 0x03, 0x02, 0x01, 0x12, 0x03, 0x49, 0x02, 0x1b, 0x1a, 0x57, 0x20, 0x50, 0x72, 0x65, + 0x66, 0x69, 0x78, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x77, 0x68, 0x65, 0x72, 0x65, 0x20, 0x74, 0x61, + 0x73, 0x6b, 0x20, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x20, 0x64, 0x61, 0x74, 0x61, 0x20, 0x77, + 0x69, 0x6c, 0x6c, 0x20, 0x62, 0x65, 0x20, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x2e, 0x20, + 0x28, 0x65, 0x2e, 0x67, 0x2e, 0x20, 0x73, 0x33, 0x3a, 0x2f, 0x2f, 0x6d, 0x79, 0x2d, 0x62, 0x75, + 0x63, 0x6b, 0x65, 0x74, 0x2f, 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x73, 0x74, 0x72, 0x69, 0x6e, + 0x67, 0x29, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x01, 0x05, 0x12, 0x03, 0x49, 0x02, + 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x01, 0x01, 0x12, 0x03, 0x49, 0x09, 0x16, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x01, 0x03, 0x12, 0x03, 0x49, 0x19, 0x1a, 0x0a, 0x39, 0x0a, + 0x04, 0x04, 0x03, 0x02, 0x02, 0x12, 0x03, 0x4b, 0x02, 0x34, 0x1a, 0x2c, 0x20, 0x73, 0x75, 0x62, + 0x73, 0x65, 0x74, 0x20, 0x6f, 0x66, 0x20, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x20, 0x74, + 0x61, 0x73, 0x6b, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x6d, 0x65, + 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x02, + 0x06, 0x12, 0x03, 0x4b, 0x02, 0x17, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x02, 0x01, 0x12, + 0x03, 0x4b, 0x18, 0x2f, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x02, 0x03, 0x12, 0x03, 0x4b, + 0x32, 0x33, 0x0a, 0x68, 0x0a, 0x04, 0x04, 0x03, 0x02, 0x03, 0x12, 0x03, 0x4d, 0x02, 0x23, 0x1a, + 0x5b, 0x20, 0x4d, 0x61, 0x78, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x53, 0x69, 0x7a, 0x65, + 0x42, 0x79, 0x74, 0x65, 0x73, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6d, 0x61, 0x78, + 0x69, 0x6d, 0x75, 0x6d, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x20, 0x6f, 
0x66, 0x20, 0x74, 0x68, 0x65, + 0x20, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x63, 0x61, + 0x6e, 0x20, 0x62, 0x65, 0x20, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x20, 0x62, + 0x79, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x03, 0x02, 0x03, 0x05, 0x12, 0x03, 0x4d, 0x02, 0x07, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, + 0x02, 0x03, 0x01, 0x12, 0x03, 0x4d, 0x08, 0x1e, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x03, + 0x03, 0x12, 0x03, 0x4d, 0x21, 0x22, 0x0a, 0xa0, 0x01, 0x0a, 0x04, 0x04, 0x03, 0x02, 0x04, 0x12, + 0x03, 0x51, 0x02, 0x21, 0x1a, 0x92, 0x01, 0x20, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x20, 0x28, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x29, 0x20, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x20, + 0x62, 0x79, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x2e, 0x0a, 0x20, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x20, 0x77, 0x69, 0x6c, + 0x6c, 0x20, 0x75, 0x73, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, + 0x20, 0x61, 0x6e, 0x64, 0x20, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x20, 0x69, 0x6e, 0x20, 0x74, + 0x68, 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x20, + 0x69, 0x66, 0x20, 0x69, 0x74, 0x27, 0x73, 0x20, 0x4e, 0x6f, 0x6e, 0x65, 0x2e, 0x0a, 0x20, 0x2b, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, + 0x04, 0x06, 0x12, 0x03, 0x51, 0x02, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x04, 0x01, + 0x12, 0x03, 0x51, 0x12, 0x1c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x04, 0x03, 0x12, 0x03, + 0x51, 0x1f, 0x20, 0x0a, 0x51, 0x0a, 0x02, 0x04, 0x04, 0x12, 0x04, 0x55, 0x00, 0x60, 0x01, 0x1a, + 0x45, 0x20, 0x41, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x75, 0x73, 0x65, 0x64, + 0x20, 
0x74, 0x6f, 0x20, 0x66, 0x65, 0x74, 0x63, 0x68, 0x20, 0x61, 0x20, 0x6a, 0x6f, 0x62, 0x20, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x20, 0x66, 0x72, 0x6f, 0x6d, 0x20, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x20, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x20, 0x73, 0x65, + 0x72, 0x76, 0x65, 0x72, 0x2e, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x04, 0x01, 0x12, 0x03, 0x55, + 0x08, 0x16, 0x0a, 0x47, 0x0a, 0x04, 0x04, 0x04, 0x02, 0x00, 0x12, 0x03, 0x57, 0x02, 0x1a, 0x1a, + 0x3a, 0x20, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x20, 0x61, 0x62, 0x6f, 0x75, 0x74, + 0x20, 0x74, 0x68, 0x65, 0x20, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x20, 0x74, 0x6f, + 0x20, 0x62, 0x65, 0x20, 0x70, 0x61, 0x73, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, + 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x04, 0x02, 0x00, 0x05, 0x12, 0x03, 0x57, 0x02, 0x07, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, + 0x00, 0x01, 0x12, 0x03, 0x57, 0x08, 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x00, 0x03, + 0x12, 0x03, 0x57, 0x18, 0x19, 0x0a, 0x40, 0x0a, 0x04, 0x04, 0x04, 0x02, 0x01, 0x12, 0x03, 0x59, + 0x02, 0x21, 0x1a, 0x33, 0x20, 0x41, 0x20, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, + 0x64, 0x20, 0x79, 0x65, 0x74, 0x20, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x62, 0x6c, 0x65, + 0x20, 0x54, 0x61, 0x73, 0x6b, 0x20, 0x74, 0x79, 0x70, 0x65, 0x20, 0x69, 0x64, 0x65, 0x6e, 0x74, + 0x69, 0x66, 0x69, 0x65, 0x72, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x01, 0x06, + 0x12, 0x03, 0x59, 0x02, 0x0e, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x01, 0x01, 0x12, 0x03, + 0x59, 0x0f, 0x1c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x01, 0x03, 0x12, 0x03, 0x59, 0x1f, + 0x20, 0x0a, 0x64, 0x0a, 0x04, 0x04, 0x04, 0x02, 0x02, 0x12, 0x03, 0x5b, 0x02, 0x1b, 0x1a, 0x57, + 0x20, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x77, 0x68, 0x65, 0x72, + 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 
0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x20, 0x64, 0x61, + 0x74, 0x61, 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x62, 0x65, 0x20, 0x77, 0x72, 0x69, 0x74, 0x74, + 0x65, 0x6e, 0x2e, 0x20, 0x28, 0x65, 0x2e, 0x67, 0x2e, 0x20, 0x73, 0x33, 0x3a, 0x2f, 0x2f, 0x6d, + 0x79, 0x2d, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2f, 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x73, + 0x74, 0x72, 0x69, 0x6e, 0x67, 0x29, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x02, 0x05, + 0x12, 0x03, 0x5b, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x02, 0x01, 0x12, 0x03, + 0x5b, 0x09, 0x16, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x02, 0x03, 0x12, 0x03, 0x5b, 0x19, + 0x1a, 0x0a, 0xa0, 0x01, 0x0a, 0x04, 0x04, 0x04, 0x02, 0x03, 0x12, 0x03, 0x5f, 0x02, 0x21, 0x1a, + 0x92, 0x01, 0x20, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x28, 0x73, + 0x65, 0x63, 0x72, 0x65, 0x74, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x29, 0x20, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x74, 0x68, + 0x65, 0x20, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x0a, 0x20, 0x43, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x75, 0x73, 0x65, + 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x20, 0x61, 0x6e, 0x64, 0x20, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x20, 0x69, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, + 0x73, 0x6b, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x20, 0x69, 0x66, 0x20, 0x69, 0x74, + 0x27, 0x73, 0x20, 0x4e, 0x6f, 0x6e, 0x65, 0x2e, 0x0a, 0x20, 0x2b, 0x6f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x03, 0x06, 0x12, 0x03, 0x5f, + 0x02, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x03, 0x01, 0x12, 0x03, 0x5f, 0x12, 0x1c, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x03, 0x03, 0x12, 0x03, 0x5f, 0x1f, 0x20, 0x0a, 0x3a, + 0x0a, 0x02, 0x04, 0x05, 0x12, 0x04, 0x63, 0x00, 0x65, 0x01, 0x1a, 0x2e, 0x20, 
0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x20, 0x74, 0x6f, 0x20, 0x67, 0x65, 0x74, 0x20, 0x61, 0x6e, 0x20, + 0x69, 0x6e, 0x64, 0x69, 0x76, 0x69, 0x64, 0x75, 0x61, 0x6c, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x05, + 0x01, 0x12, 0x03, 0x63, 0x08, 0x17, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x05, 0x02, 0x00, 0x12, 0x03, + 0x64, 0x02, 0x18, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x00, 0x06, 0x12, 0x03, 0x64, 0x02, + 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x00, 0x01, 0x12, 0x03, 0x64, 0x0b, 0x13, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x00, 0x03, 0x12, 0x03, 0x64, 0x16, 0x17, 0x0a, 0x0a, 0x0a, + 0x02, 0x04, 0x06, 0x12, 0x04, 0x67, 0x00, 0x74, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x06, 0x01, + 0x12, 0x03, 0x67, 0x08, 0x10, 0x0a, 0xb1, 0x01, 0x0a, 0x04, 0x04, 0x06, 0x02, 0x00, 0x12, 0x03, + 0x6b, 0x02, 0x1b, 0x1a, 0xa3, 0x01, 0x20, 0x54, 0x68, 0x65, 0x20, 0x6f, 0x75, 0x74, 0x70, 0x75, + 0x74, 0x73, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x69, 0x6f, 0x6e, 0x2e, 0x20, 0x49, 0x74, 0x27, 0x73, 0x20, 0x74, 0x79, 0x70, 0x69, 0x63, 0x61, + 0x6c, 0x6c, 0x79, 0x20, 0x75, 0x73, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x73, 0x71, 0x6c, 0x20, + 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x20, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x20, + 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x63, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x20, 0x61, 0x0a, 0x20, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x75, 0x72, 0x65, + 0x64, 0x20, 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x20, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x69, + 0x6e, 0x67, 0x20, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x71, 0x75, 0x65, 0x72, 0x79, 0x20, + 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x20, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x0a, 0x20, 0x2b, + 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, + 0x00, 0x06, 0x12, 
0x03, 0x6b, 0x02, 0x0e, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x00, 0x01, + 0x12, 0x03, 0x6b, 0x0f, 0x16, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x00, 0x03, 0x12, 0x03, + 0x6b, 0x19, 0x1a, 0x0a, 0x55, 0x0a, 0x04, 0x04, 0x06, 0x02, 0x01, 0x12, 0x03, 0x6d, 0x02, 0x15, + 0x1a, 0x48, 0x20, 0x41, 0x20, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x76, 0x65, + 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, + 0x20, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x20, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x20, + 0x65, 0x2e, 0x67, 0x2e, 0x20, 0x77, 0x61, 0x69, 0x74, 0x69, 0x6e, 0x67, 0x20, 0x66, 0x6f, 0x72, + 0x20, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, + 0x02, 0x01, 0x05, 0x12, 0x03, 0x6d, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x01, + 0x01, 0x12, 0x03, 0x6d, 0x09, 0x10, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x01, 0x03, 0x12, + 0x03, 0x6d, 0x13, 0x14, 0x0a, 0x36, 0x0a, 0x04, 0x04, 0x06, 0x02, 0x02, 0x12, 0x03, 0x6f, 0x02, + 0x26, 0x1a, 0x29, 0x20, 0x6c, 0x6f, 0x67, 0x20, 0x69, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, + 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x06, 0x02, 0x02, 0x04, 0x12, 0x03, 0x6f, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, + 0x02, 0x02, 0x06, 0x12, 0x03, 0x6f, 0x0b, 0x17, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x02, + 0x01, 0x12, 0x03, 0x6f, 0x18, 0x21, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x02, 0x03, 0x12, + 0x03, 0x6f, 0x24, 0x25, 0x0a, 0x63, 0x0a, 0x04, 0x04, 0x06, 0x02, 0x03, 0x12, 0x03, 0x71, 0x02, + 0x25, 0x1a, 0x56, 0x20, 0x54, 0x68, 0x65, 0x20, 0x70, 0x68, 0x61, 0x73, 0x65, 0x20, 0x6f, 0x66, + 0x20, 0x74, 0x68, 0x65, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x69, + 0x73, 0x20, 0x75, 0x73, 0x65, 0x64, 0x20, 0x74, 0x6f, 
0x20, 0x64, 0x65, 0x74, 0x65, 0x72, 0x6d, + 0x69, 0x6e, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x68, 0x61, 0x73, 0x65, 0x20, 0x6f, 0x66, + 0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x27, 0x73, 0x20, 0x65, 0x78, + 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, + 0x03, 0x06, 0x12, 0x03, 0x71, 0x02, 0x1a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x03, 0x01, + 0x12, 0x03, 0x71, 0x1b, 0x20, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x03, 0x03, 0x12, 0x03, + 0x71, 0x23, 0x24, 0x0a, 0x35, 0x0a, 0x04, 0x04, 0x06, 0x02, 0x04, 0x12, 0x03, 0x73, 0x02, 0x29, + 0x1a, 0x28, 0x20, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x20, 0x64, 0x61, 0x74, 0x61, 0x20, 0x73, + 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x20, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, + 0x02, 0x04, 0x06, 0x12, 0x03, 0x73, 0x02, 0x18, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x04, + 0x01, 0x12, 0x03, 0x73, 0x19, 0x24, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x04, 0x03, 0x12, + 0x03, 0x73, 0x27, 0x28, 0x0a, 0x2f, 0x0a, 0x02, 0x04, 0x07, 0x12, 0x05, 0x77, 0x00, 0x80, 0x01, + 0x01, 0x1a, 0x22, 0x20, 0x41, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x75, 0x73, + 0x65, 0x64, 0x20, 0x74, 0x6f, 0x20, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x20, 0x61, 0x20, 0x74, + 0x61, 0x73, 0x6b, 0x2e, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x07, 0x01, 0x12, 0x03, 0x77, 0x08, + 0x19, 0x0a, 0x47, 0x0a, 0x04, 0x04, 0x07, 0x02, 0x00, 0x12, 0x03, 0x79, 0x02, 0x1a, 0x1a, 0x3a, + 0x20, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x20, 0x61, 0x62, 0x6f, 0x75, 0x74, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x20, 0x74, 0x6f, 0x20, + 0x62, 0x65, 0x20, 0x70, 0x61, 0x73, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 
0x07, + 0x02, 0x00, 0x05, 0x12, 0x03, 0x79, 0x02, 0x07, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x00, + 0x01, 0x12, 0x03, 0x79, 0x08, 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x00, 0x03, 0x12, + 0x03, 0x79, 0x18, 0x19, 0x0a, 0x40, 0x0a, 0x04, 0x04, 0x07, 0x02, 0x01, 0x12, 0x03, 0x7b, 0x02, + 0x21, 0x1a, 0x33, 0x20, 0x41, 0x20, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, + 0x20, 0x79, 0x65, 0x74, 0x20, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x62, 0x6c, 0x65, 0x20, + 0x54, 0x61, 0x73, 0x6b, 0x20, 0x74, 0x79, 0x70, 0x65, 0x20, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x66, 0x69, 0x65, 0x72, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x01, 0x06, 0x12, + 0x03, 0x7b, 0x02, 0x0e, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x01, 0x01, 0x12, 0x03, 0x7b, + 0x0f, 0x1c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x01, 0x03, 0x12, 0x03, 0x7b, 0x1f, 0x20, + 0x0a, 0xa0, 0x01, 0x0a, 0x04, 0x04, 0x07, 0x02, 0x02, 0x12, 0x03, 0x7f, 0x02, 0x21, 0x1a, 0x92, 0x01, 0x20, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x28, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x29, 0x20, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x74, 0x68, 0x65, @@ -956,563 +1354,735 @@ pub const FILE_DESCRIPTOR_SET: &[u8] = &[ 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x20, 0x69, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x20, 0x69, 0x66, 0x20, 0x69, 0x74, 0x27, 0x73, 0x20, 0x4e, 0x6f, 0x6e, 0x65, 0x2e, 0x0a, 0x20, 0x2b, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x61, 0x6c, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x04, 0x06, 0x12, 0x03, 0x3c, 0x02, - 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x04, 0x01, 0x12, 0x03, 0x3c, 0x12, 0x1c, 0x0a, - 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x04, 0x03, 0x12, 0x03, 0x3c, 0x1f, 0x20, 0x0a, 0x35, 0x0a, - 0x02, 0x04, 0x02, 0x12, 0x04, 0x40, 0x00, 0x43, 0x01, 0x1a, 0x29, 0x20, 
0x52, 0x65, 0x70, 0x72, - 0x65, 0x73, 0x65, 0x6e, 0x74, 0x73, 0x20, 0x61, 0x20, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x20, - 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x20, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x75, - 0x72, 0x65, 0x2e, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x02, 0x01, 0x12, 0x03, 0x40, 0x08, 0x1a, - 0x0a, 0x78, 0x0a, 0x04, 0x04, 0x02, 0x02, 0x00, 0x12, 0x03, 0x42, 0x02, 0x1a, 0x1a, 0x6b, 0x20, - 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x20, 0x69, 0x73, 0x20, - 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x20, 0x49, 0x74, 0x20, 0x63, 0x6f, 0x75, - 0x6c, 0x64, 0x20, 0x62, 0x65, 0x20, 0x61, 0x20, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x20, 0x28, - 0x6a, 0x6f, 0x62, 0x49, 0x64, 0x29, 0x20, 0x6f, 0x72, 0x20, 0x61, 0x20, 0x64, 0x69, 0x63, 0x74, - 0x20, 0x28, 0x6d, 0x6f, 0x72, 0x65, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x78, 0x20, 0x6d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x29, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, - 0x02, 0x00, 0x05, 0x12, 0x03, 0x42, 0x02, 0x07, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, - 0x01, 0x12, 0x03, 0x42, 0x08, 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, 0x03, 0x12, - 0x03, 0x42, 0x18, 0x19, 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x03, 0x12, 0x04, 0x45, 0x00, 0x52, 0x01, - 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x03, 0x01, 0x12, 0x03, 0x45, 0x08, 0x1b, 0x0a, 0x53, 0x0a, 0x04, - 0x04, 0x03, 0x02, 0x00, 0x12, 0x03, 0x47, 0x02, 0x21, 0x1a, 0x46, 0x20, 0x54, 0x65, 0x6d, 0x70, - 0x6c, 0x61, 0x74, 0x65, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, - 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x65, 0x6e, 0x63, 0x61, 0x70, 0x73, 0x75, 0x6c, 0x61, 0x74, - 0x65, 0x73, 0x20, 0x61, 0x6c, 0x6c, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x2e, - 0x0a, 0x0a, 
0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x00, 0x06, 0x12, 0x03, 0x47, 0x02, 0x13, 0x0a, - 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x00, 0x01, 0x12, 0x03, 0x47, 0x14, 0x1c, 0x0a, 0x0c, 0x0a, - 0x05, 0x04, 0x03, 0x02, 0x00, 0x03, 0x12, 0x03, 0x47, 0x1f, 0x20, 0x0a, 0x64, 0x0a, 0x04, 0x04, - 0x03, 0x02, 0x01, 0x12, 0x03, 0x49, 0x02, 0x1b, 0x1a, 0x57, 0x20, 0x50, 0x72, 0x65, 0x66, 0x69, - 0x78, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x77, 0x68, 0x65, 0x72, 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, - 0x20, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x20, 0x64, 0x61, 0x74, 0x61, 0x20, 0x77, 0x69, 0x6c, - 0x6c, 0x20, 0x62, 0x65, 0x20, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, 0x2e, 0x20, 0x28, 0x65, - 0x2e, 0x67, 0x2e, 0x20, 0x73, 0x33, 0x3a, 0x2f, 0x2f, 0x6d, 0x79, 0x2d, 0x62, 0x75, 0x63, 0x6b, - 0x65, 0x74, 0x2f, 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x29, - 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x01, 0x05, 0x12, 0x03, 0x49, 0x02, 0x08, 0x0a, - 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x01, 0x01, 0x12, 0x03, 0x49, 0x09, 0x16, 0x0a, 0x0c, 0x0a, - 0x05, 0x04, 0x03, 0x02, 0x01, 0x03, 0x12, 0x03, 0x49, 0x19, 0x1a, 0x0a, 0x39, 0x0a, 0x04, 0x04, - 0x03, 0x02, 0x02, 0x12, 0x03, 0x4b, 0x02, 0x34, 0x1a, 0x2c, 0x20, 0x73, 0x75, 0x62, 0x73, 0x65, - 0x74, 0x20, 0x6f, 0x66, 0x20, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x20, 0x74, 0x61, 0x73, - 0x6b, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x6d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x02, 0x06, 0x12, - 0x03, 0x4b, 0x02, 0x17, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x02, 0x01, 0x12, 0x03, 0x4b, - 0x18, 0x2f, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x02, 0x03, 0x12, 0x03, 0x4b, 0x32, 0x33, - 0x0a, 0x68, 0x0a, 0x04, 0x04, 0x03, 0x02, 0x03, 0x12, 0x03, 0x4d, 0x02, 0x23, 0x1a, 0x5b, 0x20, - 0x4d, 0x61, 0x78, 0x44, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x53, 0x69, 0x7a, 0x65, 0x42, 0x79, - 0x74, 0x65, 0x73, 0x20, 0x69, 0x73, 0x20, 0x74, 
0x68, 0x65, 0x20, 0x6d, 0x61, 0x78, 0x69, 0x6d, - 0x75, 0x6d, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x64, - 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x63, 0x61, 0x6e, 0x20, - 0x62, 0x65, 0x20, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, - 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, - 0x02, 0x03, 0x05, 0x12, 0x03, 0x4d, 0x02, 0x07, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x03, - 0x01, 0x12, 0x03, 0x4d, 0x08, 0x1e, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x03, 0x03, 0x12, - 0x03, 0x4d, 0x21, 0x22, 0x0a, 0xa0, 0x01, 0x0a, 0x04, 0x04, 0x03, 0x02, 0x04, 0x12, 0x03, 0x51, - 0x02, 0x21, 0x1a, 0x92, 0x01, 0x20, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x20, 0x28, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x29, 0x20, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x20, 0x62, 0x79, - 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x0a, - 0x20, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, - 0x75, 0x73, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x20, 0x61, - 0x6e, 0x64, 0x20, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x20, 0x69, 0x6e, 0x20, 0x74, 0x68, 0x65, - 0x20, 0x74, 0x61, 0x73, 0x6b, 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x20, 0x69, 0x66, - 0x20, 0x69, 0x74, 0x27, 0x73, 0x20, 0x4e, 0x6f, 0x6e, 0x65, 0x2e, 0x0a, 0x20, 0x2b, 0x6f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x04, 0x06, - 0x12, 0x03, 0x51, 0x02, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x04, 0x01, 0x12, 0x03, - 0x51, 0x12, 0x1c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x04, 0x03, 0x12, 0x03, 0x51, 0x1f, - 0x20, 0x0a, 0x51, 0x0a, 0x02, 0x04, 0x04, 0x12, 0x04, 0x55, 0x00, 0x60, 0x01, 0x1a, 
0x45, 0x20, - 0x41, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x75, 0x73, 0x65, 0x64, 0x20, 0x74, - 0x6f, 0x20, 0x66, 0x65, 0x74, 0x63, 0x68, 0x20, 0x61, 0x20, 0x6a, 0x6f, 0x62, 0x20, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x20, 0x66, 0x72, 0x6f, 0x6d, 0x20, 0x66, 0x6c, 0x79, 0x74, - 0x65, 0x20, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x20, 0x73, 0x65, 0x72, 0x76, - 0x65, 0x72, 0x2e, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x04, 0x01, 0x12, 0x03, 0x55, 0x08, 0x16, - 0x0a, 0x47, 0x0a, 0x04, 0x04, 0x04, 0x02, 0x00, 0x12, 0x03, 0x57, 0x02, 0x1a, 0x1a, 0x3a, 0x20, - 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x20, 0x61, 0x62, 0x6f, 0x75, 0x74, 0x20, 0x74, - 0x68, 0x65, 0x20, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x20, 0x74, 0x6f, 0x20, 0x62, - 0x65, 0x20, 0x70, 0x61, 0x73, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x6f, - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, - 0x00, 0x05, 0x12, 0x03, 0x57, 0x02, 0x07, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x00, 0x01, - 0x12, 0x03, 0x57, 0x08, 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x00, 0x03, 0x12, 0x03, - 0x57, 0x18, 0x19, 0x0a, 0x40, 0x0a, 0x04, 0x04, 0x04, 0x02, 0x01, 0x12, 0x03, 0x59, 0x02, 0x21, - 0x1a, 0x33, 0x20, 0x41, 0x20, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x20, - 0x79, 0x65, 0x74, 0x20, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x62, 0x6c, 0x65, 0x20, 0x54, - 0x61, 0x73, 0x6b, 0x20, 0x74, 0x79, 0x70, 0x65, 0x20, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, - 0x69, 0x65, 0x72, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x01, 0x06, 0x12, 0x03, - 0x59, 0x02, 0x0e, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x01, 0x01, 0x12, 0x03, 0x59, 0x0f, - 0x1c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x01, 0x03, 0x12, 0x03, 0x59, 0x1f, 0x20, 0x0a, - 0x64, 0x0a, 0x04, 0x04, 0x04, 0x02, 0x02, 0x12, 0x03, 0x5b, 0x02, 0x1b, 0x1a, 0x57, 0x20, 0x50, - 0x72, 0x65, 0x66, 0x69, 
0x78, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x77, 0x68, 0x65, 0x72, 0x65, 0x20, - 0x74, 0x61, 0x73, 0x6b, 0x20, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x20, 0x64, 0x61, 0x74, 0x61, - 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x62, 0x65, 0x20, 0x77, 0x72, 0x69, 0x74, 0x74, 0x65, 0x6e, - 0x2e, 0x20, 0x28, 0x65, 0x2e, 0x67, 0x2e, 0x20, 0x73, 0x33, 0x3a, 0x2f, 0x2f, 0x6d, 0x79, 0x2d, - 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x2f, 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x73, 0x74, 0x72, - 0x69, 0x6e, 0x67, 0x29, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x02, 0x05, 0x12, 0x03, - 0x5b, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x02, 0x01, 0x12, 0x03, 0x5b, 0x09, - 0x16, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x02, 0x03, 0x12, 0x03, 0x5b, 0x19, 0x1a, 0x0a, - 0xa0, 0x01, 0x0a, 0x04, 0x04, 0x04, 0x02, 0x03, 0x12, 0x03, 0x5f, 0x02, 0x21, 0x1a, 0x92, 0x01, - 0x20, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x28, 0x73, 0x65, 0x63, - 0x72, 0x65, 0x74, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x29, 0x20, - 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x74, 0x68, 0x65, 0x20, - 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x0a, 0x20, 0x43, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x75, 0x73, 0x65, 0x20, 0x74, - 0x68, 0x65, 0x20, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x63, 0x6f, - 0x6e, 0x66, 0x69, 0x67, 0x20, 0x69, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, - 0x54, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x20, 0x69, 0x66, 0x20, 0x69, 0x74, 0x27, 0x73, - 0x20, 0x4e, 0x6f, 0x6e, 0x65, 0x2e, 0x0a, 0x20, 0x2b, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, - 0x6c, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x03, 0x06, 0x12, 0x03, 0x5f, 0x02, 0x11, - 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x04, 0x02, 0x03, 0x01, 0x12, 0x03, 0x5f, 0x12, 0x1c, 0x0a, 0x0c, - 0x0a, 0x05, 0x04, 0x04, 0x02, 0x03, 0x03, 0x12, 0x03, 0x5f, 
0x1f, 0x20, 0x0a, 0x3a, 0x0a, 0x02, - 0x04, 0x05, 0x12, 0x04, 0x63, 0x00, 0x65, 0x01, 0x1a, 0x2e, 0x20, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x20, 0x74, 0x6f, 0x20, 0x67, 0x65, 0x74, 0x20, 0x61, 0x6e, 0x20, 0x69, 0x6e, - 0x64, 0x69, 0x76, 0x69, 0x64, 0x75, 0x61, 0x6c, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x05, 0x01, 0x12, - 0x03, 0x63, 0x08, 0x17, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x05, 0x02, 0x00, 0x12, 0x03, 0x64, 0x02, - 0x18, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x00, 0x06, 0x12, 0x03, 0x64, 0x02, 0x0a, 0x0a, - 0x0c, 0x0a, 0x05, 0x04, 0x05, 0x02, 0x00, 0x01, 0x12, 0x03, 0x64, 0x0b, 0x13, 0x0a, 0x0c, 0x0a, - 0x05, 0x04, 0x05, 0x02, 0x00, 0x03, 0x12, 0x03, 0x64, 0x16, 0x17, 0x0a, 0x0a, 0x0a, 0x02, 0x04, - 0x06, 0x12, 0x04, 0x67, 0x00, 0x74, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x06, 0x01, 0x12, 0x03, - 0x67, 0x08, 0x10, 0x0a, 0xb1, 0x01, 0x0a, 0x04, 0x04, 0x06, 0x02, 0x00, 0x12, 0x03, 0x6b, 0x02, - 0x1b, 0x1a, 0xa3, 0x01, 0x20, 0x54, 0x68, 0x65, 0x20, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x73, - 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, - 0x6e, 0x2e, 0x20, 0x49, 0x74, 0x27, 0x73, 0x20, 0x74, 0x79, 0x70, 0x69, 0x63, 0x61, 0x6c, 0x6c, - 0x79, 0x20, 0x75, 0x73, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x73, 0x71, 0x6c, 0x20, 0x74, 0x61, - 0x73, 0x6b, 0x2e, 0x20, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x20, 0x73, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x63, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x20, 0x61, 0x0a, 0x20, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x75, 0x72, 0x65, 0x64, 0x20, - 0x64, 0x61, 0x74, 0x61, 0x73, 0x65, 0x74, 0x20, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x69, 0x6e, 0x67, - 0x20, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x71, 0x75, 0x65, 0x72, 0x79, 0x20, 0x72, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x20, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x0a, 0x20, 0x2b, 0x6f, 0x70, - 
0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x00, 0x06, - 0x12, 0x03, 0x6b, 0x02, 0x0e, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x00, 0x01, 0x12, 0x03, - 0x6b, 0x0f, 0x16, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x00, 0x03, 0x12, 0x03, 0x6b, 0x19, - 0x1a, 0x0a, 0x55, 0x0a, 0x04, 0x04, 0x06, 0x02, 0x01, 0x12, 0x03, 0x6d, 0x02, 0x15, 0x1a, 0x48, - 0x20, 0x41, 0x20, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x76, 0x65, 0x20, 0x6d, - 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, - 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x20, 0x73, 0x74, 0x61, 0x74, 0x65, 0x2e, 0x20, 0x65, 0x2e, - 0x67, 0x2e, 0x20, 0x77, 0x61, 0x69, 0x74, 0x69, 0x6e, 0x67, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x01, - 0x05, 0x12, 0x03, 0x6d, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x01, 0x01, 0x12, - 0x03, 0x6d, 0x09, 0x10, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x01, 0x03, 0x12, 0x03, 0x6d, - 0x13, 0x14, 0x0a, 0x36, 0x0a, 0x04, 0x04, 0x06, 0x02, 0x02, 0x12, 0x03, 0x6f, 0x02, 0x26, 0x1a, - 0x29, 0x20, 0x6c, 0x6f, 0x67, 0x20, 0x69, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x65, - 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, - 0x02, 0x02, 0x04, 0x12, 0x03, 0x6f, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x02, - 0x06, 0x12, 0x03, 0x6f, 0x0b, 0x17, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x02, 0x01, 0x12, - 0x03, 0x6f, 0x18, 0x21, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x02, 0x03, 0x12, 0x03, 0x6f, - 0x24, 0x25, 0x0a, 0x63, 0x0a, 0x04, 0x04, 0x06, 0x02, 0x03, 0x12, 0x03, 0x71, 0x02, 0x25, 0x1a, - 0x56, 0x20, 0x54, 0x68, 0x65, 0x20, 0x70, 0x68, 0x61, 0x73, 0x65, 0x20, 0x6f, 0x66, 0x20, 0x74, - 0x68, 0x65, 0x20, 0x65, 0x78, 0x65, 
0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x69, 0x73, 0x20, - 0x75, 0x73, 0x65, 0x64, 0x20, 0x74, 0x6f, 0x20, 0x64, 0x65, 0x74, 0x65, 0x72, 0x6d, 0x69, 0x6e, - 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x68, 0x61, 0x73, 0x65, 0x20, 0x6f, 0x66, 0x20, 0x74, - 0x68, 0x65, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x27, 0x73, 0x20, 0x65, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x03, 0x06, - 0x12, 0x03, 0x71, 0x02, 0x1a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x03, 0x01, 0x12, 0x03, - 0x71, 0x1b, 0x20, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x03, 0x03, 0x12, 0x03, 0x71, 0x23, - 0x24, 0x0a, 0x35, 0x0a, 0x04, 0x04, 0x06, 0x02, 0x04, 0x12, 0x03, 0x73, 0x02, 0x29, 0x1a, 0x28, - 0x20, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x20, 0x64, 0x61, 0x74, 0x61, 0x20, 0x73, 0x70, 0x65, - 0x63, 0x69, 0x66, 0x69, 0x63, 0x20, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x04, - 0x06, 0x12, 0x03, 0x73, 0x02, 0x18, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x04, 0x01, 0x12, - 0x03, 0x73, 0x19, 0x24, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x06, 0x02, 0x04, 0x03, 0x12, 0x03, 0x73, - 0x27, 0x28, 0x0a, 0x2f, 0x0a, 0x02, 0x04, 0x07, 0x12, 0x05, 0x77, 0x00, 0x80, 0x01, 0x01, 0x1a, - 0x22, 0x20, 0x41, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x75, 0x73, 0x65, 0x64, - 0x20, 0x74, 0x6f, 0x20, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x20, 0x61, 0x20, 0x74, 0x61, 0x73, - 0x6b, 0x2e, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x07, 0x01, 0x12, 0x03, 0x77, 0x08, 0x19, 0x0a, - 0x47, 0x0a, 0x04, 0x04, 0x07, 0x02, 0x00, 0x12, 0x03, 0x79, 0x02, 0x1a, 0x1a, 0x3a, 0x20, 0x4d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x20, 0x61, 0x62, 0x6f, 0x75, 0x74, 0x20, 0x74, 0x68, - 0x65, 0x20, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x20, 0x74, 0x6f, 0x20, 0x62, 0x65, - 0x20, 0x70, 0x61, 0x73, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 
0x20, 0x63, 0x6f, 0x6e, - 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x00, - 0x05, 0x12, 0x03, 0x79, 0x02, 0x07, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x00, 0x01, 0x12, - 0x03, 0x79, 0x08, 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x00, 0x03, 0x12, 0x03, 0x79, - 0x18, 0x19, 0x0a, 0x40, 0x0a, 0x04, 0x04, 0x07, 0x02, 0x01, 0x12, 0x03, 0x7b, 0x02, 0x21, 0x1a, - 0x33, 0x20, 0x41, 0x20, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x20, 0x79, - 0x65, 0x74, 0x20, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x62, 0x6c, 0x65, 0x20, 0x54, 0x61, - 0x73, 0x6b, 0x20, 0x74, 0x79, 0x70, 0x65, 0x20, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, - 0x65, 0x72, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x01, 0x06, 0x12, 0x03, 0x7b, - 0x02, 0x0e, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x01, 0x01, 0x12, 0x03, 0x7b, 0x0f, 0x1c, - 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x01, 0x03, 0x12, 0x03, 0x7b, 0x1f, 0x20, 0x0a, 0xa0, - 0x01, 0x0a, 0x04, 0x04, 0x07, 0x02, 0x02, 0x12, 0x03, 0x7f, 0x02, 0x21, 0x1a, 0x92, 0x01, 0x20, - 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x28, 0x73, 0x65, 0x63, 0x72, - 0x65, 0x74, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x29, 0x20, 0x72, - 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x0a, 0x20, 0x43, 0x6f, 0x6e, 0x6e, 0x65, - 0x63, 0x74, 0x6f, 0x72, 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x75, 0x73, 0x65, 0x20, 0x74, 0x68, - 0x65, 0x20, 0x73, 0x65, 0x63, 0x72, 0x65, 0x74, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x20, 0x69, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x54, - 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x20, 0x69, 0x66, 0x20, 0x69, 0x74, 0x27, 0x73, 0x20, - 0x4e, 0x6f, 0x6e, 0x65, 0x2e, 0x0a, 0x20, 0x2b, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, - 0x0a, 0x0a, 
0x0c, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x02, 0x06, 0x12, 0x03, 0x7f, 0x02, 0x11, 0x0a, - 0x0c, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x02, 0x01, 0x12, 0x03, 0x7f, 0x12, 0x1c, 0x0a, 0x0c, 0x0a, - 0x05, 0x04, 0x07, 0x02, 0x02, 0x03, 0x12, 0x03, 0x7f, 0x1f, 0x20, 0x0a, 0x28, 0x0a, 0x02, 0x04, - 0x08, 0x12, 0x04, 0x83, 0x01, 0x00, 0x1d, 0x1a, 0x1c, 0x20, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x20, 0x74, 0x6f, 0x20, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x20, 0x61, 0x20, 0x74, - 0x61, 0x73, 0x6b, 0x2e, 0x0a, 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x08, 0x01, 0x12, 0x04, 0x83, 0x01, - 0x08, 0x1a, 0x0a, 0x3c, 0x0a, 0x02, 0x04, 0x09, 0x12, 0x06, 0x86, 0x01, 0x00, 0x8b, 0x01, 0x01, - 0x1a, 0x2e, 0x20, 0x41, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x63, 0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x20, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x2e, 0x0a, - 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x09, 0x01, 0x12, 0x04, 0x86, 0x01, 0x08, 0x11, 0x0a, 0x45, 0x0a, - 0x04, 0x04, 0x09, 0x02, 0x00, 0x12, 0x04, 0x88, 0x01, 0x02, 0x12, 0x1a, 0x37, 0x20, 0x4e, 0x61, - 0x6d, 0x65, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x64, 0x65, 0x76, 0x65, 0x6c, 0x6f, - 0x70, 0x65, 0x72, 0x2d, 0x61, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x20, 0x6e, 0x61, 0x6d, - 0x65, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x09, 0x02, 0x00, 0x05, 0x12, 0x04, 0x88, - 0x01, 0x02, 0x08, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x09, 0x02, 0x00, 0x01, 0x12, 0x04, 0x88, 0x01, - 0x09, 0x0d, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x09, 0x02, 0x00, 0x03, 0x12, 0x04, 0x88, 0x01, 0x10, - 0x11, 0x0a, 0x68, 0x0a, 0x04, 0x04, 0x09, 0x02, 0x01, 0x12, 0x04, 0x8a, 0x01, 0x02, 0x36, 0x1a, - 0x5a, 0x20, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x73, 0x6b, - 0x5f, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 
0x69, 0x65, 0x73, 0x20, 0x61, 0x72, 0x65, 0x20, - 0x74, 0x68, 0x65, 0x20, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x20, 0x6f, - 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x20, 0x74, 0x68, 0x61, 0x74, - 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x20, 0x63, - 0x61, 0x6e, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, - 0x09, 0x02, 0x01, 0x04, 0x12, 0x04, 0x8a, 0x01, 0x02, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x09, - 0x02, 0x01, 0x06, 0x12, 0x04, 0x8a, 0x01, 0x0b, 0x17, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x09, 0x02, - 0x01, 0x01, 0x12, 0x04, 0x8a, 0x01, 0x18, 0x31, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x09, 0x02, 0x01, - 0x03, 0x12, 0x04, 0x8a, 0x01, 0x34, 0x35, 0x0a, 0x0c, 0x0a, 0x02, 0x04, 0x0a, 0x12, 0x06, 0x8d, - 0x01, 0x00, 0x92, 0x01, 0x01, 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x0a, 0x01, 0x12, 0x04, 0x8d, 0x01, - 0x08, 0x14, 0x0a, 0x2a, 0x0a, 0x04, 0x04, 0x0a, 0x02, 0x00, 0x12, 0x04, 0x8f, 0x01, 0x02, 0x12, - 0x1a, 0x1c, 0x20, 0x54, 0x68, 0x65, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x20, 0x6f, 0x66, 0x20, 0x74, - 0x68, 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x0a, 0x0a, 0x0d, - 0x0a, 0x05, 0x04, 0x0a, 0x02, 0x00, 0x05, 0x12, 0x04, 0x8f, 0x01, 0x02, 0x08, 0x0a, 0x0d, 0x0a, - 0x05, 0x04, 0x0a, 0x02, 0x00, 0x01, 0x12, 0x04, 0x8f, 0x01, 0x09, 0x0d, 0x0a, 0x0d, 0x0a, 0x05, - 0x04, 0x0a, 0x02, 0x00, 0x03, 0x12, 0x04, 0x8f, 0x01, 0x10, 0x11, 0x0a, 0x2d, 0x0a, 0x04, 0x04, - 0x0a, 0x02, 0x01, 0x12, 0x04, 0x91, 0x01, 0x02, 0x14, 0x1a, 0x1f, 0x20, 0x54, 0x68, 0x65, 0x20, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, - 0x61, 0x73, 0x6b, 0x20, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0a, - 0x02, 0x01, 0x05, 0x12, 0x04, 0x91, 0x01, 0x02, 0x07, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0a, 0x02, - 0x01, 0x01, 0x12, 0x04, 0x91, 0x01, 0x08, 0x0f, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0a, 
0x02, 0x01, - 0x03, 0x12, 0x04, 0x91, 0x01, 0x12, 0x13, 0x0a, 0x2e, 0x0a, 0x02, 0x04, 0x0b, 0x12, 0x06, 0x95, - 0x01, 0x00, 0x98, 0x01, 0x01, 0x1a, 0x20, 0x20, 0x41, 0x20, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x20, 0x74, 0x6f, 0x20, 0x67, 0x65, 0x74, 0x20, 0x61, 0x6e, 0x20, 0x63, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x0a, 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x0b, 0x01, 0x12, 0x04, - 0x95, 0x01, 0x08, 0x1b, 0x0a, 0x2a, 0x0a, 0x04, 0x04, 0x0b, 0x02, 0x00, 0x12, 0x04, 0x97, 0x01, + 0x61, 0x6c, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x02, 0x06, 0x12, 0x03, 0x7f, 0x02, + 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x02, 0x01, 0x12, 0x03, 0x7f, 0x12, 0x1c, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x07, 0x02, 0x02, 0x03, 0x12, 0x03, 0x7f, 0x1f, 0x20, 0x0a, 0x28, 0x0a, + 0x02, 0x04, 0x08, 0x12, 0x04, 0x83, 0x01, 0x00, 0x1d, 0x1a, 0x1c, 0x20, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x20, 0x74, 0x6f, 0x20, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x20, 0x61, + 0x20, 0x74, 0x61, 0x73, 0x6b, 0x2e, 0x0a, 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x08, 0x01, 0x12, 0x04, + 0x83, 0x01, 0x08, 0x1a, 0x0a, 0x3c, 0x0a, 0x02, 0x04, 0x09, 0x12, 0x06, 0x86, 0x01, 0x00, 0x8b, + 0x01, 0x01, 0x1a, 0x2e, 0x20, 0x41, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x20, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0x2e, 0x0a, 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x09, 0x01, 0x12, 0x04, 0x86, 0x01, 0x08, 0x11, 0x0a, + 0x45, 0x0a, 0x04, 0x04, 0x09, 0x02, 0x00, 0x12, 0x04, 0x88, 0x01, 0x02, 0x12, 0x1a, 0x37, 0x20, + 0x4e, 0x61, 0x6d, 0x65, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x64, 0x65, 0x76, 0x65, + 0x6c, 0x6f, 0x70, 0x65, 0x72, 0x2d, 0x61, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x20, 0x6e, + 0x61, 0x6d, 0x65, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x6f, 0x72, 
0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x09, 0x02, 0x00, 0x05, 0x12, + 0x04, 0x88, 0x01, 0x02, 0x08, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x09, 0x02, 0x00, 0x01, 0x12, 0x04, + 0x88, 0x01, 0x09, 0x0d, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x09, 0x02, 0x00, 0x03, 0x12, 0x04, 0x88, + 0x01, 0x10, 0x11, 0x0a, 0x68, 0x0a, 0x04, 0x04, 0x09, 0x02, 0x01, 0x12, 0x04, 0x8a, 0x01, 0x02, + 0x36, 0x1a, 0x5a, 0x20, 0x53, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x61, + 0x73, 0x6b, 0x5f, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x69, 0x65, 0x73, 0x20, 0x61, 0x72, + 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x61, 0x74, 0x65, 0x67, 0x6f, 0x72, 0x69, 0x65, 0x73, + 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x20, 0x74, 0x68, + 0x61, 0x74, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x20, 0x63, 0x61, 0x6e, 0x20, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, + 0x05, 0x04, 0x09, 0x02, 0x01, 0x04, 0x12, 0x04, 0x8a, 0x01, 0x02, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, + 0x04, 0x09, 0x02, 0x01, 0x06, 0x12, 0x04, 0x8a, 0x01, 0x0b, 0x17, 0x0a, 0x0d, 0x0a, 0x05, 0x04, + 0x09, 0x02, 0x01, 0x01, 0x12, 0x04, 0x8a, 0x01, 0x18, 0x31, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x09, + 0x02, 0x01, 0x03, 0x12, 0x04, 0x8a, 0x01, 0x34, 0x35, 0x0a, 0x0c, 0x0a, 0x02, 0x04, 0x0a, 0x12, + 0x06, 0x8d, 0x01, 0x00, 0x92, 0x01, 0x01, 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x0a, 0x01, 0x12, 0x04, + 0x8d, 0x01, 0x08, 0x14, 0x0a, 0x2a, 0x0a, 0x04, 0x04, 0x0a, 0x02, 0x00, 0x12, 0x04, 0x8f, 0x01, 0x02, 0x12, 0x1a, 0x1c, 0x20, 0x54, 0x68, 0x65, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x20, 0x6f, 0x66, - 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x0a, - 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0b, 0x02, 0x00, 0x05, 0x12, 0x04, 0x97, 0x01, 0x02, 0x08, 0x0a, - 0x0d, 0x0a, 0x05, 0x04, 0x0b, 0x02, 0x00, 0x01, 0x12, 0x04, 0x97, 0x01, 0x09, 0x0d, 0x0a, 0x0d, - 0x0a, 0x05, 0x04, 0x0b, 0x02, 0x00, 0x03, 0x12, 0x04, 0x97, 0x01, 
0x10, 0x11, 0x0a, 0x33, 0x0a, - 0x02, 0x04, 0x0c, 0x12, 0x06, 0x9b, 0x01, 0x00, 0x9d, 0x01, 0x01, 0x1a, 0x25, 0x20, 0x41, 0x20, - 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, - 0x69, 0x6e, 0x67, 0x20, 0x61, 0x6e, 0x20, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, - 0x2e, 0x0a, 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x0c, 0x01, 0x12, 0x04, 0x9b, 0x01, 0x08, 0x1c, 0x0a, - 0x0c, 0x0a, 0x04, 0x04, 0x0c, 0x02, 0x00, 0x12, 0x04, 0x9c, 0x01, 0x02, 0x1a, 0x0a, 0x0d, 0x0a, - 0x05, 0x04, 0x0c, 0x02, 0x00, 0x06, 0x12, 0x04, 0x9c, 0x01, 0x02, 0x0b, 0x0a, 0x0d, 0x0a, 0x05, - 0x04, 0x0c, 0x02, 0x00, 0x01, 0x12, 0x04, 0x9c, 0x01, 0x0c, 0x15, 0x0a, 0x0d, 0x0a, 0x05, 0x04, - 0x0c, 0x02, 0x00, 0x03, 0x12, 0x04, 0x9c, 0x01, 0x18, 0x19, 0x0a, 0x2f, 0x0a, 0x02, 0x04, 0x0d, - 0x12, 0x04, 0xa0, 0x01, 0x00, 0x20, 0x1a, 0x23, 0x20, 0x41, 0x20, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x6c, 0x69, 0x73, 0x74, 0x20, 0x61, 0x6c, 0x6c, 0x20, 0x63, - 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x2e, 0x0a, 0x0a, 0x0b, 0x0a, 0x03, 0x04, - 0x0d, 0x01, 0x12, 0x04, 0xa0, 0x01, 0x08, 0x1d, 0x0a, 0x3b, 0x0a, 0x02, 0x04, 0x0e, 0x12, 0x06, - 0xa3, 0x01, 0x00, 0xa5, 0x01, 0x01, 0x1a, 0x2d, 0x20, 0x41, 0x20, 0x72, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x20, 0x61, - 0x20, 0x6c, 0x69, 0x73, 0x74, 0x20, 0x6f, 0x66, 0x20, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x73, 0x2e, 0x0a, 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x0e, 0x01, 0x12, 0x04, 0xa3, 0x01, - 0x08, 0x1e, 0x0a, 0x0c, 0x0a, 0x04, 0x04, 0x0e, 0x02, 0x00, 0x12, 0x04, 0xa4, 0x01, 0x02, 0x24, - 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0e, 0x02, 0x00, 0x04, 0x12, 0x04, 0xa4, 0x01, 0x02, 0x0a, 0x0a, - 0x0d, 0x0a, 0x05, 0x04, 0x0e, 0x02, 0x00, 0x06, 0x12, 0x04, 0xa4, 0x01, 0x0b, 0x14, 0x0a, 0x0d, - 0x0a, 0x05, 0x04, 0x0e, 0x02, 0x00, 0x01, 0x12, 0x04, 0xa4, 0x01, 0x15, 0x1f, 0x0a, 0x0d, 0x0a, - 0x05, 
0x04, 0x0e, 0x02, 0x00, 0x03, 0x12, 0x04, 0xa4, 0x01, 0x22, 0x23, 0x0a, 0x43, 0x0a, 0x02, - 0x04, 0x0f, 0x12, 0x06, 0xa8, 0x01, 0x00, 0xb6, 0x01, 0x01, 0x1a, 0x35, 0x20, 0x41, 0x20, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x67, 0x65, 0x74, 0x20, 0x74, 0x68, - 0x65, 0x20, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x20, 0x66, 0x72, 0x6f, 0x6d, 0x20, 0x61, - 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, - 0x0a, 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x0f, 0x01, 0x12, 0x04, 0xa8, 0x01, 0x08, 0x1d, 0x0a, 0x75, - 0x0a, 0x04, 0x04, 0x0f, 0x02, 0x00, 0x12, 0x04, 0xaa, 0x01, 0x02, 0x1a, 0x1a, 0x67, 0x20, 0x4d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x20, 0x69, 0x73, 0x20, 0x63, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, - 0x74, 0x6f, 0x72, 0x2e, 0x20, 0x49, 0x74, 0x20, 0x63, 0x6f, 0x75, 0x6c, 0x64, 0x20, 0x62, 0x65, - 0x20, 0x61, 0x20, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x20, 0x28, 0x6a, 0x6f, 0x62, 0x49, 0x64, - 0x29, 0x20, 0x6f, 0x72, 0x20, 0x61, 0x20, 0x64, 0x69, 0x63, 0x74, 0x20, 0x28, 0x6d, 0x6f, 0x72, - 0x65, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x78, 0x20, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x29, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0f, 0x02, 0x00, 0x05, 0x12, 0x04, - 0xaa, 0x01, 0x02, 0x07, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0f, 0x02, 0x00, 0x01, 0x12, 0x04, 0xaa, - 0x01, 0x08, 0x15, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0f, 0x02, 0x00, 0x03, 0x12, 0x04, 0xaa, 0x01, - 0x18, 0x19, 0x0a, 0xa4, 0x01, 0x0a, 0x04, 0x04, 0x0f, 0x02, 0x01, 0x12, 0x04, 0xad, 0x01, 0x02, - 0x1e, 0x1a, 0x95, 0x01, 0x20, 0x54, 0x68, 0x65, 0x20, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, - 0x20, 0x74, 0x6f, 0x20, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x20, 0x49, 0x66, 0x20, 0x65, 0x6d, - 0x70, 0x74, 0x79, 0x2c, 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, - 0x20, 0x61, 0x20, 0x64, 0x65, 0x66, 0x61, 
0x75, 0x6c, 0x74, 0x20, 0x73, 0x65, 0x74, 0x20, 0x6f, - 0x66, 0x20, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x0a, 0x20, 0x65, 0x2e, 0x67, 0x2e, - 0x20, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x45, 0x54, 0x52, 0x49, - 0x43, 0x5f, 0x55, 0x53, 0x45, 0x44, 0x5f, 0x43, 0x50, 0x55, 0x5f, 0x41, 0x56, 0x47, 0x20, 0x6f, - 0x72, 0x20, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x45, 0x54, 0x52, - 0x49, 0x43, 0x5f, 0x55, 0x53, 0x45, 0x44, 0x5f, 0x4d, 0x45, 0x4d, 0x4f, 0x52, 0x59, 0x5f, 0x42, - 0x59, 0x54, 0x45, 0x53, 0x5f, 0x41, 0x56, 0x47, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0f, 0x02, - 0x01, 0x04, 0x12, 0x04, 0xad, 0x01, 0x02, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0f, 0x02, 0x01, - 0x05, 0x12, 0x04, 0xad, 0x01, 0x0b, 0x11, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0f, 0x02, 0x01, 0x01, - 0x12, 0x04, 0xad, 0x01, 0x12, 0x19, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0f, 0x02, 0x01, 0x03, 0x12, - 0x04, 0xad, 0x01, 0x1c, 0x1d, 0x0a, 0x2b, 0x0a, 0x04, 0x04, 0x0f, 0x02, 0x02, 0x12, 0x04, 0xaf, - 0x01, 0x02, 0x2b, 0x1a, 0x1d, 0x20, 0x53, 0x74, 0x61, 0x72, 0x74, 0x20, 0x74, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2c, 0x20, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, - 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0f, 0x02, 0x02, 0x06, 0x12, 0x04, 0xaf, 0x01, 0x02, - 0x1b, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0f, 0x02, 0x02, 0x01, 0x12, 0x04, 0xaf, 0x01, 0x1c, 0x26, - 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0f, 0x02, 0x02, 0x03, 0x12, 0x04, 0xaf, 0x01, 0x29, 0x2a, 0x0a, - 0x2a, 0x0a, 0x04, 0x04, 0x0f, 0x02, 0x03, 0x12, 0x04, 0xb1, 0x01, 0x02, 0x29, 0x1a, 0x1c, 0x20, - 0x45, 0x6e, 0x64, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2c, 0x20, 0x69, - 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x2e, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, - 0x0f, 0x02, 0x03, 0x06, 0x12, 0x04, 0xb1, 0x01, 0x02, 0x1b, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0f, - 0x02, 0x03, 0x01, 0x12, 0x04, 0xb1, 0x01, 0x1c, 0x24, 0x0a, 0x0d, 0x0a, 0x05, 
0x04, 0x0f, 0x02, - 0x03, 0x03, 0x12, 0x04, 0xb1, 0x01, 0x27, 0x28, 0x0a, 0x5a, 0x0a, 0x04, 0x04, 0x0f, 0x02, 0x04, - 0x12, 0x04, 0xb3, 0x01, 0x02, 0x24, 0x1a, 0x4c, 0x20, 0x51, 0x75, 0x65, 0x72, 0x79, 0x20, 0x72, - 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x73, 0x74, 0x65, 0x70, 0x20, 0x77, - 0x69, 0x64, 0x74, 0x68, 0x20, 0x69, 0x6e, 0x20, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x20, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x20, 0x6f, 0x72, 0x20, 0x66, 0x6c, 0x6f, 0x61, 0x74, - 0x20, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x20, 0x6f, 0x66, 0x20, 0x73, 0x65, 0x63, 0x6f, 0x6e, - 0x64, 0x73, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0f, 0x02, 0x04, 0x06, 0x12, 0x04, 0xb3, - 0x01, 0x02, 0x1a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0f, 0x02, 0x04, 0x01, 0x12, 0x04, 0xb3, 0x01, - 0x1b, 0x1f, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0f, 0x02, 0x04, 0x03, 0x12, 0x04, 0xb3, 0x01, 0x22, - 0x23, 0x0a, 0x41, 0x0a, 0x04, 0x04, 0x0f, 0x02, 0x05, 0x12, 0x04, 0xb5, 0x01, 0x02, 0x21, 0x1a, + 0x20, 0x74, 0x68, 0x65, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x0a, + 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0a, 0x02, 0x00, 0x05, 0x12, 0x04, 0x8f, 0x01, 0x02, 0x08, 0x0a, + 0x0d, 0x0a, 0x05, 0x04, 0x0a, 0x02, 0x00, 0x01, 0x12, 0x04, 0x8f, 0x01, 0x09, 0x0d, 0x0a, 0x0d, + 0x0a, 0x05, 0x04, 0x0a, 0x02, 0x00, 0x03, 0x12, 0x04, 0x8f, 0x01, 0x10, 0x11, 0x0a, 0x2d, 0x0a, + 0x04, 0x04, 0x0a, 0x02, 0x01, 0x12, 0x04, 0x91, 0x01, 0x02, 0x14, 0x1a, 0x1f, 0x20, 0x54, 0x68, + 0x65, 0x20, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, + 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x74, 0x79, 0x70, 0x65, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, + 0x04, 0x0a, 0x02, 0x01, 0x05, 0x12, 0x04, 0x91, 0x01, 0x02, 0x07, 0x0a, 0x0d, 0x0a, 0x05, 0x04, + 0x0a, 0x02, 0x01, 0x01, 0x12, 0x04, 0x91, 0x01, 0x08, 0x0f, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0a, + 0x02, 0x01, 0x03, 0x12, 0x04, 0x91, 0x01, 0x12, 0x13, 0x0a, 0x2e, 0x0a, 0x02, 0x04, 0x0b, 0x12, + 0x06, 0x95, 0x01, 
0x00, 0x98, 0x01, 0x01, 0x1a, 0x20, 0x20, 0x41, 0x20, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x67, 0x65, 0x74, 0x20, 0x61, 0x6e, 0x20, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x0a, 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x0b, 0x01, + 0x12, 0x04, 0x95, 0x01, 0x08, 0x1b, 0x0a, 0x2a, 0x0a, 0x04, 0x04, 0x0b, 0x02, 0x00, 0x12, 0x04, + 0x97, 0x01, 0x02, 0x12, 0x1a, 0x1c, 0x20, 0x54, 0x68, 0x65, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x20, + 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, + 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0b, 0x02, 0x00, 0x05, 0x12, 0x04, 0x97, 0x01, 0x02, + 0x08, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0b, 0x02, 0x00, 0x01, 0x12, 0x04, 0x97, 0x01, 0x09, 0x0d, + 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0b, 0x02, 0x00, 0x03, 0x12, 0x04, 0x97, 0x01, 0x10, 0x11, 0x0a, + 0x33, 0x0a, 0x02, 0x04, 0x0c, 0x12, 0x06, 0x9b, 0x01, 0x00, 0x9d, 0x01, 0x01, 0x1a, 0x25, 0x20, + 0x41, 0x20, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x61, + 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x20, 0x61, 0x6e, 0x20, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x2e, 0x0a, 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x0c, 0x01, 0x12, 0x04, 0x9b, 0x01, 0x08, + 0x1c, 0x0a, 0x0c, 0x0a, 0x04, 0x04, 0x0c, 0x02, 0x00, 0x12, 0x04, 0x9c, 0x01, 0x02, 0x1a, 0x0a, + 0x0d, 0x0a, 0x05, 0x04, 0x0c, 0x02, 0x00, 0x06, 0x12, 0x04, 0x9c, 0x01, 0x02, 0x0b, 0x0a, 0x0d, + 0x0a, 0x05, 0x04, 0x0c, 0x02, 0x00, 0x01, 0x12, 0x04, 0x9c, 0x01, 0x0c, 0x15, 0x0a, 0x0d, 0x0a, + 0x05, 0x04, 0x0c, 0x02, 0x00, 0x03, 0x12, 0x04, 0x9c, 0x01, 0x18, 0x19, 0x0a, 0x2f, 0x0a, 0x02, + 0x04, 0x0d, 0x12, 0x04, 0xa0, 0x01, 0x00, 0x20, 0x1a, 0x23, 0x20, 0x41, 0x20, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x6c, 0x69, 0x73, 0x74, 0x20, 0x61, 0x6c, 0x6c, + 0x20, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x2e, 0x0a, 0x0a, 0x0b, 0x0a, + 0x03, 0x04, 0x0d, 0x01, 0x12, 0x04, 0xa0, 0x01, 0x08, 
0x1d, 0x0a, 0x3b, 0x0a, 0x02, 0x04, 0x0e, + 0x12, 0x06, 0xa3, 0x01, 0x00, 0xa5, 0x01, 0x01, 0x1a, 0x2d, 0x20, 0x41, 0x20, 0x72, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, + 0x20, 0x61, 0x20, 0x6c, 0x69, 0x73, 0x74, 0x20, 0x6f, 0x66, 0x20, 0x63, 0x6f, 0x6e, 0x6e, 0x65, + 0x63, 0x74, 0x6f, 0x72, 0x73, 0x2e, 0x0a, 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x0e, 0x01, 0x12, 0x04, + 0xa3, 0x01, 0x08, 0x1e, 0x0a, 0x0c, 0x0a, 0x04, 0x04, 0x0e, 0x02, 0x00, 0x12, 0x04, 0xa4, 0x01, + 0x02, 0x24, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0e, 0x02, 0x00, 0x04, 0x12, 0x04, 0xa4, 0x01, 0x02, + 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0e, 0x02, 0x00, 0x06, 0x12, 0x04, 0xa4, 0x01, 0x0b, 0x14, + 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0e, 0x02, 0x00, 0x01, 0x12, 0x04, 0xa4, 0x01, 0x15, 0x1f, 0x0a, + 0x0d, 0x0a, 0x05, 0x04, 0x0e, 0x02, 0x00, 0x03, 0x12, 0x04, 0xa4, 0x01, 0x22, 0x23, 0x0a, 0x43, + 0x0a, 0x02, 0x04, 0x0f, 0x12, 0x06, 0xa8, 0x01, 0x00, 0xb6, 0x01, 0x01, 0x1a, 0x35, 0x20, 0x41, + 0x20, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x67, 0x65, 0x74, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x20, 0x66, 0x72, 0x6f, 0x6d, + 0x20, 0x61, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x0a, 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x0f, 0x01, 0x12, 0x04, 0xa8, 0x01, 0x08, 0x1d, + 0x0a, 0x75, 0x0a, 0x04, 0x04, 0x0f, 0x02, 0x00, 0x12, 0x04, 0xaa, 0x01, 0x02, 0x1a, 0x1a, 0x67, + 0x20, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x20, 0x69, 0x73, 0x20, 0x63, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x6f, 0x6e, 0x6e, + 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x20, 0x49, 0x74, 0x20, 0x63, 0x6f, 0x75, 0x6c, 0x64, 0x20, + 0x62, 0x65, 0x20, 0x61, 0x20, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x20, 0x28, 0x6a, 0x6f, 0x62, + 0x49, 0x64, 0x29, 0x20, 0x6f, 0x72, 0x20, 0x61, 0x20, 0x64, 0x69, 0x63, 0x74, 0x20, 0x28, 
0x6d, + 0x6f, 0x72, 0x65, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x78, 0x20, 0x6d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x29, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0f, 0x02, 0x00, 0x05, + 0x12, 0x04, 0xaa, 0x01, 0x02, 0x07, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0f, 0x02, 0x00, 0x01, 0x12, + 0x04, 0xaa, 0x01, 0x08, 0x15, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0f, 0x02, 0x00, 0x03, 0x12, 0x04, + 0xaa, 0x01, 0x18, 0x19, 0x0a, 0xa4, 0x01, 0x0a, 0x04, 0x04, 0x0f, 0x02, 0x01, 0x12, 0x04, 0xad, + 0x01, 0x02, 0x1e, 0x1a, 0x95, 0x01, 0x20, 0x54, 0x68, 0x65, 0x20, 0x6d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x20, 0x49, 0x66, 0x20, + 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2c, 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x72, 0x65, 0x74, 0x75, + 0x72, 0x6e, 0x20, 0x61, 0x20, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x73, 0x65, 0x74, + 0x20, 0x6f, 0x66, 0x20, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x2e, 0x0a, 0x20, 0x65, 0x2e, + 0x67, 0x2e, 0x20, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x45, 0x54, + 0x52, 0x49, 0x43, 0x5f, 0x55, 0x53, 0x45, 0x44, 0x5f, 0x43, 0x50, 0x55, 0x5f, 0x41, 0x56, 0x47, + 0x20, 0x6f, 0x72, 0x20, 0x45, 0x58, 0x45, 0x43, 0x55, 0x54, 0x49, 0x4f, 0x4e, 0x5f, 0x4d, 0x45, + 0x54, 0x52, 0x49, 0x43, 0x5f, 0x55, 0x53, 0x45, 0x44, 0x5f, 0x4d, 0x45, 0x4d, 0x4f, 0x52, 0x59, + 0x5f, 0x42, 0x59, 0x54, 0x45, 0x53, 0x5f, 0x41, 0x56, 0x47, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, + 0x0f, 0x02, 0x01, 0x04, 0x12, 0x04, 0xad, 0x01, 0x02, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0f, + 0x02, 0x01, 0x05, 0x12, 0x04, 0xad, 0x01, 0x0b, 0x11, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0f, 0x02, + 0x01, 0x01, 0x12, 0x04, 0xad, 0x01, 0x12, 0x19, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0f, 0x02, 0x01, + 0x03, 0x12, 0x04, 0xad, 0x01, 0x1c, 0x1d, 0x0a, 0x2b, 0x0a, 0x04, 0x04, 0x0f, 0x02, 0x02, 0x12, + 0x04, 0xaf, 0x01, 0x02, 0x2b, 0x1a, 0x1d, 0x20, 0x53, 0x74, 0x61, 0x72, 0x74, 0x20, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 
0x6d, 0x70, 0x2c, 0x20, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, + 0x76, 0x65, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0f, 0x02, 0x02, 0x06, 0x12, 0x04, 0xaf, + 0x01, 0x02, 0x1b, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0f, 0x02, 0x02, 0x01, 0x12, 0x04, 0xaf, 0x01, + 0x1c, 0x26, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0f, 0x02, 0x02, 0x03, 0x12, 0x04, 0xaf, 0x01, 0x29, + 0x2a, 0x0a, 0x2a, 0x0a, 0x04, 0x04, 0x0f, 0x02, 0x03, 0x12, 0x04, 0xb1, 0x01, 0x02, 0x29, 0x1a, + 0x1c, 0x20, 0x45, 0x6e, 0x64, 0x20, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2c, + 0x20, 0x69, 0x6e, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x2e, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, + 0x05, 0x04, 0x0f, 0x02, 0x03, 0x06, 0x12, 0x04, 0xb1, 0x01, 0x02, 0x1b, 0x0a, 0x0d, 0x0a, 0x05, + 0x04, 0x0f, 0x02, 0x03, 0x01, 0x12, 0x04, 0xb1, 0x01, 0x1c, 0x24, 0x0a, 0x0d, 0x0a, 0x05, 0x04, + 0x0f, 0x02, 0x03, 0x03, 0x12, 0x04, 0xb1, 0x01, 0x27, 0x28, 0x0a, 0x5a, 0x0a, 0x04, 0x04, 0x0f, + 0x02, 0x04, 0x12, 0x04, 0xb3, 0x01, 0x02, 0x24, 0x1a, 0x4c, 0x20, 0x51, 0x75, 0x65, 0x72, 0x79, + 0x20, 0x72, 0x65, 0x73, 0x6f, 0x6c, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x73, 0x74, 0x65, 0x70, + 0x20, 0x77, 0x69, 0x64, 0x74, 0x68, 0x20, 0x69, 0x6e, 0x20, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x20, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x20, 0x6f, 0x72, 0x20, 0x66, 0x6c, 0x6f, + 0x61, 0x74, 0x20, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x20, 0x6f, 0x66, 0x20, 0x73, 0x65, 0x63, + 0x6f, 0x6e, 0x64, 0x73, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0f, 0x02, 0x04, 0x06, 0x12, + 0x04, 0xb3, 0x01, 0x02, 0x1a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0f, 0x02, 0x04, 0x01, 0x12, 0x04, + 0xb3, 0x01, 0x1b, 0x1f, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0f, 0x02, 0x04, 0x03, 0x12, 0x04, 0xb3, + 0x01, 0x22, 0x23, 0x0a, 0x41, 0x0a, 0x04, 0x04, 0x0f, 0x02, 0x05, 0x12, 0x04, 0xb5, 0x01, 0x02, + 0x21, 0x1a, 0x33, 0x20, 0x41, 0x20, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, + 0x20, 0x79, 0x65, 0x74, 0x20, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 
0x69, 0x62, 0x6c, 0x65, 0x20, + 0x54, 0x61, 0x73, 0x6b, 0x20, 0x74, 0x79, 0x70, 0x65, 0x20, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, + 0x66, 0x69, 0x65, 0x72, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0f, 0x02, 0x05, 0x06, 0x12, + 0x04, 0xb5, 0x01, 0x02, 0x0e, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0f, 0x02, 0x05, 0x01, 0x12, 0x04, + 0xb5, 0x01, 0x0f, 0x1c, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0f, 0x02, 0x05, 0x03, 0x12, 0x04, 0xb5, + 0x01, 0x1f, 0x20, 0x0a, 0x4d, 0x0a, 0x02, 0x04, 0x10, 0x12, 0x06, 0xb9, 0x01, 0x00, 0xbc, 0x01, + 0x01, 0x1a, 0x3f, 0x20, 0x41, 0x20, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x20, 0x63, + 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x20, 0x61, 0x20, 0x6c, 0x69, 0x73, 0x74, + 0x20, 0x6f, 0x66, 0x20, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x20, 0x66, 0x6f, 0x72, 0x20, + 0x61, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, + 0x2e, 0x0a, 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x10, 0x01, 0x12, 0x04, 0xb9, 0x01, 0x08, 0x1e, 0x0a, + 0x2d, 0x0a, 0x04, 0x04, 0x10, 0x02, 0x00, 0x12, 0x04, 0xbb, 0x01, 0x02, 0x32, 0x1a, 0x1f, 0x20, + 0x54, 0x68, 0x65, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x6d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x2e, 0x0a, 0x0a, 0x0d, + 0x0a, 0x05, 0x04, 0x10, 0x02, 0x00, 0x04, 0x12, 0x04, 0xbb, 0x01, 0x02, 0x0a, 0x0a, 0x0d, 0x0a, + 0x05, 0x04, 0x10, 0x02, 0x00, 0x06, 0x12, 0x04, 0xbb, 0x01, 0x0b, 0x25, 0x0a, 0x0d, 0x0a, 0x05, + 0x04, 0x10, 0x02, 0x00, 0x01, 0x12, 0x04, 0xbb, 0x01, 0x26, 0x2d, 0x0a, 0x0d, 0x0a, 0x05, 0x04, + 0x10, 0x02, 0x00, 0x03, 0x12, 0x04, 0xbb, 0x01, 0x30, 0x31, 0x0a, 0x3f, 0x0a, 0x02, 0x04, 0x11, + 0x12, 0x06, 0xbf, 0x01, 0x00, 0xc9, 0x01, 0x01, 0x1a, 0x31, 0x20, 0x41, 0x20, 0x72, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x67, 0x65, 0x74, 0x20, 0x74, 0x68, 0x65, 0x20, + 0x6c, 0x6f, 0x67, 0x20, 0x66, 0x72, 0x6f, 0x6d, 0x20, 0x61, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, + 0x65, 
0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x0a, 0x0a, 0x0b, 0x0a, 0x03, 0x04, + 0x11, 0x01, 0x12, 0x04, 0xbf, 0x01, 0x08, 0x1a, 0x0a, 0x75, 0x0a, 0x04, 0x04, 0x11, 0x02, 0x00, + 0x12, 0x04, 0xc1, 0x01, 0x02, 0x1a, 0x1a, 0x67, 0x20, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, + 0x61, 0x20, 0x69, 0x73, 0x20, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x20, 0x49, + 0x74, 0x20, 0x63, 0x6f, 0x75, 0x6c, 0x64, 0x20, 0x62, 0x65, 0x20, 0x61, 0x20, 0x73, 0x74, 0x72, + 0x69, 0x6e, 0x67, 0x20, 0x28, 0x6a, 0x6f, 0x62, 0x49, 0x64, 0x29, 0x20, 0x6f, 0x72, 0x20, 0x61, + 0x20, 0x64, 0x69, 0x63, 0x74, 0x20, 0x28, 0x6d, 0x6f, 0x72, 0x65, 0x20, 0x63, 0x6f, 0x6d, 0x70, + 0x6c, 0x65, 0x78, 0x20, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x29, 0x2e, 0x0a, 0x0a, + 0x0d, 0x0a, 0x05, 0x04, 0x11, 0x02, 0x00, 0x05, 0x12, 0x04, 0xc1, 0x01, 0x02, 0x07, 0x0a, 0x0d, + 0x0a, 0x05, 0x04, 0x11, 0x02, 0x00, 0x01, 0x12, 0x04, 0xc1, 0x01, 0x08, 0x15, 0x0a, 0x0d, 0x0a, + 0x05, 0x04, 0x11, 0x02, 0x00, 0x03, 0x12, 0x04, 0xc1, 0x01, 0x18, 0x19, 0x0a, 0x2a, 0x0a, 0x04, + 0x04, 0x11, 0x02, 0x01, 0x12, 0x04, 0xc3, 0x01, 0x02, 0x13, 0x1a, 0x1c, 0x20, 0x4e, 0x75, 0x6d, + 0x62, 0x65, 0x72, 0x20, 0x6f, 0x66, 0x20, 0x6c, 0x69, 0x6e, 0x65, 0x73, 0x20, 0x74, 0x6f, 0x20, + 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x11, 0x02, 0x01, + 0x05, 0x12, 0x04, 0xc3, 0x01, 0x02, 0x08, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x11, 0x02, 0x01, 0x01, + 0x12, 0x04, 0xc3, 0x01, 0x09, 0x0e, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x11, 0x02, 0x01, 0x03, 0x12, + 0x04, 0xc3, 0x01, 0x11, 0x12, 0x0a, 0xbc, 0x01, 0x0a, 0x04, 0x04, 0x11, 0x02, 0x02, 0x12, 0x04, + 0xc6, 0x01, 0x02, 0x13, 0x1a, 0xad, 0x01, 0x20, 0x49, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, + 0x61, 0x73, 0x65, 0x20, 0x6f, 0x66, 0x20, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x20, + 0x70, 0x61, 0x67, 0x65, 0x73, 0x20, 0x6f, 
0x66, 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, + 0x2c, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2d, 0x70, 0x72, 0x6f, + 0x76, 0x69, 0x64, 0x65, 0x64, 0x20, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x20, 0x63, 0x61, 0x6e, 0x20, + 0x62, 0x65, 0x20, 0x75, 0x73, 0x65, 0x64, 0x20, 0x74, 0x6f, 0x20, 0x66, 0x65, 0x74, 0x63, 0x68, + 0x20, 0x74, 0x68, 0x65, 0x20, 0x6e, 0x65, 0x78, 0x74, 0x20, 0x70, 0x61, 0x67, 0x65, 0x0a, 0x20, + 0x69, 0x6e, 0x20, 0x61, 0x20, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x20, 0x49, 0x66, 0x20, 0x74, + 0x68, 0x65, 0x72, 0x65, 0x20, 0x61, 0x72, 0x65, 0x20, 0x6e, 0x6f, 0x20, 0x6d, 0x6f, 0x72, 0x65, + 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x2c, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x62, 0x65, 0x20, 0x65, 0x6d, 0x70, + 0x74, 0x79, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x11, 0x02, 0x02, 0x05, 0x12, 0x04, 0xc6, + 0x01, 0x02, 0x08, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x11, 0x02, 0x02, 0x01, 0x12, 0x04, 0xc6, 0x01, + 0x09, 0x0e, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x11, 0x02, 0x02, 0x03, 0x12, 0x04, 0xc6, 0x01, 0x11, + 0x12, 0x0a, 0x41, 0x0a, 0x04, 0x04, 0x11, 0x02, 0x03, 0x12, 0x04, 0xc8, 0x01, 0x02, 0x21, 0x1a, 0x33, 0x20, 0x41, 0x20, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x20, 0x79, 0x65, 0x74, 0x20, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x62, 0x6c, 0x65, 0x20, 0x54, 0x61, 0x73, 0x6b, 0x20, 0x74, 0x79, 0x70, 0x65, 0x20, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, - 0x65, 0x72, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0f, 0x02, 0x05, 0x06, 0x12, 0x04, 0xb5, - 0x01, 0x02, 0x0e, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0f, 0x02, 0x05, 0x01, 0x12, 0x04, 0xb5, 0x01, - 0x0f, 0x1c, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x0f, 0x02, 0x05, 0x03, 0x12, 0x04, 0xb5, 0x01, 0x1f, - 0x20, 0x0a, 0x4d, 0x0a, 0x02, 0x04, 0x10, 0x12, 0x06, 0xb9, 0x01, 0x00, 0xbc, 0x01, 0x01, 0x1a, - 0x3f, 0x20, 0x41, 0x20, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x20, 0x63, 
0x6f, 0x6e, - 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x20, 0x61, 0x20, 0x6c, 0x69, 0x73, 0x74, 0x20, 0x6f, - 0x66, 0x20, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x20, - 0x74, 0x61, 0x73, 0x6b, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x0a, - 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x10, 0x01, 0x12, 0x04, 0xb9, 0x01, 0x08, 0x1e, 0x0a, 0x2d, 0x0a, - 0x04, 0x04, 0x10, 0x02, 0x00, 0x12, 0x04, 0xbb, 0x01, 0x02, 0x32, 0x1a, 0x1f, 0x20, 0x54, 0x68, - 0x65, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x6d, 0x65, 0x74, 0x72, - 0x69, 0x63, 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, - 0x04, 0x10, 0x02, 0x00, 0x04, 0x12, 0x04, 0xbb, 0x01, 0x02, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, - 0x10, 0x02, 0x00, 0x06, 0x12, 0x04, 0xbb, 0x01, 0x0b, 0x25, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x10, - 0x02, 0x00, 0x01, 0x12, 0x04, 0xbb, 0x01, 0x26, 0x2d, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x10, 0x02, - 0x00, 0x03, 0x12, 0x04, 0xbb, 0x01, 0x30, 0x31, 0x0a, 0x3f, 0x0a, 0x02, 0x04, 0x11, 0x12, 0x06, - 0xbf, 0x01, 0x00, 0xc9, 0x01, 0x01, 0x1a, 0x31, 0x20, 0x41, 0x20, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x67, 0x65, 0x74, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6c, 0x6f, - 0x67, 0x20, 0x66, 0x72, 0x6f, 0x6d, 0x20, 0x61, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x65, 0x78, - 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x0a, 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x11, 0x01, - 0x12, 0x04, 0xbf, 0x01, 0x08, 0x1a, 0x0a, 0x75, 0x0a, 0x04, 0x04, 0x11, 0x02, 0x00, 0x12, 0x04, - 0xc1, 0x01, 0x02, 0x1a, 0x1a, 0x67, 0x20, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x20, - 0x69, 0x73, 0x20, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x74, 0x68, - 0x65, 0x20, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x20, 0x49, 0x74, 0x20, - 0x63, 0x6f, 0x75, 0x6c, 0x64, 0x20, 0x62, 0x65, 0x20, 0x61, 0x20, 0x73, 0x74, 0x72, 0x69, 0x6e, - 0x67, 0x20, 0x28, 0x6a, 
0x6f, 0x62, 0x49, 0x64, 0x29, 0x20, 0x6f, 0x72, 0x20, 0x61, 0x20, 0x64, - 0x69, 0x63, 0x74, 0x20, 0x28, 0x6d, 0x6f, 0x72, 0x65, 0x20, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, - 0x78, 0x20, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x29, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, - 0x05, 0x04, 0x11, 0x02, 0x00, 0x05, 0x12, 0x04, 0xc1, 0x01, 0x02, 0x07, 0x0a, 0x0d, 0x0a, 0x05, - 0x04, 0x11, 0x02, 0x00, 0x01, 0x12, 0x04, 0xc1, 0x01, 0x08, 0x15, 0x0a, 0x0d, 0x0a, 0x05, 0x04, - 0x11, 0x02, 0x00, 0x03, 0x12, 0x04, 0xc1, 0x01, 0x18, 0x19, 0x0a, 0x2a, 0x0a, 0x04, 0x04, 0x11, - 0x02, 0x01, 0x12, 0x04, 0xc3, 0x01, 0x02, 0x13, 0x1a, 0x1c, 0x20, 0x4e, 0x75, 0x6d, 0x62, 0x65, - 0x72, 0x20, 0x6f, 0x66, 0x20, 0x6c, 0x69, 0x6e, 0x65, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x72, 0x65, - 0x74, 0x75, 0x72, 0x6e, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x11, 0x02, 0x01, 0x05, 0x12, - 0x04, 0xc3, 0x01, 0x02, 0x08, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x11, 0x02, 0x01, 0x01, 0x12, 0x04, - 0xc3, 0x01, 0x09, 0x0e, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x11, 0x02, 0x01, 0x03, 0x12, 0x04, 0xc3, - 0x01, 0x11, 0x12, 0x0a, 0xbc, 0x01, 0x0a, 0x04, 0x04, 0x11, 0x02, 0x02, 0x12, 0x04, 0xc6, 0x01, - 0x02, 0x13, 0x1a, 0xad, 0x01, 0x20, 0x49, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x61, 0x73, - 0x65, 0x20, 0x6f, 0x66, 0x20, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x20, 0x70, 0x61, - 0x67, 0x65, 0x73, 0x20, 0x6f, 0x66, 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x2c, 0x20, - 0x74, 0x68, 0x65, 0x20, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2d, 0x70, 0x72, 0x6f, 0x76, 0x69, - 0x64, 0x65, 0x64, 0x20, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x20, 0x63, 0x61, 0x6e, 0x20, 0x62, 0x65, - 0x20, 0x75, 0x73, 0x65, 0x64, 0x20, 0x74, 0x6f, 0x20, 0x66, 0x65, 0x74, 0x63, 0x68, 0x20, 0x74, - 0x68, 0x65, 0x20, 0x6e, 0x65, 0x78, 0x74, 0x20, 0x70, 0x61, 0x67, 0x65, 0x0a, 0x20, 0x69, 0x6e, - 0x20, 0x61, 0x20, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, 0x20, 0x49, 0x66, 0x20, 0x74, 0x68, 0x65, - 0x72, 0x65, 0x20, 0x61, 0x72, 0x65, 0x20, 0x6e, 0x6f, 0x20, 
0x6d, 0x6f, 0x72, 0x65, 0x20, 0x72, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x2c, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x62, 0x65, 0x20, 0x65, 0x6d, 0x70, 0x74, 0x79, - 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x11, 0x02, 0x02, 0x05, 0x12, 0x04, 0xc6, 0x01, 0x02, - 0x08, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x11, 0x02, 0x02, 0x01, 0x12, 0x04, 0xc6, 0x01, 0x09, 0x0e, - 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x11, 0x02, 0x02, 0x03, 0x12, 0x04, 0xc6, 0x01, 0x11, 0x12, 0x0a, - 0x41, 0x0a, 0x04, 0x04, 0x11, 0x02, 0x03, 0x12, 0x04, 0xc8, 0x01, 0x02, 0x21, 0x1a, 0x33, 0x20, - 0x41, 0x20, 0x70, 0x72, 0x65, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x20, 0x79, 0x65, 0x74, - 0x20, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x62, 0x6c, 0x65, 0x20, 0x54, 0x61, 0x73, 0x6b, - 0x20, 0x74, 0x79, 0x70, 0x65, 0x20, 0x69, 0x64, 0x65, 0x6e, 0x74, 0x69, 0x66, 0x69, 0x65, 0x72, - 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x11, 0x02, 0x03, 0x06, 0x12, 0x04, 0xc8, 0x01, 0x02, - 0x0e, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x11, 0x02, 0x03, 0x01, 0x12, 0x04, 0xc8, 0x01, 0x0f, 0x1c, - 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x11, 0x02, 0x03, 0x03, 0x12, 0x04, 0xc8, 0x01, 0x1f, 0x20, 0x0a, - 0x0c, 0x0a, 0x02, 0x04, 0x12, 0x12, 0x06, 0xcb, 0x01, 0x00, 0xcf, 0x01, 0x01, 0x0a, 0x0b, 0x0a, - 0x03, 0x04, 0x12, 0x01, 0x12, 0x04, 0xcb, 0x01, 0x08, 0x21, 0x0a, 0xbc, 0x01, 0x0a, 0x04, 0x04, - 0x12, 0x02, 0x00, 0x12, 0x04, 0xce, 0x01, 0x02, 0x13, 0x1a, 0xad, 0x01, 0x20, 0x49, 0x6e, 0x20, - 0x74, 0x68, 0x65, 0x20, 0x63, 0x61, 0x73, 0x65, 0x20, 0x6f, 0x66, 0x20, 0x6d, 0x75, 0x6c, 0x74, - 0x69, 0x70, 0x6c, 0x65, 0x20, 0x70, 0x61, 0x67, 0x65, 0x73, 0x20, 0x6f, 0x66, 0x20, 0x72, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x73, 0x2c, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x65, 0x72, 0x76, 0x65, - 0x72, 0x2d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x64, 0x20, 0x74, 0x6f, 0x6b, 0x65, 0x6e, - 0x20, 0x63, 0x61, 0x6e, 0x20, 0x62, 0x65, 0x20, 0x75, 0x73, 0x65, 0x64, 0x20, 0x74, 0x6f, 0x20, - 
0x66, 0x65, 0x74, 0x63, 0x68, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6e, 0x65, 0x78, 0x74, 0x20, 0x70, - 0x61, 0x67, 0x65, 0x0a, 0x20, 0x69, 0x6e, 0x20, 0x61, 0x20, 0x71, 0x75, 0x65, 0x72, 0x79, 0x2e, - 0x20, 0x49, 0x66, 0x20, 0x74, 0x68, 0x65, 0x72, 0x65, 0x20, 0x61, 0x72, 0x65, 0x20, 0x6e, 0x6f, - 0x20, 0x6d, 0x6f, 0x72, 0x65, 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x2c, 0x20, 0x74, - 0x68, 0x69, 0x73, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x62, - 0x65, 0x20, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x12, 0x02, - 0x00, 0x05, 0x12, 0x04, 0xce, 0x01, 0x02, 0x08, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x12, 0x02, 0x00, - 0x01, 0x12, 0x04, 0xce, 0x01, 0x09, 0x0e, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x12, 0x02, 0x00, 0x03, - 0x12, 0x04, 0xce, 0x01, 0x11, 0x12, 0x0a, 0x0c, 0x0a, 0x02, 0x04, 0x13, 0x12, 0x06, 0xd1, 0x01, - 0x00, 0xd4, 0x01, 0x01, 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x13, 0x01, 0x12, 0x04, 0xd1, 0x01, 0x08, - 0x1f, 0x0a, 0x2a, 0x0a, 0x04, 0x04, 0x13, 0x02, 0x00, 0x12, 0x04, 0xd3, 0x01, 0x02, 0x1e, 0x1a, - 0x1c, 0x20, 0x54, 0x68, 0x65, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x20, - 0x6c, 0x6f, 0x67, 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, - 0x05, 0x04, 0x13, 0x02, 0x00, 0x04, 0x12, 0x04, 0xd3, 0x01, 0x02, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, - 0x04, 0x13, 0x02, 0x00, 0x05, 0x12, 0x04, 0xd3, 0x01, 0x0b, 0x11, 0x0a, 0x0d, 0x0a, 0x05, 0x04, - 0x13, 0x02, 0x00, 0x01, 0x12, 0x04, 0xd3, 0x01, 0x12, 0x19, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x13, - 0x02, 0x00, 0x03, 0x12, 0x04, 0xd3, 0x01, 0x1c, 0x1d, 0x0a, 0x44, 0x0a, 0x02, 0x04, 0x14, 0x12, - 0x06, 0xd7, 0x01, 0x00, 0xdc, 0x01, 0x01, 0x1a, 0x36, 0x20, 0x41, 0x20, 0x72, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x20, - 0x74, 0x68, 0x65, 0x20, 0x6c, 0x6f, 0x67, 0x73, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x20, 0x74, - 0x61, 0x73, 0x6b, 0x20, 0x65, 0x78, 
0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x0a, 0x0a, - 0x0b, 0x0a, 0x03, 0x04, 0x14, 0x01, 0x12, 0x04, 0xd7, 0x01, 0x08, 0x1b, 0x0a, 0x0e, 0x0a, 0x04, - 0x04, 0x14, 0x08, 0x00, 0x12, 0x06, 0xd8, 0x01, 0x02, 0xdb, 0x01, 0x03, 0x0a, 0x0d, 0x0a, 0x05, - 0x04, 0x14, 0x08, 0x00, 0x01, 0x12, 0x04, 0xd8, 0x01, 0x08, 0x0c, 0x0a, 0x0c, 0x0a, 0x04, 0x04, - 0x14, 0x02, 0x00, 0x12, 0x04, 0xd9, 0x01, 0x04, 0x29, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x14, 0x02, - 0x00, 0x06, 0x12, 0x04, 0xd9, 0x01, 0x04, 0x1d, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x14, 0x02, 0x00, - 0x01, 0x12, 0x04, 0xd9, 0x01, 0x1e, 0x24, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x14, 0x02, 0x00, 0x03, - 0x12, 0x04, 0xd9, 0x01, 0x27, 0x28, 0x0a, 0x0c, 0x0a, 0x04, 0x04, 0x14, 0x02, 0x01, 0x12, 0x04, - 0xda, 0x01, 0x04, 0x25, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x14, 0x02, 0x01, 0x06, 0x12, 0x04, 0xda, - 0x01, 0x04, 0x1b, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x14, 0x02, 0x01, 0x01, 0x12, 0x04, 0xda, 0x01, - 0x1c, 0x20, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x14, 0x02, 0x01, 0x03, 0x12, 0x04, 0xda, 0x01, 0x23, - 0x24, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x0a, 0x8d, 0x0f, 0x0a, 0x1c, 0x66, 0x6c, - 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2f, - 0x64, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x66, 0x6c, 0x79, 0x74, - 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x1a, 0x1a, 0x66, - 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x74, 0x61, - 0x73, 0x6b, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x87, 0x01, 0x0a, 0x07, 0x44, 0x61, - 0x73, 0x6b, 0x4a, 0x6f, 0x62, 0x12, 0x3e, 0x0a, 0x09, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, - 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x65, 0x72, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x11, 0x02, 0x03, 0x06, 0x12, 0x04, 0xc8, + 0x01, 0x02, 0x0e, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x11, 0x02, 0x03, 0x01, 
0x12, 0x04, 0xc8, 0x01, + 0x0f, 0x1c, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x11, 0x02, 0x03, 0x03, 0x12, 0x04, 0xc8, 0x01, 0x1f, + 0x20, 0x0a, 0x0c, 0x0a, 0x02, 0x04, 0x12, 0x12, 0x06, 0xcb, 0x01, 0x00, 0xcf, 0x01, 0x01, 0x0a, + 0x0b, 0x0a, 0x03, 0x04, 0x12, 0x01, 0x12, 0x04, 0xcb, 0x01, 0x08, 0x21, 0x0a, 0xbc, 0x01, 0x0a, + 0x04, 0x04, 0x12, 0x02, 0x00, 0x12, 0x04, 0xce, 0x01, 0x02, 0x13, 0x1a, 0xad, 0x01, 0x20, 0x49, + 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x61, 0x73, 0x65, 0x20, 0x6f, 0x66, 0x20, 0x6d, 0x75, + 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x20, 0x70, 0x61, 0x67, 0x65, 0x73, 0x20, 0x6f, 0x66, 0x20, + 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x2c, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x65, 0x72, + 0x76, 0x65, 0x72, 0x2d, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x64, 0x20, 0x74, 0x6f, 0x6b, + 0x65, 0x6e, 0x20, 0x63, 0x61, 0x6e, 0x20, 0x62, 0x65, 0x20, 0x75, 0x73, 0x65, 0x64, 0x20, 0x74, + 0x6f, 0x20, 0x66, 0x65, 0x74, 0x63, 0x68, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6e, 0x65, 0x78, 0x74, + 0x20, 0x70, 0x61, 0x67, 0x65, 0x0a, 0x20, 0x69, 0x6e, 0x20, 0x61, 0x20, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x2e, 0x20, 0x49, 0x66, 0x20, 0x74, 0x68, 0x65, 0x72, 0x65, 0x20, 0x61, 0x72, 0x65, 0x20, + 0x6e, 0x6f, 0x20, 0x6d, 0x6f, 0x72, 0x65, 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x2c, + 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x20, 0x77, 0x69, 0x6c, 0x6c, + 0x20, 0x62, 0x65, 0x20, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x0a, 0x0a, 0x0d, 0x0a, 0x05, 0x04, + 0x12, 0x02, 0x00, 0x05, 0x12, 0x04, 0xce, 0x01, 0x02, 0x08, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x12, + 0x02, 0x00, 0x01, 0x12, 0x04, 0xce, 0x01, 0x09, 0x0e, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x12, 0x02, + 0x00, 0x03, 0x12, 0x04, 0xce, 0x01, 0x11, 0x12, 0x0a, 0x0c, 0x0a, 0x02, 0x04, 0x13, 0x12, 0x06, + 0xd1, 0x01, 0x00, 0xd4, 0x01, 0x01, 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x13, 0x01, 0x12, 0x04, 0xd1, + 0x01, 0x08, 0x1f, 0x0a, 0x2a, 0x0a, 0x04, 0x04, 0x13, 0x02, 0x00, 0x12, 0x04, 0xd3, 0x01, 0x02, + 0x1e, 0x1a, 
0x1c, 0x20, 0x54, 0x68, 0x65, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, + 0x6e, 0x20, 0x6c, 0x6f, 0x67, 0x20, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x2e, 0x0a, 0x0a, + 0x0d, 0x0a, 0x05, 0x04, 0x13, 0x02, 0x00, 0x04, 0x12, 0x04, 0xd3, 0x01, 0x02, 0x0a, 0x0a, 0x0d, + 0x0a, 0x05, 0x04, 0x13, 0x02, 0x00, 0x05, 0x12, 0x04, 0xd3, 0x01, 0x0b, 0x11, 0x0a, 0x0d, 0x0a, + 0x05, 0x04, 0x13, 0x02, 0x00, 0x01, 0x12, 0x04, 0xd3, 0x01, 0x12, 0x19, 0x0a, 0x0d, 0x0a, 0x05, + 0x04, 0x13, 0x02, 0x00, 0x03, 0x12, 0x04, 0xd3, 0x01, 0x1c, 0x1d, 0x0a, 0x44, 0x0a, 0x02, 0x04, + 0x14, 0x12, 0x06, 0xd7, 0x01, 0x00, 0xdc, 0x01, 0x01, 0x1a, 0x36, 0x20, 0x41, 0x20, 0x72, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x69, 0x6e, + 0x67, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6c, 0x6f, 0x67, 0x73, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, + 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x2e, + 0x0a, 0x0a, 0x0b, 0x0a, 0x03, 0x04, 0x14, 0x01, 0x12, 0x04, 0xd7, 0x01, 0x08, 0x1b, 0x0a, 0x0e, + 0x0a, 0x04, 0x04, 0x14, 0x08, 0x00, 0x12, 0x06, 0xd8, 0x01, 0x02, 0xdb, 0x01, 0x03, 0x0a, 0x0d, + 0x0a, 0x05, 0x04, 0x14, 0x08, 0x00, 0x01, 0x12, 0x04, 0xd8, 0x01, 0x08, 0x0c, 0x0a, 0x0c, 0x0a, + 0x04, 0x04, 0x14, 0x02, 0x00, 0x12, 0x04, 0xd9, 0x01, 0x04, 0x29, 0x0a, 0x0d, 0x0a, 0x05, 0x04, + 0x14, 0x02, 0x00, 0x06, 0x12, 0x04, 0xd9, 0x01, 0x04, 0x1d, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x14, + 0x02, 0x00, 0x01, 0x12, 0x04, 0xd9, 0x01, 0x1e, 0x24, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x14, 0x02, + 0x00, 0x03, 0x12, 0x04, 0xd9, 0x01, 0x27, 0x28, 0x0a, 0x0c, 0x0a, 0x04, 0x04, 0x14, 0x02, 0x01, + 0x12, 0x04, 0xda, 0x01, 0x04, 0x25, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x14, 0x02, 0x01, 0x06, 0x12, + 0x04, 0xda, 0x01, 0x04, 0x1b, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x14, 0x02, 0x01, 0x01, 0x12, 0x04, + 0xda, 0x01, 0x1c, 0x20, 0x0a, 0x0d, 0x0a, 0x05, 0x04, 0x14, 0x02, 0x01, 0x03, 0x12, 0x04, 0xda, + 0x01, 0x23, 0x24, 0x62, 0x06, 0x70, 0x72, 0x6f, 
0x74, 0x6f, 0x33, 0x0a, 0x8d, 0x0f, 0x0a, 0x1c, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x73, 0x2f, 0x64, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x1a, + 0x1a, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, + 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x87, 0x01, 0x0a, 0x07, + 0x44, 0x61, 0x73, 0x6b, 0x4a, 0x6f, 0x62, 0x12, 0x3e, 0x0a, 0x09, 0x73, 0x63, 0x68, 0x65, 0x64, + 0x75, 0x6c, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x44, + 0x61, 0x73, 0x6b, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x52, 0x09, 0x73, 0x63, + 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x12, 0x3c, 0x0a, 0x07, 0x77, 0x6f, 0x72, 0x6b, 0x65, + 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x44, 0x61, 0x73, - 0x6b, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x52, 0x09, 0x73, 0x63, 0x68, 0x65, - 0x64, 0x75, 0x6c, 0x65, 0x72, 0x12, 0x3c, 0x0a, 0x07, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, - 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x44, 0x61, 0x73, 0x6b, 0x57, - 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x07, 0x77, 0x6f, 0x72, 0x6b, - 0x65, 0x72, 0x73, 0x22, 0x5e, 0x0a, 0x0d, 0x44, 0x61, 0x73, 0x6b, 0x53, 0x63, 0x68, 0x65, 0x64, - 0x75, 0x6c, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x09, 0x72, 
0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, - 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, - 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x73, 0x22, 0x8c, 0x01, 0x0a, 0x0f, 0x44, 0x61, 0x73, 0x6b, 0x57, 0x6f, 0x72, 0x6b, - 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x2a, 0x0a, 0x11, 0x6e, 0x75, 0x6d, 0x62, 0x65, - 0x72, 0x5f, 0x6f, 0x66, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0d, 0x52, 0x0f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x57, 0x6f, 0x72, 0x6b, - 0x65, 0x72, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x09, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x66, - 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x52, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x73, 0x42, 0xbe, 0x01, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, - 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x42, 0x09, 0x44, 0x61, - 0x73, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, - 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, - 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x66, - 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, - 0xa2, 0x02, 0x03, 0x46, 0x50, 0x58, 0xaa, 0x02, 0x11, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, - 0x6c, 0x32, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0xca, 0x02, 0x11, 0x46, 0x6c, 0x79, - 0x74, 0x65, 0x69, 0x64, 0x6c, 
0x32, 0x5c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0xe2, 0x02, - 0x1d, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x50, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x73, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, - 0x12, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x3a, 0x3a, 0x50, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x73, 0x4a, 0xfb, 0x09, 0x0a, 0x06, 0x12, 0x04, 0x00, 0x00, 0x27, 0x01, 0x0a, 0x08, - 0x0a, 0x01, 0x0c, 0x12, 0x03, 0x00, 0x00, 0x12, 0x0a, 0x08, 0x0a, 0x01, 0x02, 0x12, 0x03, 0x02, - 0x00, 0x1a, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x00, 0x12, 0x03, 0x04, 0x00, 0x24, 0x0a, 0x08, 0x0a, - 0x01, 0x08, 0x12, 0x03, 0x06, 0x00, 0x4c, 0x0a, 0x09, 0x0a, 0x02, 0x08, 0x0b, 0x12, 0x03, 0x06, - 0x00, 0x4c, 0x0a, 0x2b, 0x0a, 0x02, 0x04, 0x00, 0x12, 0x04, 0x09, 0x00, 0x0f, 0x01, 0x1a, 0x1f, - 0x20, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x20, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x20, 0x66, 0x6f, - 0x72, 0x20, 0x44, 0x61, 0x73, 0x6b, 0x20, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x0a, 0x0a, - 0x0a, 0x0a, 0x03, 0x04, 0x00, 0x01, 0x12, 0x03, 0x09, 0x08, 0x0f, 0x0a, 0x2a, 0x0a, 0x04, 0x04, - 0x00, 0x02, 0x00, 0x12, 0x03, 0x0b, 0x02, 0x1e, 0x1a, 0x1d, 0x20, 0x53, 0x70, 0x65, 0x63, 0x20, - 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, - 0x72, 0x20, 0x70, 0x6f, 0x64, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x06, - 0x12, 0x03, 0x0b, 0x02, 0x0f, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x01, 0x12, 0x03, - 0x0b, 0x10, 0x19, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x03, 0x12, 0x03, 0x0b, 0x1c, - 0x1d, 0x0a, 0x30, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x01, 0x12, 0x03, 0x0e, 0x02, 0x1e, 0x1a, 0x23, - 0x20, 0x53, 0x70, 0x65, 0x63, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x64, 0x65, 0x66, - 0x61, 0x75, 0x6c, 0x74, 0x20, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x20, 0x67, 0x72, 0x6f, 0x75, - 0x70, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 
0x06, 0x12, 0x03, 0x0e, 0x02, - 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x01, 0x12, 0x03, 0x0e, 0x12, 0x19, 0x0a, - 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x03, 0x12, 0x03, 0x0e, 0x1c, 0x1d, 0x0a, 0x32, 0x0a, - 0x02, 0x04, 0x01, 0x12, 0x04, 0x12, 0x00, 0x18, 0x01, 0x1a, 0x26, 0x20, 0x53, 0x70, 0x65, 0x63, - 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, - 0x65, 0x20, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x20, 0x70, 0x6f, 0x64, 0x2e, - 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x01, 0x01, 0x12, 0x03, 0x12, 0x08, 0x15, 0x0a, 0x4b, 0x0a, - 0x04, 0x04, 0x01, 0x02, 0x00, 0x12, 0x03, 0x14, 0x02, 0x13, 0x1a, 0x3e, 0x20, 0x4f, 0x70, 0x74, - 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x20, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x20, 0x74, 0x6f, 0x20, 0x75, - 0x73, 0x65, 0x2e, 0x20, 0x49, 0x66, 0x20, 0x75, 0x6e, 0x73, 0x65, 0x74, 0x2c, 0x20, 0x77, 0x69, - 0x6c, 0x6c, 0x20, 0x75, 0x73, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x64, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x20, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, - 0x02, 0x00, 0x05, 0x12, 0x03, 0x14, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, - 0x01, 0x12, 0x03, 0x14, 0x09, 0x0e, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x03, 0x12, - 0x03, 0x14, 0x11, 0x12, 0x0a, 0x37, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x01, 0x12, 0x03, 0x17, 0x02, - 0x29, 0x1a, 0x2a, 0x20, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x20, 0x61, 0x73, - 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x20, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x63, - 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x20, 0x70, 0x6f, 0x64, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, - 0x05, 0x04, 0x01, 0x02, 0x01, 0x06, 0x12, 0x03, 0x17, 0x02, 0x1a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, - 0x01, 0x02, 0x01, 0x01, 0x12, 0x03, 0x17, 0x1b, 0x24, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, - 0x01, 0x03, 0x12, 0x03, 0x17, 0x27, 0x28, 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x02, 0x12, 0x04, 0x1a, - 0x00, 
0x27, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x02, 0x01, 0x12, 0x03, 0x1a, 0x08, 0x17, 0x0a, - 0x2e, 0x0a, 0x04, 0x04, 0x02, 0x02, 0x00, 0x12, 0x03, 0x1c, 0x02, 0x1f, 0x1a, 0x21, 0x20, 0x4e, - 0x75, 0x6d, 0x62, 0x65, 0x72, 0x20, 0x6f, 0x66, 0x20, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, - 0x20, 0x69, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x2e, 0x0a, 0x0a, - 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, 0x05, 0x12, 0x03, 0x1c, 0x02, 0x08, 0x0a, 0x0c, 0x0a, - 0x05, 0x04, 0x02, 0x02, 0x00, 0x01, 0x12, 0x03, 0x1c, 0x09, 0x1a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, - 0x02, 0x02, 0x00, 0x03, 0x12, 0x03, 0x1c, 0x1d, 0x1e, 0x0a, 0x6c, 0x0a, 0x04, 0x04, 0x02, 0x02, - 0x01, 0x12, 0x03, 0x1f, 0x02, 0x13, 0x1a, 0x5f, 0x20, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, - 0x6c, 0x20, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x20, 0x74, 0x6f, 0x20, 0x75, 0x73, 0x65, 0x20, 0x66, - 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x6f, 0x64, 0x73, 0x20, 0x6f, 0x66, 0x20, 0x74, - 0x68, 0x65, 0x20, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x20, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x2e, - 0x20, 0x49, 0x66, 0x20, 0x75, 0x6e, 0x73, 0x65, 0x74, 0x2c, 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, - 0x75, 0x73, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, - 0x69, 0x6d, 0x61, 0x67, 0x65, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x01, 0x05, - 0x12, 0x03, 0x1f, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x01, 0x01, 0x12, 0x03, - 0x1f, 0x09, 0x0e, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x01, 0x03, 0x12, 0x03, 0x1f, 0x11, - 0x12, 0x0a, 0x83, 0x03, 0x0a, 0x04, 0x04, 0x02, 0x02, 0x02, 0x12, 0x03, 0x26, 0x02, 0x29, 0x1a, - 0xf5, 0x02, 0x20, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x20, 0x61, 0x73, 0x73, - 0x69, 0x67, 0x6e, 0x65, 0x64, 0x20, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x61, 0x6c, 0x6c, - 0x20, 0x70, 0x6f, 0x64, 0x73, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x77, 0x6f, 0x72, - 0x6b, 0x65, 0x72, 0x20, 0x67, 0x72, 0x6f, 
0x75, 0x70, 0x2e, 0x0a, 0x20, 0x41, 0x73, 0x20, 0x70, - 0x65, 0x72, 0x20, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x6b, 0x75, 0x62, 0x65, 0x72, - 0x6e, 0x65, 0x74, 0x65, 0x73, 0x2e, 0x64, 0x61, 0x73, 0x6b, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x65, - 0x6e, 0x2f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x2f, 0x6b, 0x75, 0x62, 0x65, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x2e, 0x68, 0x74, 0x6d, 0x6c, 0x3f, 0x68, 0x69, 0x67, 0x68, 0x6c, 0x69, - 0x67, 0x68, 0x74, 0x3d, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x23, 0x62, 0x65, 0x73, 0x74, 0x2d, 0x70, - 0x72, 0x61, 0x63, 0x74, 0x69, 0x63, 0x65, 0x73, 0x0a, 0x20, 0x69, 0x74, 0x20, 0x69, 0x73, 0x20, - 0x61, 0x64, 0x76, 0x69, 0x73, 0x65, 0x64, 0x20, 0x74, 0x6f, 0x20, 0x6f, 0x6e, 0x6c, 0x79, 0x20, - 0x73, 0x65, 0x74, 0x20, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x2e, 0x20, 0x49, 0x66, 0x20, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x20, 0x61, 0x72, 0x65, 0x20, 0x6e, 0x6f, 0x74, 0x20, - 0x65, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x6c, 0x79, 0x20, 0x73, 0x65, 0x74, 0x2c, 0x20, - 0x74, 0x68, 0x65, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, - 0x6d, 0x61, 0x6b, 0x65, 0x0a, 0x20, 0x73, 0x75, 0x72, 0x65, 0x20, 0x74, 0x6f, 0x20, 0x73, 0x65, - 0x74, 0x20, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x3d, 0x3d, 0x6c, 0x69, 0x6d, 0x69, - 0x74, 0x73, 0x2e, 0x0a, 0x20, 0x54, 0x68, 0x65, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x20, - 0x73, 0x65, 0x74, 0x73, 0x20, 0x60, 0x20, 0x2d, 0x2d, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x2d, - 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x60, 0x20, 0x61, 0x73, 0x20, 0x77, 0x65, 0x6c, 0x6c, 0x20, 0x61, - 0x73, 0x20, 0x60, 0x2d, 0x2d, 0x6e, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x73, 0x60, 0x20, 0x66, - 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x20, 0x61, - 0x63, 0x63, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x20, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, - 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 
0x02, 0x02, 0x06, - 0x12, 0x03, 0x26, 0x02, 0x1a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x02, 0x01, 0x12, 0x03, - 0x26, 0x1b, 0x24, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x02, 0x03, 0x12, 0x03, 0x26, 0x27, - 0x28, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x0a, 0xc7, 0x0b, 0x0a, 0x1f, 0x66, 0x6c, - 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2f, - 0x70, 0x79, 0x74, 0x6f, 0x72, 0x63, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x66, - 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, - 0x22, 0xc1, 0x01, 0x0a, 0x0d, 0x45, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x64, 0x7a, 0x76, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65, - 0x6e, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x64, 0x7a, 0x76, 0x42, 0x61, - 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x6d, 0x69, 0x6e, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, - 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, - 0x6d, 0x61, 0x78, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6e, - 0x70, 0x72, 0x6f, 0x63, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x0c, 0x6e, 0x70, 0x72, 0x6f, 0x63, 0x50, 0x65, 0x72, 0x4e, 0x6f, 0x64, - 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x73, 0x22, 0x83, 0x01, 0x0a, 0x1e, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, - 0x75, 0x74, 0x65, 0x64, 0x50, 0x79, 0x54, 0x6f, 0x72, 0x63, 0x68, 0x54, 0x72, 0x61, 0x69, 0x6e, - 0x69, 0x6e, 0x67, 
0x54, 0x61, 0x73, 0x6b, 0x12, 0x18, 0x0a, 0x07, 0x77, 0x6f, 0x72, 0x6b, 0x65, - 0x72, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, - 0x73, 0x12, 0x47, 0x0a, 0x0e, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x5f, 0x63, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x66, 0x6c, 0x79, 0x74, - 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x45, 0x6c, - 0x61, 0x73, 0x74, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0d, 0x65, 0x6c, 0x61, - 0x73, 0x74, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0xc1, 0x01, 0x0a, 0x15, 0x63, - 0x6f, 0x6d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x73, 0x42, 0x0c, 0x50, 0x79, 0x74, 0x6f, 0x72, 0x63, 0x68, 0x50, 0x72, 0x6f, + 0x6b, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x52, 0x07, 0x77, 0x6f, + 0x72, 0x6b, 0x65, 0x72, 0x73, 0x22, 0x5e, 0x0a, 0x0d, 0x44, 0x61, 0x73, 0x6b, 0x53, 0x63, 0x68, + 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x09, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x73, 0x22, 0x8c, 0x01, 0x0a, 0x0f, 0x44, 0x61, 0x73, 0x6b, 0x57, 0x6f, + 0x72, 0x6b, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x2a, 0x0a, 0x11, 0x6e, 0x75, 0x6d, + 0x62, 0x65, 0x72, 0x5f, 0x6f, 0x66, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x57, 0x6f, + 0x72, 0x6b, 0x65, 0x72, 0x73, 0x12, 0x14, 0x0a, 0x05, 
0x69, 0x6d, 0x61, 0x67, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x09, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, + 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x52, 0x09, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x73, 0x42, 0xbe, 0x01, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x42, 0x09, + 0x44, 0x61, 0x73, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x35, 0x67, 0x69, 0x74, + 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, + 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, + 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x73, 0xa2, 0x02, 0x03, 0x46, 0x50, 0x58, 0xaa, 0x02, 0x11, 0x46, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0xca, 0x02, 0x11, 0x46, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, + 0xe2, 0x02, 0x1d, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x50, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x73, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, + 0xea, 0x02, 0x12, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x3a, 0x3a, 0x50, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x73, 0x4a, 0xfb, 0x09, 0x0a, 0x06, 0x12, 0x04, 0x00, 0x00, 0x27, 0x01, + 0x0a, 0x08, 0x0a, 0x01, 0x0c, 0x12, 0x03, 0x00, 0x00, 0x12, 0x0a, 0x08, 0x0a, 0x01, 0x02, 0x12, + 0x03, 0x02, 0x00, 0x1a, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x00, 0x12, 0x03, 0x04, 0x00, 0x24, 0x0a, + 0x08, 0x0a, 0x01, 0x08, 0x12, 0x03, 0x06, 0x00, 0x4c, 0x0a, 0x09, 0x0a, 0x02, 0x08, 0x0b, 
0x12, + 0x03, 0x06, 0x00, 0x4c, 0x0a, 0x2b, 0x0a, 0x02, 0x04, 0x00, 0x12, 0x04, 0x09, 0x00, 0x0f, 0x01, + 0x1a, 0x1f, 0x20, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x20, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x20, + 0x66, 0x6f, 0x72, 0x20, 0x44, 0x61, 0x73, 0x6b, 0x20, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, + 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x00, 0x01, 0x12, 0x03, 0x09, 0x08, 0x0f, 0x0a, 0x2a, 0x0a, + 0x04, 0x04, 0x00, 0x02, 0x00, 0x12, 0x03, 0x0b, 0x02, 0x1e, 0x1a, 0x1d, 0x20, 0x53, 0x70, 0x65, + 0x63, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, + 0x6c, 0x65, 0x72, 0x20, 0x70, 0x6f, 0x64, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, + 0x00, 0x06, 0x12, 0x03, 0x0b, 0x02, 0x0f, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x01, + 0x12, 0x03, 0x0b, 0x10, 0x19, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x03, 0x12, 0x03, + 0x0b, 0x1c, 0x1d, 0x0a, 0x30, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x01, 0x12, 0x03, 0x0e, 0x02, 0x1e, + 0x1a, 0x23, 0x20, 0x53, 0x70, 0x65, 0x63, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x64, + 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x20, 0x67, 0x72, + 0x6f, 0x75, 0x70, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x06, 0x12, 0x03, + 0x0e, 0x02, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x01, 0x12, 0x03, 0x0e, 0x12, + 0x19, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x03, 0x12, 0x03, 0x0e, 0x1c, 0x1d, 0x0a, + 0x32, 0x0a, 0x02, 0x04, 0x01, 0x12, 0x04, 0x12, 0x00, 0x18, 0x01, 0x1a, 0x26, 0x20, 0x53, 0x70, + 0x65, 0x63, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x66, 0x6f, 0x72, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x20, 0x70, 0x6f, + 0x64, 0x2e, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x01, 0x01, 0x12, 0x03, 0x12, 0x08, 0x15, 0x0a, + 0x4b, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x00, 0x12, 0x03, 0x14, 0x02, 0x13, 0x1a, 0x3e, 0x20, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 
0x61, 0x6c, 0x20, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x20, 0x74, 0x6f, + 0x20, 0x75, 0x73, 0x65, 0x2e, 0x20, 0x49, 0x66, 0x20, 0x75, 0x6e, 0x73, 0x65, 0x74, 0x2c, 0x20, + 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x75, 0x73, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x20, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x01, 0x02, 0x00, 0x05, 0x12, 0x03, 0x14, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, + 0x02, 0x00, 0x01, 0x12, 0x03, 0x14, 0x09, 0x0e, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, + 0x03, 0x12, 0x03, 0x14, 0x11, 0x12, 0x0a, 0x37, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x01, 0x12, 0x03, + 0x17, 0x02, 0x29, 0x1a, 0x2a, 0x20, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x20, + 0x61, 0x73, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x20, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, + 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x20, 0x70, 0x6f, 0x64, 0x2e, 0x0a, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x01, 0x06, 0x12, 0x03, 0x17, 0x02, 0x1a, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x01, 0x02, 0x01, 0x01, 0x12, 0x03, 0x17, 0x1b, 0x24, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x01, 0x02, 0x01, 0x03, 0x12, 0x03, 0x17, 0x27, 0x28, 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x02, 0x12, + 0x04, 0x1a, 0x00, 0x27, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x02, 0x01, 0x12, 0x03, 0x1a, 0x08, + 0x17, 0x0a, 0x2e, 0x0a, 0x04, 0x04, 0x02, 0x02, 0x00, 0x12, 0x03, 0x1c, 0x02, 0x1f, 0x1a, 0x21, + 0x20, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x20, 0x6f, 0x66, 0x20, 0x77, 0x6f, 0x72, 0x6b, 0x65, + 0x72, 0x73, 0x20, 0x69, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x2e, + 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, 0x05, 0x12, 0x03, 0x1c, 0x02, 0x08, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, 0x01, 0x12, 0x03, 0x1c, 0x09, 0x1a, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x02, 0x02, 0x00, 0x03, 0x12, 0x03, 0x1c, 0x1d, 0x1e, 0x0a, 0x6c, 0x0a, 0x04, 0x04, + 0x02, 0x02, 0x01, 0x12, 0x03, 0x1f, 0x02, 0x13, 0x1a, 0x5f, 0x20, 
0x4f, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x20, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x20, 0x74, 0x6f, 0x20, 0x75, 0x73, 0x65, + 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x6f, 0x64, 0x73, 0x20, 0x6f, 0x66, + 0x20, 0x74, 0x68, 0x65, 0x20, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x20, 0x67, 0x72, 0x6f, 0x75, + 0x70, 0x2e, 0x20, 0x49, 0x66, 0x20, 0x75, 0x6e, 0x73, 0x65, 0x74, 0x2c, 0x20, 0x77, 0x69, 0x6c, + 0x6c, 0x20, 0x75, 0x73, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x20, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, + 0x01, 0x05, 0x12, 0x03, 0x1f, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x01, 0x01, + 0x12, 0x03, 0x1f, 0x09, 0x0e, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x01, 0x03, 0x12, 0x03, + 0x1f, 0x11, 0x12, 0x0a, 0x83, 0x03, 0x0a, 0x04, 0x04, 0x02, 0x02, 0x02, 0x12, 0x03, 0x26, 0x02, + 0x29, 0x1a, 0xf5, 0x02, 0x20, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x73, 0x20, 0x61, + 0x73, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x20, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x61, + 0x6c, 0x6c, 0x20, 0x70, 0x6f, 0x64, 0x73, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x77, + 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x20, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x2e, 0x0a, 0x20, 0x41, 0x73, + 0x20, 0x70, 0x65, 0x72, 0x20, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x6b, 0x75, 0x62, + 0x65, 0x72, 0x6e, 0x65, 0x74, 0x65, 0x73, 0x2e, 0x64, 0x61, 0x73, 0x6b, 0x2e, 0x6f, 0x72, 0x67, + 0x2f, 0x65, 0x6e, 0x2f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x2f, 0x6b, 0x75, 0x62, 0x65, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, 0x68, 0x74, 0x6d, 0x6c, 0x3f, 0x68, 0x69, 0x67, 0x68, + 0x6c, 0x69, 0x67, 0x68, 0x74, 0x3d, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x23, 0x62, 0x65, 0x73, 0x74, + 0x2d, 0x70, 0x72, 0x61, 0x63, 0x74, 0x69, 0x63, 0x65, 0x73, 0x0a, 0x20, 0x69, 0x74, 0x20, 0x69, + 0x73, 0x20, 0x61, 0x64, 0x76, 0x69, 0x73, 0x65, 0x64, 0x20, 0x74, 0x6f, 0x20, 0x6f, 0x6e, 0x6c, + 0x79, 
0x20, 0x73, 0x65, 0x74, 0x20, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x73, 0x2e, 0x20, 0x49, 0x66, + 0x20, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x20, 0x61, 0x72, 0x65, 0x20, 0x6e, 0x6f, + 0x74, 0x20, 0x65, 0x78, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x6c, 0x79, 0x20, 0x73, 0x65, 0x74, + 0x2c, 0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x20, 0x77, 0x69, 0x6c, + 0x6c, 0x20, 0x6d, 0x61, 0x6b, 0x65, 0x0a, 0x20, 0x73, 0x75, 0x72, 0x65, 0x20, 0x74, 0x6f, 0x20, + 0x73, 0x65, 0x74, 0x20, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x3d, 0x3d, 0x6c, 0x69, + 0x6d, 0x69, 0x74, 0x73, 0x2e, 0x0a, 0x20, 0x54, 0x68, 0x65, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x20, 0x73, 0x65, 0x74, 0x73, 0x20, 0x60, 0x20, 0x2d, 0x2d, 0x6d, 0x65, 0x6d, 0x6f, 0x72, + 0x79, 0x2d, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x60, 0x20, 0x61, 0x73, 0x20, 0x77, 0x65, 0x6c, 0x6c, + 0x20, 0x61, 0x73, 0x20, 0x60, 0x2d, 0x2d, 0x6e, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x73, 0x60, + 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, + 0x20, 0x61, 0x63, 0x63, 0x6f, 0x72, 0x64, 0x69, 0x6e, 0x67, 0x20, 0x74, 0x6f, 0x20, 0x74, 0x68, + 0x65, 0x20, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, + 0x02, 0x06, 0x12, 0x03, 0x26, 0x02, 0x1a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x02, 0x01, + 0x12, 0x03, 0x26, 0x1b, 0x24, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x02, 0x03, 0x12, 0x03, + 0x26, 0x27, 0x28, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x0a, 0xf3, 0x08, 0x0a, 0x1b, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x73, 0x2f, 0x6d, 0x70, 0x69, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x22, 0x87, + 0x01, 0x0a, 0x1a, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x4d, 0x50, + 0x49, 0x54, 0x72, 0x61, 0x69, 0x6e, 0x69, 
0x6e, 0x67, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x1f, 0x0a, + 0x0b, 0x6e, 0x75, 0x6d, 0x5f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x0a, 0x6e, 0x75, 0x6d, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x12, 0x32, + 0x0a, 0x15, 0x6e, 0x75, 0x6d, 0x5f, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x65, 0x72, 0x5f, 0x72, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x13, 0x6e, + 0x75, 0x6d, 0x4c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x65, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x6c, 0x6f, 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x05, 0x73, 0x6c, 0x6f, 0x74, 0x73, 0x42, 0xbd, 0x01, 0x0a, 0x15, 0x63, 0x6f, 0x6d, + 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x73, 0x42, 0x08, 0x4d, 0x70, 0x69, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x35, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x6f, 0x72, 0x67, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, + 0x2f, 0x67, 0x6f, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x73, 0xa2, 0x02, 0x03, 0x46, 0x50, 0x58, 0xaa, 0x02, 0x11, 0x46, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0xca, + 0x02, 0x11, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x50, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x73, 0xe2, 0x02, 0x1d, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, + 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0xea, 0x02, 0x12, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x3a, + 0x3a, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x4a, 0xee, 0x05, 0x0a, 0x06, 0x12, 0x04, 0x00, + 0x00, 0x13, 0x01, 0x0a, 0x08, 0x0a, 0x01, 0x0c, 0x12, 0x03, 0x00, 0x00, 0x12, 
0x0a, 0x08, 0x0a, + 0x01, 0x02, 0x12, 0x03, 0x02, 0x00, 0x1a, 0x0a, 0x08, 0x0a, 0x01, 0x08, 0x12, 0x03, 0x04, 0x00, + 0x4c, 0x0a, 0x09, 0x0a, 0x02, 0x08, 0x0b, 0x12, 0x03, 0x04, 0x00, 0x4c, 0x0a, 0xe3, 0x01, 0x0a, + 0x02, 0x04, 0x00, 0x12, 0x04, 0x08, 0x00, 0x13, 0x01, 0x1a, 0xd6, 0x01, 0x20, 0x4d, 0x50, 0x49, + 0x20, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x20, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, + 0x61, 0x6c, 0x20, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x63, + 0x6f, 0x6d, 0x6d, 0x75, 0x6e, 0x69, 0x74, 0x79, 0x2f, 0x62, 0x6c, 0x6f, 0x62, 0x2f, 0x6d, 0x61, + 0x73, 0x74, 0x65, 0x72, 0x2f, 0x70, 0x72, 0x6f, 0x70, 0x6f, 0x73, 0x61, 0x6c, 0x73, 0x2f, 0x6d, + 0x70, 0x69, 0x2d, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x2d, 0x70, 0x72, 0x6f, 0x70, + 0x6f, 0x73, 0x61, 0x6c, 0x2e, 0x6d, 0x64, 0x0a, 0x20, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x20, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x20, 0x64, 0x69, + 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x20, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x69, + 0x6e, 0x67, 0x20, 0x75, 0x73, 0x69, 0x6e, 0x67, 0x20, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, + 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6b, 0x75, 0x62, 0x65, + 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x6d, 0x70, 0x69, 0x2d, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, + 0x72, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x00, 0x01, 0x12, 0x03, 0x08, 0x08, 0x22, 0x0a, 0x43, + 0x0a, 0x04, 0x04, 0x00, 0x02, 0x00, 0x12, 0x03, 0x0a, 0x02, 0x18, 0x1a, 0x36, 0x20, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x20, 0x6f, 0x66, 0x20, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x20, 0x73, + 0x70, 0x61, 0x77, 0x6e, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x6c, + 0x75, 0x73, 0x74, 
0x65, 0x72, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x6a, + 0x6f, 0x62, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x05, 0x12, 0x03, 0x0a, 0x02, + 0x07, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x01, 0x12, 0x03, 0x0a, 0x08, 0x13, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x03, 0x12, 0x03, 0x0a, 0x16, 0x17, 0x0a, 0x9f, 0x01, + 0x0a, 0x04, 0x04, 0x00, 0x02, 0x01, 0x12, 0x03, 0x0e, 0x02, 0x22, 0x1a, 0x91, 0x01, 0x20, 0x6e, + 0x75, 0x6d, 0x62, 0x65, 0x72, 0x20, 0x6f, 0x66, 0x20, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x65, + 0x72, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x20, 0x73, 0x70, 0x61, 0x77, 0x6e, + 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x6a, 0x6f, 0x62, 0x0a, 0x20, + 0x54, 0x68, 0x65, 0x20, 0x6c, 0x61, 0x75, 0x6e, 0x63, 0x68, 0x65, 0x72, 0x20, 0x70, 0x6f, 0x64, + 0x20, 0x69, 0x6e, 0x76, 0x6f, 0x6b, 0x65, 0x73, 0x20, 0x6d, 0x70, 0x69, 0x72, 0x75, 0x6e, 0x20, + 0x61, 0x6e, 0x64, 0x20, 0x63, 0x6f, 0x6d, 0x6d, 0x75, 0x6e, 0x69, 0x63, 0x61, 0x74, 0x65, 0x73, + 0x20, 0x77, 0x69, 0x74, 0x68, 0x20, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x20, 0x70, 0x6f, 0x64, + 0x73, 0x20, 0x74, 0x68, 0x72, 0x6f, 0x75, 0x67, 0x68, 0x20, 0x4d, 0x50, 0x49, 0x2e, 0x0a, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x05, 0x12, 0x03, 0x0e, 0x02, 0x07, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x00, 0x02, 0x01, 0x01, 0x12, 0x03, 0x0e, 0x08, 0x1d, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x00, 0x02, 0x01, 0x03, 0x12, 0x03, 0x0e, 0x20, 0x21, 0x0a, 0x64, 0x0a, 0x04, 0x04, 0x00, 0x02, + 0x02, 0x12, 0x03, 0x12, 0x02, 0x12, 0x1a, 0x57, 0x20, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x20, + 0x6f, 0x66, 0x20, 0x73, 0x6c, 0x6f, 0x74, 0x73, 0x20, 0x70, 0x65, 0x72, 0x20, 0x77, 0x6f, 0x72, + 0x6b, 0x65, 0x72, 0x20, 0x75, 0x73, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x68, 0x6f, 0x73, 0x74, + 0x66, 0x69, 0x6c, 0x65, 0x2e, 0x0a, 0x20, 0x54, 0x68, 
0x65, 0x20, 0x61, 0x76, 0x61, 0x69, 0x6c, + 0x61, 0x62, 0x6c, 0x65, 0x20, 0x73, 0x6c, 0x6f, 0x74, 0x73, 0x20, 0x28, 0x47, 0x50, 0x55, 0x73, + 0x29, 0x20, 0x69, 0x6e, 0x20, 0x65, 0x61, 0x63, 0x68, 0x20, 0x70, 0x6f, 0x64, 0x2e, 0x0a, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x02, 0x05, 0x12, 0x03, 0x12, 0x02, 0x07, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x00, 0x02, 0x02, 0x01, 0x12, 0x03, 0x12, 0x08, 0x0d, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x00, 0x02, 0x02, 0x03, 0x12, 0x03, 0x12, 0x10, 0x11, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, 0x0a, 0xc0, 0x06, 0x0a, 0x1e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2f, 0x70, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x22, 0x82, 0x01, 0x0a, 0x0b, 0x50, 0x72, 0x65, 0x73, + 0x74, 0x6f, 0x51, 0x75, 0x65, 0x72, 0x79, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x6f, 0x75, 0x74, 0x69, + 0x6e, 0x67, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x72, 0x6f, 0x75, 0x74, 0x69, 0x6e, 0x67, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x12, 0x18, 0x0a, 0x07, + 0x63, 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, + 0x61, 0x74, 0x61, 0x6c, 0x6f, 0x67, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1c, + 0x0a, 0x09, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x73, 0x74, 0x61, 0x74, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x42, 0xc0, 0x01, 0x0a, + 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x42, 0x0b, 0x50, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 
0x6f, + 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0xa2, 0x02, 0x03, 0x46, + 0x50, 0x58, 0xaa, 0x02, 0x11, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x50, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0xca, 0x02, 0x11, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x32, 0x5c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0xe2, 0x02, 0x1d, 0x46, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x5c, 0x47, + 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x12, 0x46, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x3a, 0x3a, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x4a, + 0xba, 0x03, 0x0a, 0x06, 0x12, 0x04, 0x00, 0x00, 0x0d, 0x01, 0x0a, 0x08, 0x0a, 0x01, 0x0c, 0x12, + 0x03, 0x00, 0x00, 0x12, 0x0a, 0x08, 0x0a, 0x01, 0x02, 0x12, 0x03, 0x02, 0x00, 0x1a, 0x0a, 0x08, + 0x0a, 0x01, 0x08, 0x12, 0x03, 0x04, 0x00, 0x4c, 0x0a, 0x09, 0x0a, 0x02, 0x08, 0x0b, 0x12, 0x03, + 0x04, 0x00, 0x4c, 0x0a, 0x9e, 0x01, 0x0a, 0x02, 0x04, 0x00, 0x12, 0x04, 0x08, 0x00, 0x0d, 0x01, + 0x1a, 0x91, 0x01, 0x20, 0x54, 0x68, 0x69, 0x73, 0x20, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x20, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x20, 0x77, 0x69, 0x74, 0x68, 0x20, 0x74, 0x68, 0x65, 0x20, + 0x27, 0x70, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x27, 0x20, 0x74, 0x61, 0x73, 0x6b, 0x20, 0x74, 0x79, + 0x70, 0x65, 0x20, 0x69, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x53, 0x44, 0x4b, 0x20, 0x61, 0x6e, + 0x64, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x20, + 0x74, 0x68, 0x61, 0x74, 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x62, 0x65, 0x20, 0x69, 0x6e, 0x20, + 0x74, 0x68, 0x65, 0x20, 0x27, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x27, 0x20, 0x66, 0x69, 0x65, + 0x6c, 0x64, 0x0a, 0x20, 0x6f, 
0x66, 0x20, 0x61, 0x20, 0x50, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x20, + 0x74, 0x61, 0x73, 0x6b, 0x27, 0x73, 0x20, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x65, 0x6d, 0x70, 0x6c, + 0x61, 0x74, 0x65, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x00, 0x01, 0x12, 0x03, 0x08, 0x08, 0x13, + 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x00, 0x12, 0x03, 0x09, 0x02, 0x1b, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x00, 0x02, 0x00, 0x05, 0x12, 0x03, 0x09, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x00, 0x02, 0x00, 0x01, 0x12, 0x03, 0x09, 0x09, 0x16, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, + 0x00, 0x03, 0x12, 0x03, 0x09, 0x19, 0x1a, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x01, 0x12, + 0x03, 0x0a, 0x02, 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x05, 0x12, 0x03, 0x0a, + 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x01, 0x12, 0x03, 0x0a, 0x09, 0x10, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x03, 0x12, 0x03, 0x0a, 0x13, 0x14, 0x0a, 0x0b, + 0x0a, 0x04, 0x04, 0x00, 0x02, 0x02, 0x12, 0x03, 0x0b, 0x02, 0x14, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x00, 0x02, 0x02, 0x05, 0x12, 0x03, 0x0b, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, + 0x02, 0x01, 0x12, 0x03, 0x0b, 0x09, 0x0f, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x02, 0x03, + 0x12, 0x03, 0x0b, 0x12, 0x13, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x03, 0x12, 0x03, 0x0c, + 0x02, 0x17, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x03, 0x05, 0x12, 0x03, 0x0c, 0x02, 0x08, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x03, 0x01, 0x12, 0x03, 0x0c, 0x09, 0x12, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x00, 0x02, 0x03, 0x03, 0x12, 0x03, 0x0c, 0x15, 0x16, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, 0x0a, 0xc7, 0x0b, 0x0a, 0x1f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2f, 0x70, 0x79, 0x74, 0x6f, 0x72, + 0x63, 0x68, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 
0x22, 0xc1, 0x01, 0x0a, 0x0d, + 0x45, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x21, 0x0a, + 0x0c, 0x72, 0x64, 0x7a, 0x76, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x64, 0x7a, 0x76, 0x42, 0x61, 0x63, 0x6b, 0x65, 0x6e, 0x64, + 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x6d, 0x69, 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, + 0x63, 0x61, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x24, 0x0a, 0x0e, 0x6e, 0x70, 0x72, 0x6f, 0x63, 0x5f, + 0x70, 0x65, 0x72, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, + 0x6e, 0x70, 0x72, 0x6f, 0x63, 0x50, 0x65, 0x72, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x21, 0x0a, 0x0c, + 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x73, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x0b, 0x6d, 0x61, 0x78, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x73, 0x22, + 0x83, 0x01, 0x0a, 0x1e, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x50, + 0x79, 0x54, 0x6f, 0x72, 0x63, 0x68, 0x54, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x54, 0x61, + 0x73, 0x6b, 0x12, 0x18, 0x0a, 0x07, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x07, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x12, 0x47, 0x0a, 0x0e, + 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, + 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x45, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, + 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x0d, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x43, + 0x6f, 
0x6e, 0x66, 0x69, 0x67, 0x42, 0xc1, 0x01, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x42, + 0x0c, 0x50, 0x79, 0x74, 0x6f, 0x72, 0x63, 0x68, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, + 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0xa2, 0x02, 0x03, 0x46, 0x50, 0x58, 0xaa, 0x02, 0x11, 0x46, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, + 0xca, 0x02, 0x11, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x50, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x73, 0xe2, 0x02, 0x1d, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, + 0x5c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x12, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, + 0x3a, 0x3a, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x4a, 0xfa, 0x06, 0x0a, 0x06, 0x12, 0x04, + 0x00, 0x00, 0x17, 0x01, 0x0a, 0x08, 0x0a, 0x01, 0x0c, 0x12, 0x03, 0x00, 0x00, 0x12, 0x0a, 0x08, + 0x0a, 0x01, 0x02, 0x12, 0x03, 0x02, 0x00, 0x1a, 0x0a, 0x08, 0x0a, 0x01, 0x08, 0x12, 0x03, 0x04, + 0x00, 0x4c, 0x0a, 0x09, 0x0a, 0x02, 0x08, 0x0b, 0x12, 0x03, 0x04, 0x00, 0x4c, 0x0a, 0xd4, 0x01, + 0x0a, 0x02, 0x04, 0x00, 0x12, 0x04, 0x08, 0x00, 0x0e, 0x01, 0x1a, 0xc7, 0x01, 0x20, 0x43, 0x75, + 0x73, 0x74, 0x6f, 0x6d, 0x20, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, + 0x6f, 0x72, 0x63, 0x68, 0x20, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x20, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x65, 0x64, 0x20, 0x74, 0x72, 0x61, 
0x69, 0x6e, 0x69, 0x6e, 0x67, 0x20, 0x75, 0x73, 0x69, + 0x6e, 0x67, 0x0a, 0x20, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x67, 0x69, 0x74, 0x68, + 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2f, + 0x74, 0x72, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2f, 0x62, 0x6c, 0x6f, 0x62, 0x2f, 0x65, 0x33, 0x31, + 0x64, 0x31, 0x31, 0x66, 0x61, 0x61, 0x39, 0x66, 0x36, 0x63, 0x65, 0x35, 0x31, 0x31, 0x31, 0x62, + 0x36, 0x30, 0x63, 0x30, 0x31, 0x30, 0x37, 0x39, 0x64, 0x33, 0x39, 0x32, 0x39, 0x35, 0x35, 0x38, + 0x39, 0x65, 0x30, 0x65, 0x66, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x6b, + 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x76, 0x31, 0x2f, 0x70, + 0x79, 0x74, 0x6f, 0x72, 0x63, 0x68, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2e, 0x67, 0x6f, 0x23, + 0x4c, 0x39, 0x38, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x00, 0x01, 0x12, 0x03, 0x08, 0x08, 0x15, + 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x00, 0x12, 0x03, 0x09, 0x02, 0x1a, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x00, 0x02, 0x00, 0x05, 0x12, 0x03, 0x09, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x00, 0x02, 0x00, 0x01, 0x12, 0x03, 0x09, 0x09, 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, + 0x00, 0x03, 0x12, 0x03, 0x09, 0x18, 0x19, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x01, 0x12, + 0x03, 0x0a, 0x02, 0x19, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x05, 0x12, 0x03, 0x0a, + 0x02, 0x07, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x01, 0x12, 0x03, 0x0a, 0x08, 0x14, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x03, 0x12, 0x03, 0x0a, 0x17, 0x18, 0x0a, 0x0b, + 0x0a, 0x04, 0x04, 0x00, 0x02, 0x02, 0x12, 0x03, 0x0b, 0x02, 0x19, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x00, 0x02, 0x02, 0x05, 0x12, 0x03, 0x0b, 0x02, 0x07, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, + 0x02, 0x01, 0x12, 0x03, 0x0b, 0x08, 0x14, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x02, 0x03, + 0x12, 0x03, 0x0b, 0x17, 0x18, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x03, 
0x12, 0x03, 0x0c, + 0x02, 0x1b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x03, 0x05, 0x12, 0x03, 0x0c, 0x02, 0x07, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x03, 0x01, 0x12, 0x03, 0x0c, 0x08, 0x16, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x00, 0x02, 0x03, 0x03, 0x12, 0x03, 0x0c, 0x19, 0x1a, 0x0a, 0x0b, 0x0a, 0x04, + 0x04, 0x00, 0x02, 0x04, 0x12, 0x03, 0x0d, 0x02, 0x19, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, + 0x04, 0x05, 0x12, 0x03, 0x0d, 0x02, 0x07, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x04, 0x01, + 0x12, 0x03, 0x0d, 0x08, 0x14, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x04, 0x03, 0x12, 0x03, + 0x0d, 0x17, 0x18, 0x0a, 0x71, 0x0a, 0x02, 0x04, 0x01, 0x12, 0x04, 0x11, 0x00, 0x17, 0x01, 0x1a, + 0x65, 0x20, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x20, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x20, 0x66, + 0x6f, 0x72, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x65, + 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x20, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, + 0x65, 0x64, 0x20, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x20, 0x75, 0x73, 0x69, 0x6e, + 0x67, 0x20, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x74, 0x72, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x01, 0x01, 0x12, 0x03, 0x11, + 0x08, 0x26, 0x0a, 0x4c, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x00, 0x12, 0x03, 0x13, 0x02, 0x14, 0x1a, + 0x3f, 0x20, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x20, 0x6f, 0x66, 0x20, 0x77, 0x6f, 0x72, 0x6b, + 0x65, 0x72, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x20, 0x73, 0x70, 0x61, 0x77, + 0x6e, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x6a, 0x6f, 0x62, 0x0a, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x05, 0x12, 0x03, 0x13, 0x02, 0x07, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 
0x01, 0x02, 0x00, 0x01, 0x12, 0x03, 0x13, 0x08, 0x0f, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x01, 0x02, 0x00, 0x03, 0x12, 0x03, 0x13, 0x12, 0x13, 0x0a, 0x30, 0x0a, 0x04, 0x04, 0x01, + 0x02, 0x01, 0x12, 0x03, 0x16, 0x02, 0x23, 0x1a, 0x23, 0x20, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x6e, 0x20, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x20, + 0x70, 0x79, 0x74, 0x6f, 0x72, 0x63, 0x68, 0x20, 0x6a, 0x6f, 0x62, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x01, 0x02, 0x01, 0x06, 0x12, 0x03, 0x16, 0x02, 0x0f, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, + 0x02, 0x01, 0x01, 0x12, 0x03, 0x16, 0x10, 0x1e, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x01, + 0x03, 0x12, 0x03, 0x16, 0x21, 0x22, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x0a, 0xdf, + 0x0b, 0x0a, 0x1e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x73, 0x2f, 0x71, 0x75, 0x62, 0x6f, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x11, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x73, 0x22, 0x62, 0x0a, 0x09, 0x48, 0x69, 0x76, 0x65, 0x51, 0x75, 0x65, 0x72, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x6f, + 0x75, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x74, 0x69, + 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x53, 0x65, 0x63, 0x12, 0x1e, 0x0a, 0x0a, 0x72, 0x65, 0x74, 0x72, + 0x79, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x72, 0x65, + 0x74, 0x72, 0x79, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x4d, 0x0a, 0x13, 0x48, 0x69, 0x76, 0x65, + 0x51, 0x75, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x36, 0x0a, 0x07, 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x1c, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 
0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x48, 0x69, 0x76, 0x65, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x07, + 0x71, 0x75, 0x65, 0x72, 0x69, 0x65, 0x73, 0x22, 0xd3, 0x01, 0x0a, 0x0d, 0x51, 0x75, 0x62, 0x6f, + 0x6c, 0x65, 0x48, 0x69, 0x76, 0x65, 0x4a, 0x6f, 0x62, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x5f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x55, + 0x0a, 0x10, 0x71, 0x75, 0x65, 0x72, 0x79, 0x5f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x48, 0x69, 0x76, + 0x65, 0x51, 0x75, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x42, 0x02, 0x18, 0x01, 0x52, 0x0f, 0x71, 0x75, 0x65, 0x72, 0x79, 0x43, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x03, 0x20, + 0x03, 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x32, 0x0a, 0x05, 0x71, 0x75, 0x65, + 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, + 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x48, 0x69, 0x76, + 0x65, 0x51, 0x75, 0x65, 0x72, 0x79, 0x52, 0x05, 0x71, 0x75, 0x65, 0x72, 0x79, 0x42, 0xc0, 0x01, + 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x42, 0x0b, 0x51, 0x75, 0x62, 0x6f, 0x6c, 0x65, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x66, 0x6c, 0x79, 
0x74, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0xa2, 0x02, 0x03, + 0x46, 0x50, 0x58, 0xaa, 0x02, 0x11, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, + 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0xca, 0x02, 0x11, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x5c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0xe2, 0x02, 0x1d, 0x46, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x5c, + 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x12, 0x46, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x3a, 0x3a, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, + 0x4a, 0xd5, 0x06, 0x0a, 0x06, 0x12, 0x04, 0x00, 0x00, 0x19, 0x01, 0x0a, 0x08, 0x0a, 0x01, 0x0c, + 0x12, 0x03, 0x00, 0x00, 0x12, 0x0a, 0x08, 0x0a, 0x01, 0x02, 0x12, 0x03, 0x02, 0x00, 0x1a, 0x0a, + 0x08, 0x0a, 0x01, 0x08, 0x12, 0x03, 0x04, 0x00, 0x4c, 0x0a, 0x09, 0x0a, 0x02, 0x08, 0x0b, 0x12, + 0x03, 0x04, 0x00, 0x4c, 0x0a, 0x3b, 0x0a, 0x02, 0x04, 0x00, 0x12, 0x04, 0x07, 0x00, 0x0b, 0x01, + 0x1a, 0x2f, 0x20, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x73, 0x20, 0x61, 0x20, 0x71, 0x75, 0x65, + 0x72, 0x79, 0x20, 0x74, 0x6f, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x65, 0x20, 0x6f, 0x6e, + 0x20, 0x61, 0x20, 0x68, 0x69, 0x76, 0x65, 0x20, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2e, + 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x00, 0x01, 0x12, 0x03, 0x07, 0x08, 0x11, 0x0a, 0x0b, 0x0a, + 0x04, 0x04, 0x00, 0x02, 0x00, 0x12, 0x03, 0x08, 0x02, 0x13, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, + 0x02, 0x00, 0x05, 0x12, 0x03, 0x08, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, + 0x01, 0x12, 0x03, 0x08, 0x09, 0x0e, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x03, 0x12, + 0x03, 0x08, 0x11, 0x12, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x01, 0x12, 0x03, 0x09, 0x02, + 0x19, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x05, 0x12, 0x03, 0x09, 0x02, 0x08, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x00, 
0x02, 0x01, 0x01, 0x12, 0x03, 0x09, 0x09, 0x14, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x00, 0x02, 0x01, 0x03, 0x12, 0x03, 0x09, 0x17, 0x18, 0x0a, 0x0b, 0x0a, 0x04, 0x04, + 0x00, 0x02, 0x02, 0x12, 0x03, 0x0a, 0x02, 0x18, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x02, + 0x05, 0x12, 0x03, 0x0a, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x02, 0x01, 0x12, + 0x03, 0x0a, 0x09, 0x13, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x02, 0x03, 0x12, 0x03, 0x0a, + 0x16, 0x17, 0x0a, 0x33, 0x0a, 0x02, 0x04, 0x01, 0x12, 0x04, 0x0e, 0x00, 0x10, 0x01, 0x1a, 0x27, + 0x20, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x73, 0x20, 0x61, 0x20, 0x63, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x6f, 0x66, 0x20, 0x68, 0x69, 0x76, 0x65, 0x20, 0x71, 0x75, + 0x65, 0x72, 0x69, 0x65, 0x73, 0x2e, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x01, 0x01, 0x12, 0x03, + 0x0e, 0x08, 0x1b, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x00, 0x12, 0x03, 0x0f, 0x02, 0x21, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x04, 0x12, 0x03, 0x0f, 0x02, 0x0a, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x06, 0x12, 0x03, 0x0f, 0x0b, 0x14, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x01, 0x02, 0x00, 0x01, 0x12, 0x03, 0x0f, 0x15, 0x1c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, + 0x02, 0x00, 0x03, 0x12, 0x03, 0x0f, 0x1f, 0x20, 0x0a, 0x9a, 0x01, 0x0a, 0x02, 0x04, 0x02, 0x12, + 0x04, 0x14, 0x00, 0x19, 0x01, 0x1a, 0x8d, 0x01, 0x20, 0x54, 0x68, 0x69, 0x73, 0x20, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x20, 0x77, 0x6f, 0x72, 0x6b, 0x73, 0x20, 0x77, 0x69, 0x74, 0x68, + 0x20, 0x74, 0x68, 0x65, 0x20, 0x27, 0x68, 0x69, 0x76, 0x65, 0x27, 0x20, 0x74, 0x61, 0x73, 0x6b, + 0x20, 0x74, 0x79, 0x70, 0x65, 0x20, 0x69, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x53, 0x44, 0x4b, + 0x20, 0x61, 0x6e, 0x64, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x62, 0x65, 0x20, + 0x69, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x27, 0x63, 0x75, 0x73, 
0x74, 0x6f, 0x6d, 0x27, 0x20, + 0x66, 0x69, 0x65, 0x6c, 0x64, 0x0a, 0x20, 0x6f, 0x66, 0x20, 0x61, 0x20, 0x68, 0x69, 0x76, 0x65, + 0x20, 0x74, 0x61, 0x73, 0x6b, 0x27, 0x73, 0x20, 0x54, 0x61, 0x73, 0x6b, 0x54, 0x65, 0x6d, 0x70, + 0x6c, 0x61, 0x74, 0x65, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x02, 0x01, 0x12, 0x03, 0x14, 0x08, + 0x15, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x02, 0x02, 0x00, 0x12, 0x03, 0x15, 0x02, 0x1b, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, 0x05, 0x12, 0x03, 0x15, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x02, 0x02, 0x00, 0x01, 0x12, 0x03, 0x15, 0x09, 0x16, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, + 0x02, 0x00, 0x03, 0x12, 0x03, 0x15, 0x19, 0x1a, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x02, 0x02, 0x01, + 0x12, 0x03, 0x16, 0x02, 0x3f, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x01, 0x06, 0x12, 0x03, + 0x16, 0x02, 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x01, 0x01, 0x12, 0x03, 0x16, 0x16, + 0x26, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x01, 0x03, 0x12, 0x03, 0x16, 0x29, 0x2a, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x01, 0x08, 0x12, 0x03, 0x16, 0x2b, 0x3e, 0x0a, 0x0d, 0x0a, + 0x06, 0x04, 0x02, 0x02, 0x01, 0x08, 0x03, 0x12, 0x03, 0x16, 0x2c, 0x3d, 0x0a, 0x0b, 0x0a, 0x04, + 0x04, 0x02, 0x02, 0x02, 0x12, 0x03, 0x17, 0x02, 0x1b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, + 0x02, 0x04, 0x12, 0x03, 0x17, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x02, 0x05, + 0x12, 0x03, 0x17, 0x0b, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x02, 0x01, 0x12, 0x03, + 0x17, 0x12, 0x16, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x02, 0x03, 0x12, 0x03, 0x17, 0x19, + 0x1a, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x02, 0x02, 0x03, 0x12, 0x03, 0x18, 0x02, 0x16, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x02, 0x02, 0x03, 0x06, 0x12, 0x03, 0x18, 0x02, 0x0b, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x02, 0x02, 0x03, 0x01, 0x12, 0x03, 0x18, 0x0c, 0x11, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, + 0x02, 0x03, 0x03, 0x12, 0x03, 0x18, 0x14, 0x15, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x0a, 
0xe7, 0x1f, 0x0a, 0x1b, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2f, 0x72, 0x61, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x11, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x73, 0x1a, 0x1a, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, + 0x6f, 0x72, 0x65, 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, + 0x93, 0x02, 0x0a, 0x06, 0x52, 0x61, 0x79, 0x4a, 0x6f, 0x62, 0x12, 0x3e, 0x0a, 0x0b, 0x72, 0x61, + 0x79, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x73, 0x2e, 0x52, 0x61, 0x79, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x0a, + 0x72, 0x61, 0x79, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x23, 0x0a, 0x0b, 0x72, 0x75, + 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x65, 0x6e, 0x76, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x02, 0x18, 0x01, 0x52, 0x0a, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x45, 0x6e, 0x76, 0x12, + 0x3d, 0x0a, 0x1b, 0x73, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x5f, 0x61, 0x66, 0x74, 0x65, + 0x72, 0x5f, 0x6a, 0x6f, 0x62, 0x5f, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x73, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x73, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x41, 0x66, + 0x74, 0x65, 0x72, 0x4a, 0x6f, 0x62, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x73, 0x12, 0x3b, + 0x0a, 0x1a, 0x74, 0x74, 0x6c, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x5f, 0x61, 0x66, + 0x74, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x17, 0x74, 0x74, 0x6c, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x41, 0x66, + 0x74, 0x65, 0x72, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x12, 0x28, 0x0a, 0x10, 0x72, + 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 
0x65, 0x6e, 0x76, 0x5f, 0x79, 0x61, 0x6d, 0x6c, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x45, 0x6e, + 0x76, 0x59, 0x61, 0x6d, 0x6c, 0x22, 0xd5, 0x01, 0x0a, 0x0a, 0x52, 0x61, 0x79, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x12, 0x48, 0x0a, 0x0f, 0x68, 0x65, 0x61, 0x64, 0x5f, 0x67, 0x72, 0x6f, + 0x75, 0x70, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x70, 0x65, 0x63, 0x52, + 0x0d, 0x68, 0x65, 0x61, 0x64, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x70, 0x65, 0x63, 0x12, 0x4e, + 0x0a, 0x11, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x73, + 0x70, 0x65, 0x63, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x57, 0x6f, + 0x72, 0x6b, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x70, 0x65, 0x63, 0x52, 0x0f, 0x77, + 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x70, 0x65, 0x63, 0x12, 0x2d, + 0x0a, 0x12, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, + 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x22, 0xe3, 0x01, + 0x0a, 0x0d, 0x48, 0x65, 0x61, 0x64, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x70, 0x65, 0x63, 0x12, + 0x5e, 0x0a, 0x10, 0x72, 0x61, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x70, 0x61, 0x72, + 0x61, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x48, 0x65, + 0x61, 0x64, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x70, 0x65, 0x63, 0x2e, 0x52, 
0x61, 0x79, 0x53, + 0x74, 0x61, 0x72, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, + 0x0e, 0x72, 0x61, 0x79, 0x53, 0x74, 0x61, 0x72, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, + 0x2f, 0x0a, 0x07, 0x6b, 0x38, 0x73, 0x5f, 0x70, 0x6f, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x16, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, + 0x65, 0x2e, 0x4b, 0x38, 0x73, 0x50, 0x6f, 0x64, 0x52, 0x06, 0x6b, 0x38, 0x73, 0x50, 0x6f, 0x64, + 0x1a, 0x41, 0x0a, 0x13, 0x52, 0x61, 0x79, 0x53, 0x74, 0x61, 0x72, 0x74, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, + 0x02, 0x38, 0x01, 0x22, 0xe8, 0x02, 0x0a, 0x0f, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x47, 0x72, + 0x6f, 0x75, 0x70, 0x53, 0x70, 0x65, 0x63, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x72, 0x6f, 0x75, 0x70, + 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x72, 0x6f, + 0x75, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, + 0x61, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x6d, 0x69, 0x6e, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x72, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x6d, 0x61, 0x78, + 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x60, 0x0a, 0x10, 0x72, 0x61, 0x79, 0x5f, + 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x05, 0x20, 0x03, + 0x28, 0x0b, 0x32, 
0x36, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x47, 0x72, 0x6f, + 0x75, 0x70, 0x53, 0x70, 0x65, 0x63, 0x2e, 0x52, 0x61, 0x79, 0x53, 0x74, 0x61, 0x72, 0x74, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x72, 0x61, 0x79, 0x53, + 0x74, 0x61, 0x72, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x2f, 0x0a, 0x07, 0x6b, 0x38, + 0x73, 0x5f, 0x70, 0x6f, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4b, 0x38, 0x73, + 0x50, 0x6f, 0x64, 0x52, 0x06, 0x6b, 0x38, 0x73, 0x50, 0x6f, 0x64, 0x1a, 0x41, 0x0a, 0x13, 0x52, + 0x61, 0x79, 0x53, 0x74, 0x61, 0x72, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0xbd, + 0x01, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, + 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x42, 0x08, 0x52, 0x61, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, @@ -1522,485 +2092,415 @@ pub const FILE_DESCRIPTOR_SET: &[u8] = &[ 0x32, 0x5c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0xe2, 0x02, 0x1d, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x12, 0x46, 0x6c, 0x79, 0x74, - 
0x65, 0x69, 0x64, 0x6c, 0x32, 0x3a, 0x3a, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x4a, 0xfa, - 0x06, 0x0a, 0x06, 0x12, 0x04, 0x00, 0x00, 0x17, 0x01, 0x0a, 0x08, 0x0a, 0x01, 0x0c, 0x12, 0x03, - 0x00, 0x00, 0x12, 0x0a, 0x08, 0x0a, 0x01, 0x02, 0x12, 0x03, 0x02, 0x00, 0x1a, 0x0a, 0x08, 0x0a, - 0x01, 0x08, 0x12, 0x03, 0x04, 0x00, 0x4c, 0x0a, 0x09, 0x0a, 0x02, 0x08, 0x0b, 0x12, 0x03, 0x04, - 0x00, 0x4c, 0x0a, 0xd4, 0x01, 0x0a, 0x02, 0x04, 0x00, 0x12, 0x04, 0x08, 0x00, 0x0e, 0x01, 0x1a, - 0xc7, 0x01, 0x20, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x20, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x20, - 0x66, 0x6f, 0x72, 0x20, 0x74, 0x6f, 0x72, 0x63, 0x68, 0x20, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, - 0x63, 0x20, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x64, 0x69, 0x73, - 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x20, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, - 0x67, 0x20, 0x75, 0x73, 0x69, 0x6e, 0x67, 0x0a, 0x20, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, - 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6b, 0x75, 0x62, 0x65, - 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x2f, 0x62, 0x6c, 0x6f, - 0x62, 0x2f, 0x65, 0x33, 0x31, 0x64, 0x31, 0x31, 0x66, 0x61, 0x61, 0x39, 0x66, 0x36, 0x63, 0x65, - 0x35, 0x31, 0x31, 0x31, 0x62, 0x36, 0x30, 0x63, 0x30, 0x31, 0x30, 0x37, 0x39, 0x64, 0x33, 0x39, - 0x32, 0x39, 0x35, 0x35, 0x38, 0x39, 0x65, 0x30, 0x65, 0x66, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x61, - 0x70, 0x69, 0x73, 0x2f, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x6f, 0x72, 0x67, - 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x79, 0x74, 0x6f, 0x72, 0x63, 0x68, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x2e, 0x67, 0x6f, 0x23, 0x4c, 0x39, 0x38, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x00, 0x01, - 0x12, 0x03, 0x08, 0x08, 0x15, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x00, 0x12, 0x03, 0x09, - 0x02, 0x1a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x05, 0x12, 0x03, 0x09, 0x02, 0x08, - 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 
0x02, 0x00, 0x01, 0x12, 0x03, 0x09, 0x09, 0x15, 0x0a, 0x0c, - 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x03, 0x12, 0x03, 0x09, 0x18, 0x19, 0x0a, 0x0b, 0x0a, 0x04, - 0x04, 0x00, 0x02, 0x01, 0x12, 0x03, 0x0a, 0x02, 0x19, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, - 0x01, 0x05, 0x12, 0x03, 0x0a, 0x02, 0x07, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x01, - 0x12, 0x03, 0x0a, 0x08, 0x14, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x03, 0x12, 0x03, - 0x0a, 0x17, 0x18, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x02, 0x12, 0x03, 0x0b, 0x02, 0x19, - 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x02, 0x05, 0x12, 0x03, 0x0b, 0x02, 0x07, 0x0a, 0x0c, - 0x0a, 0x05, 0x04, 0x00, 0x02, 0x02, 0x01, 0x12, 0x03, 0x0b, 0x08, 0x14, 0x0a, 0x0c, 0x0a, 0x05, - 0x04, 0x00, 0x02, 0x02, 0x03, 0x12, 0x03, 0x0b, 0x17, 0x18, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x00, - 0x02, 0x03, 0x12, 0x03, 0x0c, 0x02, 0x1b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x03, 0x05, - 0x12, 0x03, 0x0c, 0x02, 0x07, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x03, 0x01, 0x12, 0x03, - 0x0c, 0x08, 0x16, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x03, 0x03, 0x12, 0x03, 0x0c, 0x19, - 0x1a, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x04, 0x12, 0x03, 0x0d, 0x02, 0x19, 0x0a, 0x0c, - 0x0a, 0x05, 0x04, 0x00, 0x02, 0x04, 0x05, 0x12, 0x03, 0x0d, 0x02, 0x07, 0x0a, 0x0c, 0x0a, 0x05, - 0x04, 0x00, 0x02, 0x04, 0x01, 0x12, 0x03, 0x0d, 0x08, 0x14, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, - 0x02, 0x04, 0x03, 0x12, 0x03, 0x0d, 0x17, 0x18, 0x0a, 0x71, 0x0a, 0x02, 0x04, 0x01, 0x12, 0x04, - 0x11, 0x00, 0x17, 0x01, 0x1a, 0x65, 0x20, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x20, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x20, 0x74, - 0x68, 0x61, 0x74, 0x20, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x20, 0x64, 0x69, 0x73, 0x74, - 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x20, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, - 0x20, 0x75, 0x73, 0x69, 0x6e, 0x67, 0x20, 0x68, 0x74, 0x74, 0x70, 0x73, 
0x3a, 0x2f, 0x2f, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, - 0x6f, 0x77, 0x2f, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x65, 0x72, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, - 0x01, 0x01, 0x12, 0x03, 0x11, 0x08, 0x26, 0x0a, 0x4c, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x00, 0x12, - 0x03, 0x13, 0x02, 0x14, 0x1a, 0x3f, 0x20, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x20, 0x6f, 0x66, - 0x20, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, - 0x20, 0x73, 0x70, 0x61, 0x77, 0x6e, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x69, 0x73, - 0x20, 0x6a, 0x6f, 0x62, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x05, 0x12, 0x03, - 0x13, 0x02, 0x07, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x01, 0x12, 0x03, 0x13, 0x08, - 0x0f, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x03, 0x12, 0x03, 0x13, 0x12, 0x13, 0x0a, - 0x30, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x01, 0x12, 0x03, 0x16, 0x02, 0x23, 0x1a, 0x23, 0x20, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x61, 0x6e, 0x20, 0x65, 0x6c, 0x61, - 0x73, 0x74, 0x69, 0x63, 0x20, 0x70, 0x79, 0x74, 0x6f, 0x72, 0x63, 0x68, 0x20, 0x6a, 0x6f, 0x62, - 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x01, 0x06, 0x12, 0x03, 0x16, 0x02, 0x0f, 0x0a, - 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x01, 0x01, 0x12, 0x03, 0x16, 0x10, 0x1e, 0x0a, 0x0c, 0x0a, - 0x05, 0x04, 0x01, 0x02, 0x01, 0x03, 0x12, 0x03, 0x16, 0x21, 0x22, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, 0x0a, 0xe7, 0x1f, 0x0a, 0x1b, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, - 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2f, 0x72, 0x61, 0x79, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, - 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x1a, 0x1a, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, - 0x32, 0x2f, 
0x63, 0x6f, 0x72, 0x65, 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x22, 0x93, 0x02, 0x0a, 0x06, 0x52, 0x61, 0x79, 0x4a, 0x6f, 0x62, 0x12, 0x3e, 0x0a, - 0x0b, 0x72, 0x61, 0x79, 0x5f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, - 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x52, 0x61, 0x79, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x52, 0x0a, 0x72, 0x61, 0x79, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x23, 0x0a, - 0x0b, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x65, 0x6e, 0x76, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x42, 0x02, 0x18, 0x01, 0x52, 0x0a, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x45, - 0x6e, 0x76, 0x12, 0x3d, 0x0a, 0x1b, 0x73, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x5f, 0x61, - 0x66, 0x74, 0x65, 0x72, 0x5f, 0x6a, 0x6f, 0x62, 0x5f, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, - 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x18, 0x73, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, - 0x6e, 0x41, 0x66, 0x74, 0x65, 0x72, 0x4a, 0x6f, 0x62, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, - 0x73, 0x12, 0x3b, 0x0a, 0x1a, 0x74, 0x74, 0x6c, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, - 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x17, 0x74, 0x74, 0x6c, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, - 0x73, 0x41, 0x66, 0x74, 0x65, 0x72, 0x46, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x64, 0x12, 0x28, - 0x0a, 0x10, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x65, 0x6e, 0x76, 0x5f, 0x79, 0x61, - 0x6d, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, - 0x65, 0x45, 0x6e, 0x76, 0x59, 0x61, 0x6d, 0x6c, 0x22, 0xd5, 0x01, 0x0a, 0x0a, 0x52, 0x61, 0x79, - 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x48, 0x0a, 0x0f, 0x68, 0x65, 0x61, 0x64, 0x5f, - 0x67, 0x72, 0x6f, 0x75, 0x70, 0x5f, 0x73, 0x70, 
0x65, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x20, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x70, - 0x65, 0x63, 0x52, 0x0d, 0x68, 0x65, 0x61, 0x64, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x70, 0x65, - 0x63, 0x12, 0x4e, 0x0a, 0x11, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x5f, 0x67, 0x72, 0x6f, 0x75, - 0x70, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x66, - 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, - 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x70, 0x65, 0x63, - 0x52, 0x0f, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x70, 0x65, - 0x63, 0x12, 0x2d, 0x0a, 0x12, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x61, 0x75, 0x74, 0x6f, - 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x65, - 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, - 0x22, 0xe3, 0x01, 0x0a, 0x0d, 0x48, 0x65, 0x61, 0x64, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x70, - 0x65, 0x63, 0x12, 0x5e, 0x0a, 0x10, 0x72, 0x61, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, - 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, 0x2e, 0x66, + 0x65, 0x69, 0x64, 0x6c, 0x32, 0x3a, 0x3a, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x4a, 0x91, + 0x15, 0x0a, 0x06, 0x12, 0x04, 0x00, 0x00, 0x3a, 0x01, 0x0a, 0x08, 0x0a, 0x01, 0x0c, 0x12, 0x03, + 0x00, 0x00, 0x12, 0x0a, 0x08, 0x0a, 0x01, 0x02, 0x12, 0x03, 0x02, 0x00, 0x1a, 0x0a, 0x09, 0x0a, + 0x02, 0x03, 0x00, 0x12, 0x03, 0x04, 0x00, 0x24, 0x0a, 0x08, 0x0a, 0x01, 0x08, 0x12, 0x03, 0x06, + 0x00, 0x4c, 0x0a, 0x09, 0x0a, 0x02, 0x08, 0x0b, 0x12, 0x03, 0x06, 0x00, 0x4c, 0x0a, 0x3c, 0x0a, + 0x02, 0x04, 0x00, 0x12, 0x04, 0x09, 0x00, 0x16, 0x01, 0x1a, 0x30, 0x20, 0x52, 0x61, 
0x79, 0x4a, + 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x20, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x73, 0x20, 0x74, + 0x68, 0x65, 0x20, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x20, 0x73, 0x74, 0x61, 0x74, 0x65, + 0x20, 0x6f, 0x66, 0x20, 0x52, 0x61, 0x79, 0x4a, 0x6f, 0x62, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, + 0x00, 0x01, 0x12, 0x03, 0x09, 0x08, 0x0e, 0x0a, 0x44, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x00, 0x12, + 0x03, 0x0b, 0x02, 0x1d, 0x1a, 0x37, 0x20, 0x52, 0x61, 0x79, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x53, 0x70, 0x65, 0x63, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, 0x20, 0x74, 0x6f, + 0x20, 0x72, 0x75, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6a, 0x6f, 0x62, 0x0a, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x00, 0x02, 0x00, 0x06, 0x12, 0x03, 0x0b, 0x02, 0x0c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x00, 0x02, 0x00, 0x01, 0x12, 0x03, 0x0b, 0x0d, 0x18, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, + 0x00, 0x03, 0x12, 0x03, 0x0b, 0x1b, 0x1c, 0x0a, 0xa0, 0x01, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x01, + 0x12, 0x03, 0x0e, 0x02, 0x2d, 0x1a, 0x92, 0x01, 0x20, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, + 0x5f, 0x65, 0x6e, 0x76, 0x20, 0x69, 0x73, 0x20, 0x62, 0x61, 0x73, 0x65, 0x36, 0x34, 0x20, 0x65, + 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x64, 0x2e, 0x0a, 0x20, 0x52, 0x61, 0x79, 0x20, 0x72, 0x75, 0x6e, + 0x74, 0x69, 0x6d, 0x65, 0x20, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, + 0x73, 0x3a, 0x20, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x64, 0x6f, 0x63, 0x73, 0x2e, + 0x72, 0x61, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x65, 0x6e, 0x2f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, + 0x2f, 0x72, 0x61, 0x79, 0x2d, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x69, + 0x6e, 0x67, 0x2d, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, 0x65, 0x73, 0x2e, + 0x68, 0x74, 0x6d, 0x6c, 0x23, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2d, 0x65, 0x6e, 0x76, + 0x69, 0x72, 0x6f, 0x6e, 
0x6d, 0x65, 0x6e, 0x74, 0x73, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, + 0x02, 0x01, 0x05, 0x12, 0x03, 0x0e, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, + 0x01, 0x12, 0x03, 0x0e, 0x09, 0x14, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x03, 0x12, + 0x03, 0x0e, 0x17, 0x18, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x08, 0x12, 0x03, 0x0e, + 0x19, 0x2c, 0x0a, 0x0d, 0x0a, 0x06, 0x04, 0x00, 0x02, 0x01, 0x08, 0x03, 0x12, 0x03, 0x0e, 0x1a, + 0x2b, 0x0a, 0x78, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x02, 0x12, 0x03, 0x10, 0x02, 0x27, 0x1a, 0x6b, + 0x20, 0x73, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, + 0x6a, 0x6f, 0x62, 0x5f, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x73, 0x20, 0x73, 0x70, 0x65, + 0x63, 0x69, 0x66, 0x69, 0x65, 0x73, 0x20, 0x77, 0x68, 0x65, 0x74, 0x68, 0x65, 0x72, 0x20, 0x74, + 0x68, 0x65, 0x20, 0x52, 0x61, 0x79, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x20, 0x73, 0x68, + 0x6f, 0x75, 0x6c, 0x64, 0x20, 0x62, 0x65, 0x20, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x20, + 0x61, 0x66, 0x74, 0x65, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x52, 0x61, 0x79, 0x4a, 0x6f, 0x62, + 0x20, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x73, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x00, 0x02, 0x02, 0x05, 0x12, 0x03, 0x10, 0x02, 0x06, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, + 0x02, 0x01, 0x12, 0x03, 0x10, 0x07, 0x22, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x02, 0x03, + 0x12, 0x03, 0x10, 0x25, 0x26, 0x0a, 0x90, 0x01, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x03, 0x12, 0x03, + 0x12, 0x02, 0x27, 0x1a, 0x82, 0x01, 0x20, 0x74, 0x74, 0x6c, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, + 0x64, 0x73, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, + 0x64, 0x20, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, + 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x20, 0x6f, 0x66, 0x20, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, + 0x73, 0x20, 0x61, 0x66, 0x74, 0x65, 0x72, 0x20, 0x77, 0x68, 
0x69, 0x63, 0x68, 0x20, 0x74, 0x68, + 0x65, 0x20, 0x52, 0x61, 0x79, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x20, 0x77, 0x69, 0x6c, + 0x6c, 0x20, 0x62, 0x65, 0x20, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x20, 0x61, 0x66, 0x74, + 0x65, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x52, 0x61, 0x79, 0x4a, 0x6f, 0x62, 0x20, 0x66, 0x69, + 0x6e, 0x69, 0x73, 0x68, 0x65, 0x73, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x03, + 0x05, 0x12, 0x03, 0x12, 0x02, 0x07, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x03, 0x01, 0x12, + 0x03, 0x12, 0x08, 0x22, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x03, 0x03, 0x12, 0x03, 0x12, + 0x25, 0x26, 0x0a, 0x75, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x04, 0x12, 0x03, 0x15, 0x02, 0x1e, 0x1a, + 0x68, 0x20, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x45, 0x6e, 0x76, 0x59, 0x41, 0x4d, 0x4c, + 0x20, 0x72, 0x65, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, + 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x20, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, + 0x65, 0x6e, 0x74, 0x20, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x0a, 0x20, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x64, 0x20, 0x61, 0x73, 0x20, 0x61, + 0x20, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x2d, 0x6c, 0x69, 0x6e, 0x65, 0x20, 0x59, 0x41, 0x4d, 0x4c, + 0x20, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, + 0x04, 0x05, 0x12, 0x03, 0x15, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x04, 0x01, + 0x12, 0x03, 0x15, 0x09, 0x19, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x04, 0x03, 0x12, 0x03, + 0x15, 0x1c, 0x1d, 0x0a, 0x48, 0x0a, 0x02, 0x04, 0x01, 0x12, 0x04, 0x19, 0x00, 0x20, 0x01, 0x1a, + 0x3c, 0x20, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x20, 0x52, 0x61, 0x79, 0x20, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x20, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x73, 0x20, 0x74, 0x68, 0x65, + 0x20, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x20, 0x73, 0x74, 0x61, 0x74, 0x65, 0x20, 0x6f, + 
0x66, 0x20, 0x52, 0x61, 0x79, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x0a, 0x0a, 0x0a, 0x0a, + 0x03, 0x04, 0x01, 0x01, 0x12, 0x03, 0x19, 0x08, 0x12, 0x0a, 0x3b, 0x0a, 0x04, 0x04, 0x01, 0x02, + 0x00, 0x12, 0x03, 0x1b, 0x02, 0x24, 0x1a, 0x2e, 0x20, 0x48, 0x65, 0x61, 0x64, 0x47, 0x72, 0x6f, + 0x75, 0x70, 0x53, 0x70, 0x65, 0x63, 0x73, 0x20, 0x61, 0x72, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, + 0x73, 0x70, 0x65, 0x63, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x68, 0x65, 0x61, + 0x64, 0x20, 0x70, 0x6f, 0x64, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x06, 0x12, + 0x03, 0x1b, 0x02, 0x0f, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x01, 0x12, 0x03, 0x1b, + 0x10, 0x1f, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x03, 0x12, 0x03, 0x1b, 0x22, 0x23, + 0x0a, 0x41, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x01, 0x12, 0x03, 0x1d, 0x02, 0x31, 0x1a, 0x34, 0x20, + 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x70, 0x65, 0x63, 0x73, + 0x20, 0x61, 0x72, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x70, 0x65, 0x63, 0x73, 0x20, 0x66, + 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x20, 0x70, 0x6f, + 0x64, 0x73, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x01, 0x04, 0x12, 0x03, 0x1d, 0x02, + 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x01, 0x06, 0x12, 0x03, 0x1d, 0x0b, 0x1a, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x01, 0x01, 0x12, 0x03, 0x1d, 0x1b, 0x2c, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x01, 0x02, 0x01, 0x03, 0x12, 0x03, 0x1d, 0x2f, 0x30, 0x0a, 0x2d, 0x0a, 0x04, 0x04, + 0x01, 0x02, 0x02, 0x12, 0x03, 0x1f, 0x02, 0x1e, 0x1a, 0x20, 0x20, 0x57, 0x68, 0x65, 0x74, 0x68, + 0x65, 0x72, 0x20, 0x74, 0x6f, 0x20, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x20, 0x61, 0x75, 0x74, + 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, + 0x02, 0x02, 0x05, 0x12, 0x03, 0x1f, 0x02, 0x06, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x02, + 0x01, 0x12, 0x03, 0x1f, 0x07, 0x19, 
0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x02, 0x03, 0x12, + 0x03, 0x1f, 0x1c, 0x1d, 0x0a, 0x39, 0x0a, 0x02, 0x04, 0x02, 0x12, 0x04, 0x23, 0x00, 0x29, 0x01, + 0x1a, 0x2d, 0x20, 0x48, 0x65, 0x61, 0x64, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x70, 0x65, 0x63, + 0x20, 0x61, 0x72, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x70, 0x65, 0x63, 0x20, 0x66, 0x6f, + 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x68, 0x65, 0x61, 0x64, 0x20, 0x70, 0x6f, 0x64, 0x0a, 0x0a, + 0x0a, 0x0a, 0x03, 0x04, 0x02, 0x01, 0x12, 0x03, 0x23, 0x08, 0x15, 0x0a, 0xb7, 0x01, 0x0a, 0x04, + 0x04, 0x02, 0x02, 0x00, 0x12, 0x03, 0x26, 0x02, 0x2b, 0x1a, 0xa9, 0x01, 0x20, 0x4f, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x2e, 0x20, 0x52, 0x61, 0x79, 0x53, 0x74, 0x61, 0x72, 0x74, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x73, 0x20, 0x61, 0x72, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x61, + 0x72, 0x61, 0x6d, 0x73, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x74, 0x61, 0x72, + 0x74, 0x20, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x3a, 0x20, 0x61, 0x64, 0x64, 0x72, 0x65, + 0x73, 0x73, 0x2c, 0x20, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2d, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x2d, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x2e, 0x0a, 0x20, 0x52, 0x65, 0x66, 0x65, 0x72, 0x20, + 0x74, 0x6f, 0x20, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x64, 0x6f, 0x63, 0x73, 0x2e, + 0x72, 0x61, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x65, 0x6e, 0x2f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, + 0x2f, 0x72, 0x61, 0x79, 0x2d, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, + 0x65, 0x2d, 0x72, 0x65, 0x66, 0x2e, 0x68, 0x74, 0x6d, 0x6c, 0x23, 0x72, 0x61, 0x79, 0x2d, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, 0x06, 0x12, 0x03, + 0x26, 0x02, 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, 0x01, 0x12, 0x03, 0x26, 0x16, + 0x26, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, 0x03, 0x12, 0x03, 0x26, 0x29, 0x2a, 0x0a, + 0x2c, 0x0a, 0x04, 0x04, 0x02, 0x02, 0x01, 0x12, 0x03, 0x28, 0x02, 0x1a, 
0x1a, 0x1f, 0x20, 0x50, + 0x6f, 0x64, 0x20, 0x53, 0x70, 0x65, 0x63, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, + 0x72, 0x61, 0x79, 0x20, 0x68, 0x65, 0x61, 0x64, 0x20, 0x70, 0x6f, 0x64, 0x0a, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x02, 0x02, 0x01, 0x06, 0x12, 0x03, 0x28, 0x02, 0x0d, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x02, 0x02, 0x01, 0x01, 0x12, 0x03, 0x28, 0x0e, 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, + 0x01, 0x03, 0x12, 0x03, 0x28, 0x18, 0x19, 0x0a, 0x3f, 0x0a, 0x02, 0x04, 0x03, 0x12, 0x04, 0x2c, + 0x00, 0x3a, 0x01, 0x1a, 0x33, 0x20, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, + 0x70, 0x53, 0x70, 0x65, 0x63, 0x20, 0x61, 0x72, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x70, + 0x65, 0x63, 0x73, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x77, 0x6f, 0x72, 0x6b, + 0x65, 0x72, 0x20, 0x70, 0x6f, 0x64, 0x73, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x03, 0x01, 0x12, + 0x03, 0x2c, 0x08, 0x17, 0x0a, 0x66, 0x0a, 0x04, 0x04, 0x03, 0x02, 0x00, 0x12, 0x03, 0x2e, 0x02, + 0x18, 0x1a, 0x59, 0x20, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x2e, 0x20, 0x52, 0x61, + 0x79, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x20, 0x63, 0x61, 0x6e, 0x20, 0x68, 0x61, 0x76, + 0x65, 0x20, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x20, 0x77, 0x6f, 0x72, 0x6b, 0x65, + 0x72, 0x20, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2c, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x69, 0x74, + 0x20, 0x64, 0x69, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x75, 0x69, 0x73, 0x68, 0x65, 0x73, 0x20, 0x74, + 0x68, 0x65, 0x6d, 0x20, 0x62, 0x79, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x03, 0x02, 0x00, 0x05, 0x12, 0x03, 0x2e, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, + 0x02, 0x00, 0x01, 0x12, 0x03, 0x2e, 0x09, 0x13, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x00, + 0x03, 0x12, 0x03, 0x2e, 0x16, 0x17, 0x0a, 0x4d, 0x0a, 0x04, 0x04, 0x03, 0x02, 0x01, 0x12, 0x03, + 0x30, 0x02, 0x15, 0x1a, 0x40, 0x20, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x2e, 0x20, + 0x44, 0x65, 
0x73, 0x69, 0x72, 0x65, 0x64, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, + 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x20, 0x67, + 0x72, 0x6f, 0x75, 0x70, 0x2e, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x20, 0x74, + 0x6f, 0x20, 0x31, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x01, 0x05, 0x12, 0x03, + 0x30, 0x02, 0x07, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x01, 0x01, 0x12, 0x03, 0x30, 0x08, + 0x10, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x01, 0x03, 0x12, 0x03, 0x30, 0x13, 0x14, 0x0a, + 0x55, 0x0a, 0x04, 0x04, 0x03, 0x02, 0x02, 0x12, 0x03, 0x32, 0x02, 0x19, 0x1a, 0x48, 0x20, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x2e, 0x20, 0x4d, 0x69, 0x6e, 0x20, 0x72, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x73, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x77, 0x6f, 0x72, + 0x6b, 0x65, 0x72, 0x20, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x2e, 0x20, 0x4d, 0x69, 0x6e, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x20, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, 0x20, + 0x74, 0x6f, 0x20, 0x31, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x02, 0x05, 0x12, + 0x03, 0x32, 0x02, 0x07, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x02, 0x01, 0x12, 0x03, 0x32, + 0x08, 0x14, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x02, 0x03, 0x12, 0x03, 0x32, 0x17, 0x18, + 0x0a, 0x5b, 0x0a, 0x04, 0x04, 0x03, 0x02, 0x03, 0x12, 0x03, 0x34, 0x02, 0x19, 0x1a, 0x4e, 0x20, + 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x2e, 0x20, 0x4d, 0x61, 0x78, 0x20, 0x72, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x77, 0x6f, + 0x72, 0x6b, 0x65, 0x72, 0x20, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x2e, 0x20, 0x4d, 0x61, 0x78, 0x52, + 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x20, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x73, + 0x20, 0x74, 0x6f, 0x20, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x0a, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x03, 0x02, 0x03, 0x05, 0x12, 0x03, 
0x34, 0x02, 0x07, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x03, 0x02, 0x03, 0x01, 0x12, 0x03, 0x34, 0x08, 0x14, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, + 0x03, 0x03, 0x12, 0x03, 0x34, 0x17, 0x18, 0x0a, 0xb7, 0x01, 0x0a, 0x04, 0x04, 0x03, 0x02, 0x04, + 0x12, 0x03, 0x37, 0x02, 0x2b, 0x1a, 0xa9, 0x01, 0x20, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x2e, 0x20, 0x52, 0x61, 0x79, 0x53, 0x74, 0x61, 0x72, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x73, 0x20, 0x61, 0x72, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x74, 0x61, 0x72, 0x74, 0x20, 0x63, 0x6f, + 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x3a, 0x20, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x2c, 0x20, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2d, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2d, 0x6d, 0x65, 0x6d, + 0x6f, 0x72, 0x79, 0x2e, 0x0a, 0x20, 0x52, 0x65, 0x66, 0x65, 0x72, 0x20, 0x74, 0x6f, 0x20, 0x68, + 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x64, 0x6f, 0x63, 0x73, 0x2e, 0x72, 0x61, 0x79, 0x2e, + 0x69, 0x6f, 0x2f, 0x65, 0x6e, 0x2f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x2f, 0x72, 0x61, 0x79, + 0x2d, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, 0x2d, 0x72, 0x65, + 0x66, 0x2e, 0x68, 0x74, 0x6d, 0x6c, 0x23, 0x72, 0x61, 0x79, 0x2d, 0x73, 0x74, 0x61, 0x72, 0x74, + 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x04, 0x06, 0x12, 0x03, 0x37, 0x02, 0x15, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x04, 0x01, 0x12, 0x03, 0x37, 0x16, 0x26, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x03, 0x02, 0x04, 0x03, 0x12, 0x03, 0x37, 0x29, 0x2a, 0x0a, 0x2b, 0x0a, 0x04, 0x04, + 0x03, 0x02, 0x05, 0x12, 0x03, 0x39, 0x02, 0x1a, 0x1a, 0x1e, 0x20, 0x50, 0x6f, 0x64, 0x20, 0x53, + 0x70, 0x65, 0x63, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x72, 0x61, 0x79, 0x20, 0x77, 0x6f, 0x72, 0x6b, + 0x65, 0x72, 0x20, 0x70, 0x6f, 0x64, 0x73, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x05, + 0x06, 0x12, 0x03, 0x39, 0x02, 0x0d, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x05, 
0x01, 0x12, + 0x03, 0x39, 0x0e, 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x05, 0x03, 0x12, 0x03, 0x39, + 0x18, 0x19, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x0a, 0xf7, 0x14, 0x0a, 0x1d, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, + 0x2f, 0x73, 0x70, 0x61, 0x72, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x1a, + 0x1a, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, 0x6f, 0x72, 0x65, 0x2f, + 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, + 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x42, 0x0a, 0x10, 0x53, 0x70, 0x61, + 0x72, 0x6b, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x2e, 0x0a, + 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x59, 0x54, 0x48, 0x4f, 0x4e, 0x10, + 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4a, 0x41, 0x56, 0x41, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x53, + 0x43, 0x41, 0x4c, 0x41, 0x10, 0x02, 0x12, 0x05, 0x0a, 0x01, 0x52, 0x10, 0x03, 0x22, 0xf1, 0x05, + 0x0a, 0x08, 0x53, 0x70, 0x61, 0x72, 0x6b, 0x4a, 0x6f, 0x62, 0x12, 0x52, 0x0a, 0x0f, 0x61, 0x70, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x28, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x53, 0x70, 0x61, 0x72, 0x6b, 0x41, 0x70, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x52, 0x0f, 0x61, + 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x12, 0x30, + 0x0a, 0x13, 0x6d, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x46, 0x69, 0x6c, 
0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x6d, 0x61, 0x69, + 0x6e, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x65, + 0x12, 0x1c, 0x0a, 0x09, 0x6d, 0x61, 0x69, 0x6e, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x61, 0x69, 0x6e, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x48, + 0x0a, 0x09, 0x73, 0x70, 0x61, 0x72, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x18, 0x04, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x2a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, + 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x53, 0x70, 0x61, 0x72, 0x6b, 0x4a, 0x6f, 0x62, 0x2e, 0x53, + 0x70, 0x61, 0x72, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x09, 0x73, + 0x70, 0x61, 0x72, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x12, 0x4b, 0x0a, 0x0a, 0x68, 0x61, 0x64, 0x6f, + 0x6f, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, - 0x2e, 0x48, 0x65, 0x61, 0x64, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x70, 0x65, 0x63, 0x2e, 0x52, - 0x61, 0x79, 0x53, 0x74, 0x61, 0x72, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x0e, 0x72, 0x61, 0x79, 0x53, 0x74, 0x61, 0x72, 0x74, 0x50, 0x61, 0x72, 0x61, - 0x6d, 0x73, 0x12, 0x2f, 0x0a, 0x07, 0x6b, 0x38, 0x73, 0x5f, 0x70, 0x6f, 0x64, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, - 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4b, 0x38, 0x73, 0x50, 0x6f, 0x64, 0x52, 0x06, 0x6b, 0x38, 0x73, - 0x50, 0x6f, 0x64, 0x1a, 0x41, 0x0a, 0x13, 0x52, 0x61, 0x79, 0x53, 0x74, 0x61, 0x72, 0x74, 0x50, - 0x61, 0x72, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, - 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 
0x52, 0x05, 0x76, 0x61, 0x6c, - 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xe8, 0x02, 0x0a, 0x0f, 0x57, 0x6f, 0x72, 0x6b, 0x65, - 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x70, 0x65, 0x63, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x72, - 0x6f, 0x75, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x67, 0x72, 0x6f, 0x75, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x72, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x69, 0x6e, 0x5f, 0x72, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, 0x6d, 0x69, 0x6e, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, - 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0b, - 0x6d, 0x61, 0x78, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x60, 0x0a, 0x10, 0x72, - 0x61, 0x79, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x18, - 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, - 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, - 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x70, 0x65, 0x63, 0x2e, 0x52, 0x61, 0x79, 0x53, 0x74, 0x61, - 0x72, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0e, 0x72, - 0x61, 0x79, 0x53, 0x74, 0x61, 0x72, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x12, 0x2f, 0x0a, - 0x07, 0x6b, 0x38, 0x73, 0x5f, 0x70, 0x6f, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, - 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, - 0x4b, 0x38, 0x73, 0x50, 0x6f, 0x64, 0x52, 0x06, 0x6b, 0x38, 0x73, 0x50, 0x6f, 0x64, 0x1a, 0x41, - 0x0a, 0x13, 0x52, 0x61, 0x79, 0x53, 0x74, 0x61, 0x72, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, + 0x2e, 
0x53, 0x70, 0x61, 0x72, 0x6b, 0x4a, 0x6f, 0x62, 0x2e, 0x48, 0x61, 0x64, 0x6f, 0x6f, 0x70, + 0x43, 0x6f, 0x6e, 0x66, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x68, 0x61, 0x64, 0x6f, 0x6f, + 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x12, 0x22, 0x0a, 0x0c, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, + 0x72, 0x50, 0x61, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x78, 0x65, + 0x63, 0x75, 0x74, 0x6f, 0x72, 0x50, 0x61, 0x74, 0x68, 0x12, 0x3f, 0x0a, 0x0e, 0x64, 0x61, 0x74, + 0x61, 0x62, 0x72, 0x69, 0x63, 0x6b, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x0e, 0x64, 0x61, 0x74, 0x61, + 0x62, 0x72, 0x69, 0x63, 0x6b, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x12, 0x28, 0x0a, 0x0f, 0x64, 0x61, + 0x74, 0x61, 0x62, 0x72, 0x69, 0x63, 0x6b, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x72, 0x69, 0x63, 0x6b, 0x73, 0x54, + 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x2e, 0x0a, 0x12, 0x64, 0x61, 0x74, 0x61, 0x62, 0x72, 0x69, 0x63, + 0x6b, 0x73, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x12, 0x64, 0x61, 0x74, 0x61, 0x62, 0x72, 0x69, 0x63, 0x6b, 0x73, 0x49, 0x6e, 0x73, 0x74, + 0x61, 0x6e, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x09, 0x64, 0x72, 0x69, 0x76, 0x65, 0x72, 0x50, 0x6f, + 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4b, 0x38, 0x73, 0x50, 0x6f, 0x64, 0x52, + 0x09, 0x64, 0x72, 0x69, 0x76, 0x65, 0x72, 0x50, 0x6f, 0x64, 0x12, 0x38, 0x0a, 0x0b, 0x65, 0x78, + 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x50, 0x6f, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x16, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, + 0x2e, 0x4b, 0x38, 0x73, 0x50, 0x6f, 0x64, 
0x52, 0x0b, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, + 0x72, 0x50, 0x6f, 0x64, 0x1a, 0x3c, 0x0a, 0x0e, 0x53, 0x70, 0x61, 0x72, 0x6b, 0x43, 0x6f, 0x6e, + 0x66, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, + 0x38, 0x01, 0x1a, 0x3d, 0x0a, 0x0f, 0x48, 0x61, 0x64, 0x6f, 0x6f, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, - 0x01, 0x42, 0xbd, 0x01, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, - 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x42, 0x08, 0x52, 0x61, 0x79, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, 0x6c, 0x79, - 0x74, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x66, 0x6c, 0x79, - 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0xa2, 0x02, - 0x03, 0x46, 0x50, 0x58, 0xaa, 0x02, 0x11, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, - 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0xca, 0x02, 0x11, 0x46, 0x6c, 0x79, 0x74, 0x65, - 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0xe2, 0x02, 0x1d, 0x46, - 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, - 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x12, 0x46, - 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x3a, 0x3a, 0x50, 0x6c, 0x75, 0x67, 
0x69, 0x6e, - 0x73, 0x4a, 0x91, 0x15, 0x0a, 0x06, 0x12, 0x04, 0x00, 0x00, 0x3a, 0x01, 0x0a, 0x08, 0x0a, 0x01, - 0x0c, 0x12, 0x03, 0x00, 0x00, 0x12, 0x0a, 0x08, 0x0a, 0x01, 0x02, 0x12, 0x03, 0x02, 0x00, 0x1a, - 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x00, 0x12, 0x03, 0x04, 0x00, 0x24, 0x0a, 0x08, 0x0a, 0x01, 0x08, - 0x12, 0x03, 0x06, 0x00, 0x4c, 0x0a, 0x09, 0x0a, 0x02, 0x08, 0x0b, 0x12, 0x03, 0x06, 0x00, 0x4c, - 0x0a, 0x3c, 0x0a, 0x02, 0x04, 0x00, 0x12, 0x04, 0x09, 0x00, 0x16, 0x01, 0x1a, 0x30, 0x20, 0x52, - 0x61, 0x79, 0x4a, 0x6f, 0x62, 0x53, 0x70, 0x65, 0x63, 0x20, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, - 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x20, 0x73, 0x74, - 0x61, 0x74, 0x65, 0x20, 0x6f, 0x66, 0x20, 0x52, 0x61, 0x79, 0x4a, 0x6f, 0x62, 0x0a, 0x0a, 0x0a, - 0x0a, 0x03, 0x04, 0x00, 0x01, 0x12, 0x03, 0x09, 0x08, 0x0e, 0x0a, 0x44, 0x0a, 0x04, 0x04, 0x00, - 0x02, 0x00, 0x12, 0x03, 0x0b, 0x02, 0x1d, 0x1a, 0x37, 0x20, 0x52, 0x61, 0x79, 0x43, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x53, 0x70, 0x65, 0x63, 0x20, 0x69, 0x73, 0x20, 0x74, 0x68, 0x65, 0x20, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x20, 0x74, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x74, 0x65, - 0x20, 0x74, 0x6f, 0x20, 0x72, 0x75, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x6a, 0x6f, 0x62, 0x0a, - 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x06, 0x12, 0x03, 0x0b, 0x02, 0x0c, 0x0a, 0x0c, - 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x01, 0x12, 0x03, 0x0b, 0x0d, 0x18, 0x0a, 0x0c, 0x0a, 0x05, - 0x04, 0x00, 0x02, 0x00, 0x03, 0x12, 0x03, 0x0b, 0x1b, 0x1c, 0x0a, 0xa0, 0x01, 0x0a, 0x04, 0x04, - 0x00, 0x02, 0x01, 0x12, 0x03, 0x0e, 0x02, 0x2d, 0x1a, 0x92, 0x01, 0x20, 0x72, 0x75, 0x6e, 0x74, - 0x69, 0x6d, 0x65, 0x5f, 0x65, 0x6e, 0x76, 0x20, 0x69, 0x73, 0x20, 0x62, 0x61, 0x73, 0x65, 0x36, - 0x34, 0x20, 0x65, 0x6e, 0x63, 0x6f, 0x64, 0x65, 0x64, 0x2e, 0x0a, 0x20, 0x52, 0x61, 0x79, 0x20, - 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x20, 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, - 0x65, 0x6e, 0x74, 0x73, 
0x3a, 0x20, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x64, 0x6f, - 0x63, 0x73, 0x2e, 0x72, 0x61, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x65, 0x6e, 0x2f, 0x6c, 0x61, 0x74, - 0x65, 0x73, 0x74, 0x2f, 0x72, 0x61, 0x79, 0x2d, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x68, 0x61, 0x6e, - 0x64, 0x6c, 0x69, 0x6e, 0x67, 0x2d, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x65, 0x6e, 0x63, 0x69, - 0x65, 0x73, 0x2e, 0x68, 0x74, 0x6d, 0x6c, 0x23, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x2d, - 0x65, 0x6e, 0x76, 0x69, 0x72, 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x0a, 0x0a, 0x0c, 0x0a, - 0x05, 0x04, 0x00, 0x02, 0x01, 0x05, 0x12, 0x03, 0x0e, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, - 0x00, 0x02, 0x01, 0x01, 0x12, 0x03, 0x0e, 0x09, 0x14, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, - 0x01, 0x03, 0x12, 0x03, 0x0e, 0x17, 0x18, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x01, 0x08, - 0x12, 0x03, 0x0e, 0x19, 0x2c, 0x0a, 0x0d, 0x0a, 0x06, 0x04, 0x00, 0x02, 0x01, 0x08, 0x03, 0x12, - 0x03, 0x0e, 0x1a, 0x2b, 0x0a, 0x78, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x02, 0x12, 0x03, 0x10, 0x02, - 0x27, 0x1a, 0x6b, 0x20, 0x73, 0x68, 0x75, 0x74, 0x64, 0x6f, 0x77, 0x6e, 0x5f, 0x61, 0x66, 0x74, - 0x65, 0x72, 0x5f, 0x6a, 0x6f, 0x62, 0x5f, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x73, 0x20, - 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x73, 0x20, 0x77, 0x68, 0x65, 0x74, 0x68, 0x65, - 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x52, 0x61, 0x79, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x20, 0x73, 0x68, 0x6f, 0x75, 0x6c, 0x64, 0x20, 0x62, 0x65, 0x20, 0x64, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x64, 0x20, 0x61, 0x66, 0x74, 0x65, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x52, 0x61, 0x79, - 0x4a, 0x6f, 0x62, 0x20, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x73, 0x2e, 0x0a, 0x0a, 0x0c, - 0x0a, 0x05, 0x04, 0x00, 0x02, 0x02, 0x05, 0x12, 0x03, 0x10, 0x02, 0x06, 0x0a, 0x0c, 0x0a, 0x05, - 0x04, 0x00, 0x02, 0x02, 0x01, 0x12, 0x03, 0x10, 0x07, 0x22, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, - 0x02, 0x02, 0x03, 0x12, 0x03, 0x10, 0x25, 0x26, 0x0a, 0x90, 
0x01, 0x0a, 0x04, 0x04, 0x00, 0x02, - 0x03, 0x12, 0x03, 0x12, 0x02, 0x27, 0x1a, 0x82, 0x01, 0x20, 0x74, 0x74, 0x6c, 0x5f, 0x73, 0x65, - 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x5f, 0x66, 0x69, 0x6e, 0x69, - 0x73, 0x68, 0x65, 0x64, 0x20, 0x73, 0x70, 0x65, 0x63, 0x69, 0x66, 0x69, 0x65, 0x73, 0x20, 0x74, - 0x68, 0x65, 0x20, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x20, 0x6f, 0x66, 0x20, 0x73, 0x65, 0x63, - 0x6f, 0x6e, 0x64, 0x73, 0x20, 0x61, 0x66, 0x74, 0x65, 0x72, 0x20, 0x77, 0x68, 0x69, 0x63, 0x68, - 0x20, 0x74, 0x68, 0x65, 0x20, 0x52, 0x61, 0x79, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x20, - 0x77, 0x69, 0x6c, 0x6c, 0x20, 0x62, 0x65, 0x20, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x20, - 0x61, 0x66, 0x74, 0x65, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x52, 0x61, 0x79, 0x4a, 0x6f, 0x62, - 0x20, 0x66, 0x69, 0x6e, 0x69, 0x73, 0x68, 0x65, 0x73, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, - 0x00, 0x02, 0x03, 0x05, 0x12, 0x03, 0x12, 0x02, 0x07, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, - 0x03, 0x01, 0x12, 0x03, 0x12, 0x08, 0x22, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x03, 0x03, - 0x12, 0x03, 0x12, 0x25, 0x26, 0x0a, 0x75, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x04, 0x12, 0x03, 0x15, - 0x02, 0x1e, 0x1a, 0x68, 0x20, 0x52, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x45, 0x6e, 0x76, 0x59, - 0x41, 0x4d, 0x4c, 0x20, 0x72, 0x65, 0x70, 0x72, 0x65, 0x73, 0x65, 0x6e, 0x74, 0x73, 0x20, 0x74, - 0x68, 0x65, 0x20, 0x72, 0x75, 0x6e, 0x74, 0x69, 0x6d, 0x65, 0x20, 0x65, 0x6e, 0x76, 0x69, 0x72, - 0x6f, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x20, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x0a, 0x20, 0x70, 0x72, 0x6f, 0x76, 0x69, 0x64, 0x65, 0x64, 0x20, 0x61, - 0x73, 0x20, 0x61, 0x20, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x2d, 0x6c, 0x69, 0x6e, 0x65, 0x20, 0x59, - 0x41, 0x4d, 0x4c, 0x20, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, - 0x04, 0x00, 0x02, 0x04, 0x05, 0x12, 0x03, 0x15, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, - 
0x02, 0x04, 0x01, 0x12, 0x03, 0x15, 0x09, 0x19, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x04, - 0x03, 0x12, 0x03, 0x15, 0x1c, 0x1d, 0x0a, 0x48, 0x0a, 0x02, 0x04, 0x01, 0x12, 0x04, 0x19, 0x00, - 0x20, 0x01, 0x1a, 0x3c, 0x20, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x20, 0x52, 0x61, 0x79, 0x20, - 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x20, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x73, 0x20, - 0x74, 0x68, 0x65, 0x20, 0x64, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x20, 0x73, 0x74, 0x61, 0x74, - 0x65, 0x20, 0x6f, 0x66, 0x20, 0x52, 0x61, 0x79, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x0a, - 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x01, 0x01, 0x12, 0x03, 0x19, 0x08, 0x12, 0x0a, 0x3b, 0x0a, 0x04, - 0x04, 0x01, 0x02, 0x00, 0x12, 0x03, 0x1b, 0x02, 0x24, 0x1a, 0x2e, 0x20, 0x48, 0x65, 0x61, 0x64, - 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x70, 0x65, 0x63, 0x73, 0x20, 0x61, 0x72, 0x65, 0x20, 0x74, - 0x68, 0x65, 0x20, 0x73, 0x70, 0x65, 0x63, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, - 0x68, 0x65, 0x61, 0x64, 0x20, 0x70, 0x6f, 0x64, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, - 0x00, 0x06, 0x12, 0x03, 0x1b, 0x02, 0x0f, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x01, - 0x12, 0x03, 0x1b, 0x10, 0x1f, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x03, 0x12, 0x03, - 0x1b, 0x22, 0x23, 0x0a, 0x41, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x01, 0x12, 0x03, 0x1d, 0x02, 0x31, - 0x1a, 0x34, 0x20, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, 0x70, - 0x65, 0x63, 0x73, 0x20, 0x61, 0x72, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x70, 0x65, 0x63, - 0x73, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, - 0x20, 0x70, 0x6f, 0x64, 0x73, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x01, 0x04, 0x12, - 0x03, 0x1d, 0x02, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x01, 0x06, 0x12, 0x03, 0x1d, - 0x0b, 0x1a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x01, 0x01, 0x12, 0x03, 0x1d, 0x1b, 0x2c, - 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 
0x02, 0x01, 0x03, 0x12, 0x03, 0x1d, 0x2f, 0x30, 0x0a, 0x2d, - 0x0a, 0x04, 0x04, 0x01, 0x02, 0x02, 0x12, 0x03, 0x1f, 0x02, 0x1e, 0x1a, 0x20, 0x20, 0x57, 0x68, - 0x65, 0x74, 0x68, 0x65, 0x72, 0x20, 0x74, 0x6f, 0x20, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x20, - 0x61, 0x75, 0x74, 0x6f, 0x73, 0x63, 0x61, 0x6c, 0x69, 0x6e, 0x67, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, - 0x05, 0x04, 0x01, 0x02, 0x02, 0x05, 0x12, 0x03, 0x1f, 0x02, 0x06, 0x0a, 0x0c, 0x0a, 0x05, 0x04, - 0x01, 0x02, 0x02, 0x01, 0x12, 0x03, 0x1f, 0x07, 0x19, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, - 0x02, 0x03, 0x12, 0x03, 0x1f, 0x1c, 0x1d, 0x0a, 0x39, 0x0a, 0x02, 0x04, 0x02, 0x12, 0x04, 0x23, - 0x00, 0x29, 0x01, 0x1a, 0x2d, 0x20, 0x48, 0x65, 0x61, 0x64, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x53, - 0x70, 0x65, 0x63, 0x20, 0x61, 0x72, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x70, 0x65, 0x63, - 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x68, 0x65, 0x61, 0x64, 0x20, 0x70, 0x6f, - 0x64, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x02, 0x01, 0x12, 0x03, 0x23, 0x08, 0x15, 0x0a, 0xb7, - 0x01, 0x0a, 0x04, 0x04, 0x02, 0x02, 0x00, 0x12, 0x03, 0x26, 0x02, 0x2b, 0x1a, 0xa9, 0x01, 0x20, - 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x2e, 0x20, 0x52, 0x61, 0x79, 0x53, 0x74, 0x61, - 0x72, 0x74, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x20, 0x61, 0x72, 0x65, 0x20, 0x74, 0x68, 0x65, - 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, - 0x74, 0x61, 0x72, 0x74, 0x20, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x3a, 0x20, 0x61, 0x64, - 0x64, 0x72, 0x65, 0x73, 0x73, 0x2c, 0x20, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2d, 0x73, 0x74, - 0x6f, 0x72, 0x65, 0x2d, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x2e, 0x0a, 0x20, 0x52, 0x65, 0x66, - 0x65, 0x72, 0x20, 0x74, 0x6f, 0x20, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x64, 0x6f, - 0x63, 0x73, 0x2e, 0x72, 0x61, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x65, 0x6e, 0x2f, 0x6c, 0x61, 0x74, - 0x65, 0x73, 0x74, 0x2f, 0x72, 0x61, 0x79, 0x2d, 0x63, 0x6f, 0x72, 0x65, 
0x2f, 0x70, 0x61, 0x63, - 0x6b, 0x61, 0x67, 0x65, 0x2d, 0x72, 0x65, 0x66, 0x2e, 0x68, 0x74, 0x6d, 0x6c, 0x23, 0x72, 0x61, - 0x79, 0x2d, 0x73, 0x74, 0x61, 0x72, 0x74, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, - 0x06, 0x12, 0x03, 0x26, 0x02, 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, 0x01, 0x12, - 0x03, 0x26, 0x16, 0x26, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x00, 0x03, 0x12, 0x03, 0x26, - 0x29, 0x2a, 0x0a, 0x2c, 0x0a, 0x04, 0x04, 0x02, 0x02, 0x01, 0x12, 0x03, 0x28, 0x02, 0x1a, 0x1a, - 0x1f, 0x20, 0x50, 0x6f, 0x64, 0x20, 0x53, 0x70, 0x65, 0x63, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, - 0x68, 0x65, 0x20, 0x72, 0x61, 0x79, 0x20, 0x68, 0x65, 0x61, 0x64, 0x20, 0x70, 0x6f, 0x64, 0x0a, - 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x02, 0x02, 0x01, 0x06, 0x12, 0x03, 0x28, 0x02, 0x0d, 0x0a, 0x0c, - 0x0a, 0x05, 0x04, 0x02, 0x02, 0x01, 0x01, 0x12, 0x03, 0x28, 0x0e, 0x15, 0x0a, 0x0c, 0x0a, 0x05, - 0x04, 0x02, 0x02, 0x01, 0x03, 0x12, 0x03, 0x28, 0x18, 0x19, 0x0a, 0x3f, 0x0a, 0x02, 0x04, 0x03, - 0x12, 0x04, 0x2c, 0x00, 0x3a, 0x01, 0x1a, 0x33, 0x20, 0x57, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x47, - 0x72, 0x6f, 0x75, 0x70, 0x53, 0x70, 0x65, 0x63, 0x20, 0x61, 0x72, 0x65, 0x20, 0x74, 0x68, 0x65, - 0x20, 0x73, 0x70, 0x65, 0x63, 0x73, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x77, - 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x20, 0x70, 0x6f, 0x64, 0x73, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, - 0x03, 0x01, 0x12, 0x03, 0x2c, 0x08, 0x17, 0x0a, 0x66, 0x0a, 0x04, 0x04, 0x03, 0x02, 0x00, 0x12, - 0x03, 0x2e, 0x02, 0x18, 0x1a, 0x59, 0x20, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x2e, - 0x20, 0x52, 0x61, 0x79, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x20, 0x63, 0x61, 0x6e, 0x20, - 0x68, 0x61, 0x76, 0x65, 0x20, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x20, 0x77, 0x6f, - 0x72, 0x6b, 0x65, 0x72, 0x20, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x73, 0x2c, 0x20, 0x61, 0x6e, 0x64, - 0x20, 0x69, 0x74, 0x20, 0x64, 0x69, 0x73, 0x74, 0x69, 0x6e, 0x67, 0x75, 0x69, 0x73, 0x68, 0x65, - 0x73, 0x20, 
0x74, 0x68, 0x65, 0x6d, 0x20, 0x62, 0x79, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x0a, 0x0a, - 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x00, 0x05, 0x12, 0x03, 0x2e, 0x02, 0x08, 0x0a, 0x0c, 0x0a, - 0x05, 0x04, 0x03, 0x02, 0x00, 0x01, 0x12, 0x03, 0x2e, 0x09, 0x13, 0x0a, 0x0c, 0x0a, 0x05, 0x04, - 0x03, 0x02, 0x00, 0x03, 0x12, 0x03, 0x2e, 0x16, 0x17, 0x0a, 0x4d, 0x0a, 0x04, 0x04, 0x03, 0x02, - 0x01, 0x12, 0x03, 0x30, 0x02, 0x15, 0x1a, 0x40, 0x20, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, - 0x64, 0x2e, 0x20, 0x44, 0x65, 0x73, 0x69, 0x72, 0x65, 0x64, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x73, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x77, 0x6f, 0x72, 0x6b, 0x65, - 0x72, 0x20, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x2e, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, - 0x73, 0x20, 0x74, 0x6f, 0x20, 0x31, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x01, - 0x05, 0x12, 0x03, 0x30, 0x02, 0x07, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x01, 0x01, 0x12, - 0x03, 0x30, 0x08, 0x10, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x01, 0x03, 0x12, 0x03, 0x30, - 0x13, 0x14, 0x0a, 0x55, 0x0a, 0x04, 0x04, 0x03, 0x02, 0x02, 0x12, 0x03, 0x32, 0x02, 0x19, 0x1a, - 0x48, 0x20, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x2e, 0x20, 0x4d, 0x69, 0x6e, 0x20, - 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, - 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x20, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x2e, 0x20, 0x4d, 0x69, - 0x6e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x20, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, - 0x74, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x31, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, - 0x02, 0x05, 0x12, 0x03, 0x32, 0x02, 0x07, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x02, 0x01, - 0x12, 0x03, 0x32, 0x08, 0x14, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x02, 0x03, 0x12, 0x03, - 0x32, 0x17, 0x18, 0x0a, 0x5b, 0x0a, 0x04, 0x04, 0x03, 0x02, 0x03, 0x12, 0x03, 0x34, 0x02, 0x19, - 0x1a, 0x4e, 0x20, 0x4f, 0x70, 0x74, 0x69, 0x6f, 
0x6e, 0x61, 0x6c, 0x2e, 0x20, 0x4d, 0x61, 0x78, - 0x20, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, - 0x20, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x20, 0x67, 0x72, 0x6f, 0x75, 0x70, 0x2e, 0x20, 0x4d, - 0x61, 0x78, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x20, 0x64, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x73, 0x20, 0x74, 0x6f, 0x20, 0x6d, 0x61, 0x78, 0x49, 0x6e, 0x74, 0x33, 0x32, 0x0a, - 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x03, 0x05, 0x12, 0x03, 0x34, 0x02, 0x07, 0x0a, 0x0c, - 0x0a, 0x05, 0x04, 0x03, 0x02, 0x03, 0x01, 0x12, 0x03, 0x34, 0x08, 0x14, 0x0a, 0x0c, 0x0a, 0x05, - 0x04, 0x03, 0x02, 0x03, 0x03, 0x12, 0x03, 0x34, 0x17, 0x18, 0x0a, 0xb7, 0x01, 0x0a, 0x04, 0x04, - 0x03, 0x02, 0x04, 0x12, 0x03, 0x37, 0x02, 0x2b, 0x1a, 0xa9, 0x01, 0x20, 0x4f, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x61, 0x6c, 0x2e, 0x20, 0x52, 0x61, 0x79, 0x53, 0x74, 0x61, 0x72, 0x74, 0x50, 0x61, - 0x72, 0x61, 0x6d, 0x73, 0x20, 0x61, 0x72, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x61, 0x72, - 0x61, 0x6d, 0x73, 0x20, 0x6f, 0x66, 0x20, 0x74, 0x68, 0x65, 0x20, 0x73, 0x74, 0x61, 0x72, 0x74, - 0x20, 0x63, 0x6f, 0x6d, 0x6d, 0x61, 0x6e, 0x64, 0x3a, 0x20, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, - 0x73, 0x2c, 0x20, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2d, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x2d, - 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x2e, 0x0a, 0x20, 0x52, 0x65, 0x66, 0x65, 0x72, 0x20, 0x74, - 0x6f, 0x20, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x64, 0x6f, 0x63, 0x73, 0x2e, 0x72, - 0x61, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x65, 0x6e, 0x2f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x2f, - 0x72, 0x61, 0x79, 0x2d, 0x63, 0x6f, 0x72, 0x65, 0x2f, 0x70, 0x61, 0x63, 0x6b, 0x61, 0x67, 0x65, - 0x2d, 0x72, 0x65, 0x66, 0x2e, 0x68, 0x74, 0x6d, 0x6c, 0x23, 0x72, 0x61, 0x79, 0x2d, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x04, 0x06, 0x12, 0x03, 0x37, - 0x02, 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x04, 0x01, 0x12, 0x03, 0x37, 
0x16, 0x26, - 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x04, 0x03, 0x12, 0x03, 0x37, 0x29, 0x2a, 0x0a, 0x2b, - 0x0a, 0x04, 0x04, 0x03, 0x02, 0x05, 0x12, 0x03, 0x39, 0x02, 0x1a, 0x1a, 0x1e, 0x20, 0x50, 0x6f, - 0x64, 0x20, 0x53, 0x70, 0x65, 0x63, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x72, 0x61, 0x79, 0x20, 0x77, - 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x20, 0x70, 0x6f, 0x64, 0x73, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, - 0x03, 0x02, 0x05, 0x06, 0x12, 0x03, 0x39, 0x02, 0x0d, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, - 0x05, 0x01, 0x12, 0x03, 0x39, 0x0e, 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x03, 0x02, 0x05, 0x03, - 0x12, 0x03, 0x39, 0x18, 0x19, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, 0x0a, 0xf7, 0x14, - 0x0a, 0x1d, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x73, 0x2f, 0x73, 0x70, 0x61, 0x72, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x11, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, - 0x6e, 0x73, 0x1a, 0x1a, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x63, 0x6f, - 0x72, 0x65, 0x2f, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, - 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x42, 0x0a, 0x10, - 0x53, 0x70, 0x61, 0x72, 0x6b, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x22, 0x2e, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, 0x06, 0x50, 0x59, 0x54, 0x48, - 0x4f, 0x4e, 0x10, 0x00, 0x12, 0x08, 0x0a, 0x04, 0x4a, 0x41, 0x56, 0x41, 0x10, 0x01, 0x12, 0x09, - 0x0a, 0x05, 0x53, 0x43, 0x41, 0x4c, 0x41, 0x10, 0x02, 0x12, 0x05, 0x0a, 0x01, 0x52, 0x10, 0x03, - 0x22, 0xf1, 0x05, 0x0a, 0x08, 0x53, 0x70, 0x61, 0x72, 0x6b, 0x4a, 0x6f, 0x62, 0x12, 0x52, 0x0a, - 0x0f, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, 0x65, - 0x18, 0x01, 0x20, 0x01, 
0x28, 0x0e, 0x32, 0x28, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, - 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x53, 0x70, 0x61, 0x72, 0x6b, - 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x54, 0x79, 0x70, 0x65, - 0x52, 0x0f, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x30, 0x0a, 0x13, 0x6d, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x69, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, - 0x6d, 0x61, 0x69, 0x6e, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x46, - 0x69, 0x6c, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6d, 0x61, 0x69, 0x6e, 0x43, 0x6c, 0x61, 0x73, 0x73, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x61, 0x69, 0x6e, 0x43, 0x6c, 0x61, 0x73, - 0x73, 0x12, 0x48, 0x0a, 0x09, 0x73, 0x70, 0x61, 0x72, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x18, 0x04, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, - 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2e, 0x53, 0x70, 0x61, 0x72, 0x6b, 0x4a, 0x6f, - 0x62, 0x2e, 0x53, 0x70, 0x61, 0x72, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x52, 0x09, 0x73, 0x70, 0x61, 0x72, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x12, 0x4b, 0x0a, 0x0a, 0x68, - 0x61, 0x64, 0x6f, 0x6f, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x2b, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x73, 0x2e, 0x53, 0x70, 0x61, 0x72, 0x6b, 0x4a, 0x6f, 0x62, 0x2e, 0x48, 0x61, 0x64, - 0x6f, 0x6f, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x68, 0x61, - 0x64, 0x6f, 0x6f, 0x70, 0x43, 0x6f, 0x6e, 0x66, 0x12, 0x22, 0x0a, 0x0c, 0x65, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x6f, 0x72, 0x50, 0x61, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, - 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x50, 0x61, 
0x74, 0x68, 0x12, 0x3f, 0x0a, 0x0e, - 0x64, 0x61, 0x74, 0x61, 0x62, 0x72, 0x69, 0x63, 0x6b, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x18, 0x07, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x0e, 0x64, - 0x61, 0x74, 0x61, 0x62, 0x72, 0x69, 0x63, 0x6b, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x12, 0x28, 0x0a, - 0x0f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x72, 0x69, 0x63, 0x6b, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, - 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x64, 0x61, 0x74, 0x61, 0x62, 0x72, 0x69, 0x63, - 0x6b, 0x73, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x2e, 0x0a, 0x12, 0x64, 0x61, 0x74, 0x61, 0x62, - 0x72, 0x69, 0x63, 0x6b, 0x73, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x09, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x12, 0x64, 0x61, 0x74, 0x61, 0x62, 0x72, 0x69, 0x63, 0x6b, 0x73, 0x49, - 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x09, 0x64, 0x72, 0x69, 0x76, 0x65, - 0x72, 0x50, 0x6f, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x66, 0x6c, 0x79, - 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, 0x6f, 0x72, 0x65, 0x2e, 0x4b, 0x38, 0x73, 0x50, - 0x6f, 0x64, 0x52, 0x09, 0x64, 0x72, 0x69, 0x76, 0x65, 0x72, 0x50, 0x6f, 0x64, 0x12, 0x38, 0x0a, - 0x0b, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x50, 0x6f, 0x64, 0x18, 0x0b, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x63, - 0x6f, 0x72, 0x65, 0x2e, 0x4b, 0x38, 0x73, 0x50, 0x6f, 0x64, 0x52, 0x0b, 0x65, 0x78, 0x65, 0x63, - 0x75, 0x74, 0x6f, 0x72, 0x50, 0x6f, 0x64, 0x1a, 0x3c, 0x0a, 0x0e, 0x53, 0x70, 0x61, 0x72, 0x6b, - 0x43, 0x6f, 0x6e, 0x66, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 
0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x3d, 0x0a, 0x0f, 0x48, 0x61, 0x64, 0x6f, 0x6f, 0x70, 0x43, - 0x6f, 0x6e, 0x66, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, - 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, - 0x3a, 0x02, 0x38, 0x01, 0x42, 0xbf, 0x01, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, 0x79, - 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x42, 0x0a, - 0x53, 0x70, 0x61, 0x72, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x35, 0x67, 0x69, - 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, - 0x67, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, - 0x6f, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, - 0x69, 0x6e, 0x73, 0xa2, 0x02, 0x03, 0x46, 0x50, 0x58, 0xaa, 0x02, 0x11, 0x46, 0x6c, 0x79, 0x74, - 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0xca, 0x02, 0x11, - 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, - 0x73, 0xe2, 0x02, 0x1d, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x50, 0x6c, - 0x75, 0x67, 0x69, 0x6e, 0x73, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, - 0x61, 0xea, 0x02, 0x12, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x3a, 0x3a, 0x50, - 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x4a, 0x86, 0x0c, 0x0a, 0x06, 0x12, 0x04, 0x00, 0x00, 0x29, - 0x01, 0x0a, 0x08, 0x0a, 0x01, 0x0c, 0x12, 0x03, 0x00, 0x00, 0x12, 0x0a, 0x08, 0x0a, 0x01, 0x02, - 0x12, 0x03, 0x02, 0x00, 0x1a, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x00, 0x12, 0x03, 0x04, 0x00, 0x24, - 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x01, 0x12, 0x03, 0x05, 0x00, 0x26, 0x0a, 0x08, 0x0a, 0x01, 0x08, - 0x12, 0x03, 0x07, 0x00, 0x4c, 0x0a, 
0x09, 0x0a, 0x02, 0x08, 0x0b, 0x12, 0x03, 0x07, 0x00, 0x4c, - 0x0a, 0x0a, 0x0a, 0x02, 0x04, 0x00, 0x12, 0x04, 0x09, 0x00, 0x10, 0x01, 0x0a, 0x0a, 0x0a, 0x03, - 0x04, 0x00, 0x01, 0x12, 0x03, 0x09, 0x08, 0x18, 0x0a, 0x0c, 0x0a, 0x04, 0x04, 0x00, 0x04, 0x00, - 0x12, 0x04, 0x0a, 0x02, 0x0f, 0x03, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x04, 0x00, 0x01, 0x12, - 0x03, 0x0a, 0x07, 0x0b, 0x0a, 0x0d, 0x0a, 0x06, 0x04, 0x00, 0x04, 0x00, 0x02, 0x00, 0x12, 0x03, - 0x0b, 0x04, 0x0f, 0x0a, 0x0e, 0x0a, 0x07, 0x04, 0x00, 0x04, 0x00, 0x02, 0x00, 0x01, 0x12, 0x03, - 0x0b, 0x04, 0x0a, 0x0a, 0x0e, 0x0a, 0x07, 0x04, 0x00, 0x04, 0x00, 0x02, 0x00, 0x02, 0x12, 0x03, - 0x0b, 0x0d, 0x0e, 0x0a, 0x0d, 0x0a, 0x06, 0x04, 0x00, 0x04, 0x00, 0x02, 0x01, 0x12, 0x03, 0x0c, - 0x04, 0x0d, 0x0a, 0x0e, 0x0a, 0x07, 0x04, 0x00, 0x04, 0x00, 0x02, 0x01, 0x01, 0x12, 0x03, 0x0c, - 0x04, 0x08, 0x0a, 0x0e, 0x0a, 0x07, 0x04, 0x00, 0x04, 0x00, 0x02, 0x01, 0x02, 0x12, 0x03, 0x0c, - 0x0b, 0x0c, 0x0a, 0x0d, 0x0a, 0x06, 0x04, 0x00, 0x04, 0x00, 0x02, 0x02, 0x12, 0x03, 0x0d, 0x04, - 0x0e, 0x0a, 0x0e, 0x0a, 0x07, 0x04, 0x00, 0x04, 0x00, 0x02, 0x02, 0x01, 0x12, 0x03, 0x0d, 0x04, - 0x09, 0x0a, 0x0e, 0x0a, 0x07, 0x04, 0x00, 0x04, 0x00, 0x02, 0x02, 0x02, 0x12, 0x03, 0x0d, 0x0c, - 0x0d, 0x0a, 0x0d, 0x0a, 0x06, 0x04, 0x00, 0x04, 0x00, 0x02, 0x03, 0x12, 0x03, 0x0e, 0x04, 0x0a, - 0x0a, 0x0e, 0x0a, 0x07, 0x04, 0x00, 0x04, 0x00, 0x02, 0x03, 0x01, 0x12, 0x03, 0x0e, 0x04, 0x05, - 0x0a, 0x0e, 0x0a, 0x07, 0x04, 0x00, 0x04, 0x00, 0x02, 0x03, 0x02, 0x12, 0x03, 0x0e, 0x08, 0x09, - 0x0a, 0x2c, 0x0a, 0x02, 0x04, 0x01, 0x12, 0x04, 0x13, 0x00, 0x29, 0x01, 0x1a, 0x20, 0x20, 0x43, - 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x20, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x20, 0x66, 0x6f, 0x72, 0x20, - 0x53, 0x70, 0x61, 0x72, 0x6b, 0x20, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x0a, 0x0a, 0x0a, - 0x0a, 0x03, 0x04, 0x01, 0x01, 0x12, 0x03, 0x13, 0x08, 0x10, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x01, - 0x02, 0x00, 0x12, 0x03, 0x14, 0x02, 0x2c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 
0x01, 0x02, 0x00, 0x06, - 0x12, 0x03, 0x14, 0x02, 0x17, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x01, 0x12, 0x03, - 0x14, 0x18, 0x27, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x03, 0x12, 0x03, 0x14, 0x2a, - 0x2b, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x01, 0x12, 0x03, 0x15, 0x02, 0x21, 0x0a, 0x0c, - 0x0a, 0x05, 0x04, 0x01, 0x02, 0x01, 0x05, 0x12, 0x03, 0x15, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, - 0x04, 0x01, 0x02, 0x01, 0x01, 0x12, 0x03, 0x15, 0x09, 0x1c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, - 0x02, 0x01, 0x03, 0x12, 0x03, 0x15, 0x1f, 0x20, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x02, - 0x12, 0x03, 0x16, 0x02, 0x17, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x02, 0x05, 0x12, 0x03, - 0x16, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x02, 0x01, 0x12, 0x03, 0x16, 0x09, - 0x12, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x02, 0x03, 0x12, 0x03, 0x16, 0x15, 0x16, 0x0a, - 0x0b, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x03, 0x12, 0x03, 0x17, 0x02, 0x24, 0x0a, 0x0c, 0x0a, 0x05, - 0x04, 0x01, 0x02, 0x03, 0x06, 0x12, 0x03, 0x17, 0x02, 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, - 0x02, 0x03, 0x01, 0x12, 0x03, 0x17, 0x16, 0x1f, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x03, - 0x03, 0x12, 0x03, 0x17, 0x22, 0x23, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x04, 0x12, 0x03, - 0x18, 0x02, 0x25, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x04, 0x06, 0x12, 0x03, 0x18, 0x02, - 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x04, 0x01, 0x12, 0x03, 0x18, 0x16, 0x20, 0x0a, - 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x04, 0x03, 0x12, 0x03, 0x18, 0x23, 0x24, 0x0a, 0x2d, 0x0a, - 0x04, 0x04, 0x01, 0x02, 0x05, 0x12, 0x03, 0x19, 0x02, 0x1a, 0x22, 0x20, 0x20, 0x45, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x6f, 0x72, 0x20, 0x70, 0x61, 0x74, 0x68, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x50, - 0x79, 0x74, 0x68, 0x6f, 0x6e, 0x20, 0x6a, 0x6f, 0x62, 0x73, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, - 0x04, 0x01, 0x02, 0x05, 0x05, 0x12, 0x03, 0x19, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, - 0x02, 0x05, 
0x01, 0x12, 0x03, 0x19, 0x09, 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x05, - 0x03, 0x12, 0x03, 0x19, 0x18, 0x19, 0x0a, 0x9d, 0x01, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x06, 0x12, - 0x03, 0x1c, 0x02, 0x2c, 0x1a, 0x8f, 0x01, 0x20, 0x44, 0x61, 0x74, 0x61, 0x62, 0x72, 0x69, 0x63, - 0x6b, 0x73, 0x20, 0x6a, 0x6f, 0x62, 0x20, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x0a, 0x20, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x20, 0x73, 0x74, - 0x72, 0x75, 0x63, 0x74, 0x75, 0x72, 0x65, 0x20, 0x63, 0x61, 0x6e, 0x20, 0x62, 0x65, 0x20, 0x66, - 0x6f, 0x75, 0x6e, 0x64, 0x20, 0x68, 0x65, 0x72, 0x65, 0x2e, 0x20, 0x68, 0x74, 0x74, 0x70, 0x73, - 0x3a, 0x2f, 0x2f, 0x64, 0x6f, 0x63, 0x73, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x72, 0x69, 0x63, - 0x6b, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x64, 0x65, 0x76, 0x2d, 0x74, 0x6f, 0x6f, 0x6c, 0x73, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x32, 0x2e, 0x30, 0x2f, 0x6a, 0x6f, 0x62, 0x73, 0x2e, 0x68, 0x74, - 0x6d, 0x6c, 0x23, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2d, 0x73, 0x74, 0x72, 0x75, 0x63, - 0x74, 0x75, 0x72, 0x65, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x06, 0x06, 0x12, - 0x03, 0x1c, 0x02, 0x18, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x06, 0x01, 0x12, 0x03, 0x1c, - 0x19, 0x27, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x06, 0x03, 0x12, 0x03, 0x1c, 0x2a, 0x2b, - 0x0a, 0xaa, 0x01, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x07, 0x12, 0x03, 0x1f, 0x02, 0x1d, 0x1a, 0x9c, - 0x01, 0x20, 0x44, 0x61, 0x74, 0x61, 0x62, 0x72, 0x69, 0x63, 0x6b, 0x73, 0x20, 0x61, 0x63, 0x63, - 0x65, 0x73, 0x73, 0x20, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x2e, 0x20, 0x68, 0x74, 0x74, 0x70, 0x73, - 0x3a, 0x2f, 0x2f, 0x64, 0x6f, 0x63, 0x73, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x72, 0x69, 0x63, - 0x6b, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x64, 0x65, 0x76, 0x2d, 0x74, 0x6f, 0x6f, 0x6c, 0x73, - 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x2f, 0x61, 0x75, 0x74, 0x68, - 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 
0x6f, 0x6e, 0x2e, 0x68, 0x74, 0x6d, 0x6c, 0x0a, - 0x20, 0x54, 0x68, 0x69, 0x73, 0x20, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x20, 0x63, 0x61, 0x6e, 0x20, - 0x62, 0x65, 0x20, 0x73, 0x65, 0x74, 0x20, 0x69, 0x6e, 0x20, 0x65, 0x69, 0x74, 0x68, 0x65, 0x72, - 0x20, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x6c, 0x6c, 0x65, 0x72, 0x20, - 0x6f, 0x72, 0x20, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6b, 0x69, 0x74, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, - 0x05, 0x04, 0x01, 0x02, 0x07, 0x05, 0x12, 0x03, 0x1f, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, - 0x01, 0x02, 0x07, 0x01, 0x12, 0x03, 0x1f, 0x09, 0x18, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, - 0x07, 0x03, 0x12, 0x03, 0x1f, 0x1b, 0x1c, 0x0a, 0xa1, 0x01, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x08, - 0x12, 0x03, 0x22, 0x02, 0x20, 0x1a, 0x93, 0x01, 0x20, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x20, - 0x6e, 0x61, 0x6d, 0x65, 0x20, 0x6f, 0x66, 0x20, 0x79, 0x6f, 0x75, 0x72, 0x20, 0x64, 0x65, 0x70, - 0x6c, 0x6f, 0x79, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x20, 0x55, 0x73, 0x65, 0x20, 0x74, 0x68, 0x65, - 0x20, 0x66, 0x6f, 0x72, 0x6d, 0x20, 0x3c, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x3e, 0x2e, - 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x72, 0x69, 0x63, 0x6b, 0x73, - 0x2e, 0x63, 0x6f, 0x6d, 0x2e, 0x0a, 0x20, 0x54, 0x68, 0x69, 0x73, 0x20, 0x69, 0x6e, 0x73, 0x74, - 0x61, 0x6e, 0x63, 0x65, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x20, 0x63, 0x61, 0x6e, 0x20, 0x62, 0x65, - 0x20, 0x73, 0x65, 0x74, 0x20, 0x69, 0x6e, 0x20, 0x65, 0x69, 0x74, 0x68, 0x65, 0x72, 0x20, 0x66, - 0x6c, 0x79, 0x74, 0x65, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x6c, 0x6c, 0x65, 0x72, 0x20, 0x6f, 0x72, - 0x20, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6b, 0x69, 0x74, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, - 0x01, 0x02, 0x08, 0x05, 0x12, 0x03, 0x22, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, - 0x08, 0x01, 0x12, 0x03, 0x22, 0x09, 0x1b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x08, 0x03, - 0x12, 0x03, 0x22, 0x1e, 0x1f, 0x0a, 0x30, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x09, 0x12, 
0x03, 0x25, - 0x02, 0x1d, 0x1a, 0x23, 0x20, 0x50, 0x6f, 0x64, 0x20, 0x53, 0x70, 0x65, 0x63, 0x20, 0x66, 0x6f, - 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x53, 0x70, 0x61, 0x72, 0x6b, 0x20, 0x64, 0x72, 0x69, 0x76, - 0x65, 0x72, 0x20, 0x70, 0x6f, 0x64, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x09, 0x06, - 0x12, 0x03, 0x25, 0x02, 0x0d, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x09, 0x01, 0x12, 0x03, - 0x25, 0x0e, 0x17, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x09, 0x03, 0x12, 0x03, 0x25, 0x1a, - 0x1c, 0x0a, 0x32, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x0a, 0x12, 0x03, 0x28, 0x02, 0x1f, 0x1a, 0x25, - 0x20, 0x50, 0x6f, 0x64, 0x20, 0x53, 0x70, 0x65, 0x63, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, - 0x65, 0x20, 0x53, 0x70, 0x61, 0x72, 0x6b, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, - 0x20, 0x70, 0x6f, 0x64, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x0a, 0x06, 0x12, 0x03, - 0x28, 0x02, 0x0d, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x0a, 0x01, 0x12, 0x03, 0x28, 0x0e, - 0x19, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x0a, 0x03, 0x12, 0x03, 0x28, 0x1c, 0x1e, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x01, 0x42, 0xbf, 0x01, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x42, 0x0a, 0x53, 0x70, 0x61, + 0x72, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x76, 0x32, 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, + 0xa2, 0x02, 0x03, 0x46, 0x50, 0x58, 0xaa, 0x02, 0x11, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x32, 0x2e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0xca, 0x02, 0x11, 0x46, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 
0x73, 0xe2, 0x02, + 0x1d, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, 0x50, 0x6c, 0x75, 0x67, 0x69, + 0x6e, 0x73, 0x5c, 0x47, 0x50, 0x42, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, + 0x12, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x3a, 0x3a, 0x50, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x73, 0x4a, 0x86, 0x0c, 0x0a, 0x06, 0x12, 0x04, 0x00, 0x00, 0x29, 0x01, 0x0a, 0x08, + 0x0a, 0x01, 0x0c, 0x12, 0x03, 0x00, 0x00, 0x12, 0x0a, 0x08, 0x0a, 0x01, 0x02, 0x12, 0x03, 0x02, + 0x00, 0x1a, 0x0a, 0x09, 0x0a, 0x02, 0x03, 0x00, 0x12, 0x03, 0x04, 0x00, 0x24, 0x0a, 0x09, 0x0a, + 0x02, 0x03, 0x01, 0x12, 0x03, 0x05, 0x00, 0x26, 0x0a, 0x08, 0x0a, 0x01, 0x08, 0x12, 0x03, 0x07, + 0x00, 0x4c, 0x0a, 0x09, 0x0a, 0x02, 0x08, 0x0b, 0x12, 0x03, 0x07, 0x00, 0x4c, 0x0a, 0x0a, 0x0a, + 0x02, 0x04, 0x00, 0x12, 0x04, 0x09, 0x00, 0x10, 0x01, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x00, 0x01, + 0x12, 0x03, 0x09, 0x08, 0x18, 0x0a, 0x0c, 0x0a, 0x04, 0x04, 0x00, 0x04, 0x00, 0x12, 0x04, 0x0a, + 0x02, 0x0f, 0x03, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x04, 0x00, 0x01, 0x12, 0x03, 0x0a, 0x07, + 0x0b, 0x0a, 0x0d, 0x0a, 0x06, 0x04, 0x00, 0x04, 0x00, 0x02, 0x00, 0x12, 0x03, 0x0b, 0x04, 0x0f, + 0x0a, 0x0e, 0x0a, 0x07, 0x04, 0x00, 0x04, 0x00, 0x02, 0x00, 0x01, 0x12, 0x03, 0x0b, 0x04, 0x0a, + 0x0a, 0x0e, 0x0a, 0x07, 0x04, 0x00, 0x04, 0x00, 0x02, 0x00, 0x02, 0x12, 0x03, 0x0b, 0x0d, 0x0e, + 0x0a, 0x0d, 0x0a, 0x06, 0x04, 0x00, 0x04, 0x00, 0x02, 0x01, 0x12, 0x03, 0x0c, 0x04, 0x0d, 0x0a, + 0x0e, 0x0a, 0x07, 0x04, 0x00, 0x04, 0x00, 0x02, 0x01, 0x01, 0x12, 0x03, 0x0c, 0x04, 0x08, 0x0a, + 0x0e, 0x0a, 0x07, 0x04, 0x00, 0x04, 0x00, 0x02, 0x01, 0x02, 0x12, 0x03, 0x0c, 0x0b, 0x0c, 0x0a, + 0x0d, 0x0a, 0x06, 0x04, 0x00, 0x04, 0x00, 0x02, 0x02, 0x12, 0x03, 0x0d, 0x04, 0x0e, 0x0a, 0x0e, + 0x0a, 0x07, 0x04, 0x00, 0x04, 0x00, 0x02, 0x02, 0x01, 0x12, 0x03, 0x0d, 0x04, 0x09, 0x0a, 0x0e, + 0x0a, 0x07, 0x04, 0x00, 0x04, 0x00, 0x02, 0x02, 0x02, 0x12, 0x03, 0x0d, 0x0c, 0x0d, 0x0a, 0x0d, + 0x0a, 0x06, 0x04, 
0x00, 0x04, 0x00, 0x02, 0x03, 0x12, 0x03, 0x0e, 0x04, 0x0a, 0x0a, 0x0e, 0x0a, + 0x07, 0x04, 0x00, 0x04, 0x00, 0x02, 0x03, 0x01, 0x12, 0x03, 0x0e, 0x04, 0x05, 0x0a, 0x0e, 0x0a, + 0x07, 0x04, 0x00, 0x04, 0x00, 0x02, 0x03, 0x02, 0x12, 0x03, 0x0e, 0x08, 0x09, 0x0a, 0x2c, 0x0a, + 0x02, 0x04, 0x01, 0x12, 0x04, 0x13, 0x00, 0x29, 0x01, 0x1a, 0x20, 0x20, 0x43, 0x75, 0x73, 0x74, + 0x6f, 0x6d, 0x20, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x53, 0x70, 0x61, + 0x72, 0x6b, 0x20, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x2e, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, + 0x01, 0x01, 0x12, 0x03, 0x13, 0x08, 0x10, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x00, 0x12, + 0x03, 0x14, 0x02, 0x2c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x06, 0x12, 0x03, 0x14, + 0x02, 0x17, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x01, 0x12, 0x03, 0x14, 0x18, 0x27, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x00, 0x03, 0x12, 0x03, 0x14, 0x2a, 0x2b, 0x0a, 0x0b, + 0x0a, 0x04, 0x04, 0x01, 0x02, 0x01, 0x12, 0x03, 0x15, 0x02, 0x21, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x01, 0x02, 0x01, 0x05, 0x12, 0x03, 0x15, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, + 0x01, 0x01, 0x12, 0x03, 0x15, 0x09, 0x1c, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x01, 0x03, + 0x12, 0x03, 0x15, 0x1f, 0x20, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x02, 0x12, 0x03, 0x16, + 0x02, 0x17, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x02, 0x05, 0x12, 0x03, 0x16, 0x02, 0x08, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x02, 0x01, 0x12, 0x03, 0x16, 0x09, 0x12, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x01, 0x02, 0x02, 0x03, 0x12, 0x03, 0x16, 0x15, 0x16, 0x0a, 0x0b, 0x0a, 0x04, + 0x04, 0x01, 0x02, 0x03, 0x12, 0x03, 0x17, 0x02, 0x24, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, + 0x03, 0x06, 0x12, 0x03, 0x17, 0x02, 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x03, 0x01, + 0x12, 0x03, 0x17, 0x16, 0x1f, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x03, 0x03, 0x12, 0x03, + 0x17, 0x22, 0x23, 0x0a, 0x0b, 0x0a, 0x04, 0x04, 0x01, 
0x02, 0x04, 0x12, 0x03, 0x18, 0x02, 0x25, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x04, 0x06, 0x12, 0x03, 0x18, 0x02, 0x15, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x01, 0x02, 0x04, 0x01, 0x12, 0x03, 0x18, 0x16, 0x20, 0x0a, 0x0c, 0x0a, 0x05, + 0x04, 0x01, 0x02, 0x04, 0x03, 0x12, 0x03, 0x18, 0x23, 0x24, 0x0a, 0x2d, 0x0a, 0x04, 0x04, 0x01, + 0x02, 0x05, 0x12, 0x03, 0x19, 0x02, 0x1a, 0x22, 0x20, 0x20, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, + 0x6f, 0x72, 0x20, 0x70, 0x61, 0x74, 0x68, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x50, 0x79, 0x74, 0x68, + 0x6f, 0x6e, 0x20, 0x6a, 0x6f, 0x62, 0x73, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, + 0x05, 0x05, 0x12, 0x03, 0x19, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x05, 0x01, + 0x12, 0x03, 0x19, 0x09, 0x15, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x05, 0x03, 0x12, 0x03, + 0x19, 0x18, 0x19, 0x0a, 0x9d, 0x01, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x06, 0x12, 0x03, 0x1c, 0x02, + 0x2c, 0x1a, 0x8f, 0x01, 0x20, 0x44, 0x61, 0x74, 0x61, 0x62, 0x72, 0x69, 0x63, 0x6b, 0x73, 0x20, + 0x6a, 0x6f, 0x62, 0x20, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x2e, 0x0a, 0x20, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x20, 0x73, 0x74, 0x72, 0x75, 0x63, + 0x74, 0x75, 0x72, 0x65, 0x20, 0x63, 0x61, 0x6e, 0x20, 0x62, 0x65, 0x20, 0x66, 0x6f, 0x75, 0x6e, + 0x64, 0x20, 0x68, 0x65, 0x72, 0x65, 0x2e, 0x20, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, + 0x64, 0x6f, 0x63, 0x73, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x72, 0x69, 0x63, 0x6b, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x64, 0x65, 0x76, 0x2d, 0x74, 0x6f, 0x6f, 0x6c, 0x73, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x32, 0x2e, 0x30, 0x2f, 0x6a, 0x6f, 0x62, 0x73, 0x2e, 0x68, 0x74, 0x6d, 0x6c, 0x23, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2d, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x75, 0x72, + 0x65, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x06, 0x06, 0x12, 0x03, 0x1c, 0x02, + 0x18, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x06, 0x01, 0x12, 0x03, 0x1c, 0x19, 0x27, 
0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x06, 0x03, 0x12, 0x03, 0x1c, 0x2a, 0x2b, 0x0a, 0xaa, 0x01, + 0x0a, 0x04, 0x04, 0x01, 0x02, 0x07, 0x12, 0x03, 0x1f, 0x02, 0x1d, 0x1a, 0x9c, 0x01, 0x20, 0x44, + 0x61, 0x74, 0x61, 0x62, 0x72, 0x69, 0x63, 0x6b, 0x73, 0x20, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, + 0x20, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x2e, 0x20, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, + 0x64, 0x6f, 0x63, 0x73, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x72, 0x69, 0x63, 0x6b, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x64, 0x65, 0x76, 0x2d, 0x74, 0x6f, 0x6f, 0x6c, 0x73, 0x2f, 0x61, 0x70, + 0x69, 0x2f, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x68, 0x74, 0x6d, 0x6c, 0x0a, 0x20, 0x54, 0x68, + 0x69, 0x73, 0x20, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x20, 0x63, 0x61, 0x6e, 0x20, 0x62, 0x65, 0x20, + 0x73, 0x65, 0x74, 0x20, 0x69, 0x6e, 0x20, 0x65, 0x69, 0x74, 0x68, 0x65, 0x72, 0x20, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x6c, 0x6c, 0x65, 0x72, 0x20, 0x6f, 0x72, 0x20, + 0x66, 0x6c, 0x79, 0x74, 0x65, 0x6b, 0x69, 0x74, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, + 0x02, 0x07, 0x05, 0x12, 0x03, 0x1f, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x07, + 0x01, 0x12, 0x03, 0x1f, 0x09, 0x18, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x07, 0x03, 0x12, + 0x03, 0x1f, 0x1b, 0x1c, 0x0a, 0xa1, 0x01, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x08, 0x12, 0x03, 0x22, + 0x02, 0x20, 0x1a, 0x93, 0x01, 0x20, 0x44, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x20, 0x6e, 0x61, 0x6d, + 0x65, 0x20, 0x6f, 0x66, 0x20, 0x79, 0x6f, 0x75, 0x72, 0x20, 0x64, 0x65, 0x70, 0x6c, 0x6f, 0x79, + 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x20, 0x55, 0x73, 0x65, 0x20, 0x74, 0x68, 0x65, 0x20, 0x66, 0x6f, + 0x72, 0x6d, 0x20, 0x3c, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x3e, 0x2e, 0x63, 0x6c, 0x6f, + 0x75, 0x64, 0x2e, 0x64, 0x61, 0x74, 0x61, 0x62, 0x72, 0x69, 0x63, 0x6b, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x2e, 0x0a, 0x20, 0x54, 
0x68, 0x69, 0x73, 0x20, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, + 0x65, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x20, 0x63, 0x61, 0x6e, 0x20, 0x62, 0x65, 0x20, 0x73, 0x65, + 0x74, 0x20, 0x69, 0x6e, 0x20, 0x65, 0x69, 0x74, 0x68, 0x65, 0x72, 0x20, 0x66, 0x6c, 0x79, 0x74, + 0x65, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x6c, 0x6c, 0x65, 0x72, 0x20, 0x6f, 0x72, 0x20, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x6b, 0x69, 0x74, 0x2e, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x08, + 0x05, 0x12, 0x03, 0x22, 0x02, 0x08, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x08, 0x01, 0x12, + 0x03, 0x22, 0x09, 0x1b, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x08, 0x03, 0x12, 0x03, 0x22, + 0x1e, 0x1f, 0x0a, 0x30, 0x0a, 0x04, 0x04, 0x01, 0x02, 0x09, 0x12, 0x03, 0x25, 0x02, 0x1d, 0x1a, + 0x23, 0x20, 0x50, 0x6f, 0x64, 0x20, 0x53, 0x70, 0x65, 0x63, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, + 0x68, 0x65, 0x20, 0x53, 0x70, 0x61, 0x72, 0x6b, 0x20, 0x64, 0x72, 0x69, 0x76, 0x65, 0x72, 0x20, + 0x70, 0x6f, 0x64, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x09, 0x06, 0x12, 0x03, 0x25, + 0x02, 0x0d, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x09, 0x01, 0x12, 0x03, 0x25, 0x0e, 0x17, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x09, 0x03, 0x12, 0x03, 0x25, 0x1a, 0x1c, 0x0a, 0x32, + 0x0a, 0x04, 0x04, 0x01, 0x02, 0x0a, 0x12, 0x03, 0x28, 0x02, 0x1f, 0x1a, 0x25, 0x20, 0x50, 0x6f, + 0x64, 0x20, 0x53, 0x70, 0x65, 0x63, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x65, 0x20, 0x53, + 0x70, 0x61, 0x72, 0x6b, 0x20, 0x65, 0x78, 0x65, 0x63, 0x75, 0x74, 0x6f, 0x72, 0x20, 0x70, 0x6f, + 0x64, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x0a, 0x06, 0x12, 0x03, 0x28, 0x02, 0x0d, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x01, 0x02, 0x0a, 0x01, 0x12, 0x03, 0x28, 0x0e, 0x19, 0x0a, 0x0c, + 0x0a, 0x05, 0x04, 0x01, 0x02, 0x0a, 0x03, 0x12, 0x03, 0x28, 0x1c, 0x1e, 0x62, 0x06, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x33, 0x0a, 0xea, 0x08, 0x0a, 0x22, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, + 0x6c, 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x2f, 
0x74, 0x65, 0x6e, 0x73, 0x6f, + 0x72, 0x66, 0x6c, 0x6f, 0x77, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x11, 0x66, 0x6c, 0x79, + 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x22, 0xb4, + 0x01, 0x0a, 0x21, 0x44, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x54, 0x65, + 0x6e, 0x73, 0x6f, 0x72, 0x66, 0x6c, 0x6f, 0x77, 0x54, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, + 0x54, 0x61, 0x73, 0x6b, 0x12, 0x18, 0x0a, 0x07, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x07, 0x77, 0x6f, 0x72, 0x6b, 0x65, 0x72, 0x73, 0x12, 0x1f, + 0x0a, 0x0b, 0x70, 0x73, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x0a, 0x70, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, + 0x25, 0x0a, 0x0e, 0x63, 0x68, 0x69, 0x65, 0x66, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0d, 0x63, 0x68, 0x69, 0x65, 0x66, 0x52, 0x65, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x12, 0x2d, 0x0a, 0x12, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, + 0x74, 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x11, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x73, 0x42, 0xc4, 0x01, 0x0a, 0x15, 0x63, 0x6f, 0x6d, 0x2e, 0x66, 0x6c, + 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x42, + 0x0f, 0x54, 0x65, 0x6e, 0x73, 0x6f, 0x72, 0x66, 0x6c, 0x6f, 0x77, 0x50, 0x72, 0x6f, 0x74, 0x6f, + 0x50, 0x01, 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x66, + 0x6c, 0x79, 0x74, 0x65, 0x6f, 0x72, 0x67, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x2f, 0x76, 0x32, + 0x2f, 0x67, 0x65, 0x6e, 0x2f, 0x67, 0x6f, 0x2f, 0x66, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, + 0x32, 0x2f, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0xa2, 0x02, 0x03, 0x46, 0x50, 0x58, 0xaa, + 0x02, 
0x11, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x2e, 0x50, 0x6c, 0x75, 0x67, + 0x69, 0x6e, 0x73, 0xca, 0x02, 0x11, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, 0x64, 0x6c, 0x32, 0x5c, + 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0xe2, 0x02, 0x1d, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x5c, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x5c, 0x47, 0x50, 0x42, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xea, 0x02, 0x12, 0x46, 0x6c, 0x79, 0x74, 0x65, 0x69, + 0x64, 0x6c, 0x32, 0x3a, 0x3a, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x73, 0x4a, 0xaa, 0x05, 0x0a, + 0x06, 0x12, 0x04, 0x00, 0x00, 0x11, 0x01, 0x0a, 0x08, 0x0a, 0x01, 0x0c, 0x12, 0x03, 0x00, 0x00, + 0x12, 0x0a, 0x08, 0x0a, 0x01, 0x02, 0x12, 0x03, 0x02, 0x00, 0x1a, 0x0a, 0x08, 0x0a, 0x01, 0x08, + 0x12, 0x03, 0x04, 0x00, 0x4c, 0x0a, 0x09, 0x0a, 0x02, 0x08, 0x0b, 0x12, 0x03, 0x04, 0x00, 0x4c, + 0x0a, 0x75, 0x0a, 0x02, 0x04, 0x00, 0x12, 0x04, 0x07, 0x00, 0x11, 0x01, 0x1a, 0x69, 0x20, 0x43, + 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x20, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x20, 0x66, 0x6f, 0x72, 0x20, + 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x20, 0x74, 0x68, 0x61, 0x74, 0x20, 0x65, 0x6e, 0x61, 0x62, + 0x6c, 0x65, 0x73, 0x20, 0x64, 0x69, 0x73, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x64, 0x20, + 0x74, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x20, 0x75, 0x73, 0x69, 0x6e, 0x67, 0x20, 0x68, + 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x6b, 0x75, 0x62, 0x65, 0x66, 0x6c, 0x6f, 0x77, 0x2f, 0x74, 0x66, 0x2d, 0x6f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x0a, 0x0a, 0x0a, 0x0a, 0x03, 0x04, 0x00, 0x01, 0x12, 0x03, + 0x07, 0x08, 0x29, 0x0a, 0x4c, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x00, 0x12, 0x03, 0x09, 0x02, 0x14, + 0x1a, 0x3f, 0x20, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x20, 0x6f, 0x66, 0x20, 0x77, 0x6f, 0x72, + 0x6b, 0x65, 0x72, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x20, 0x73, 0x70, 0x61, + 0x77, 0x6e, 0x65, 0x64, 0x20, 0x69, 0x6e, 
0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x6c, 0x75, 0x73, + 0x74, 0x65, 0x72, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x6a, 0x6f, 0x62, + 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x05, 0x12, 0x03, 0x09, 0x02, 0x07, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x00, 0x01, 0x12, 0x03, 0x09, 0x08, 0x0f, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x00, 0x02, 0x00, 0x03, 0x12, 0x03, 0x09, 0x12, 0x13, 0x0a, 0x60, 0x0a, 0x04, 0x04, + 0x00, 0x02, 0x01, 0x12, 0x03, 0x0c, 0x02, 0x18, 0x1a, 0x53, 0x20, 0x50, 0x53, 0x20, 0x2d, 0x3e, + 0x20, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x20, 0x73, 0x65, 0x72, 0x76, 0x65, + 0x72, 0x0a, 0x20, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x20, 0x6f, 0x66, 0x20, 0x70, 0x73, 0x20, + 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x20, 0x73, 0x70, 0x61, 0x77, 0x6e, 0x65, 0x64, + 0x20, 0x69, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x20, + 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x6a, 0x6f, 0x62, 0x0a, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x00, 0x02, 0x01, 0x05, 0x12, 0x03, 0x0c, 0x02, 0x07, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x00, 0x02, 0x01, 0x01, 0x12, 0x03, 0x0c, 0x08, 0x13, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, + 0x01, 0x03, 0x12, 0x03, 0x0c, 0x16, 0x17, 0x0a, 0x4b, 0x0a, 0x04, 0x04, 0x00, 0x02, 0x02, 0x12, + 0x03, 0x0e, 0x02, 0x1b, 0x1a, 0x3e, 0x20, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x20, 0x6f, 0x66, + 0x20, 0x63, 0x68, 0x69, 0x65, 0x66, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x20, + 0x73, 0x70, 0x61, 0x77, 0x6e, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, + 0x6a, 0x6f, 0x62, 0x0a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x02, 0x05, 0x12, 0x03, 0x0e, + 0x02, 0x07, 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x02, 0x01, 0x12, 0x03, 0x0e, 0x08, 0x16, + 0x0a, 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x02, 0x03, 0x12, 0x03, 0x0e, 0x19, 
0x1a, 0x0a, 0x4f, + 0x0a, 0x04, 0x04, 0x00, 0x02, 0x03, 0x12, 0x03, 0x10, 0x02, 0x1f, 0x1a, 0x42, 0x20, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x20, 0x6f, 0x66, 0x20, 0x65, 0x76, 0x61, 0x6c, 0x75, 0x61, 0x74, 0x6f, + 0x72, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x73, 0x20, 0x73, 0x70, 0x61, 0x77, 0x6e, + 0x65, 0x64, 0x20, 0x69, 0x6e, 0x20, 0x74, 0x68, 0x65, 0x20, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x20, 0x66, 0x6f, 0x72, 0x20, 0x74, 0x68, 0x69, 0x73, 0x20, 0x6a, 0x6f, 0x62, 0x0a, 0x0a, + 0x0c, 0x0a, 0x05, 0x04, 0x00, 0x02, 0x03, 0x05, 0x12, 0x03, 0x10, 0x02, 0x07, 0x0a, 0x0c, 0x0a, + 0x05, 0x04, 0x00, 0x02, 0x03, 0x01, 0x12, 0x03, 0x10, 0x08, 0x1a, 0x0a, 0x0c, 0x0a, 0x05, 0x04, + 0x00, 0x02, 0x03, 0x03, 0x12, 0x03, 0x10, 0x1d, 0x1e, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, ]; // @@protoc_insertion_point(module) \ No newline at end of file diff --git a/gen/ts/flyteidl2/cacheservice/cacheservice_pb.ts b/gen/ts/flyteidl2/cacheservice/cacheservice_pb.ts new file mode 100644 index 0000000000..9939d89ec6 --- /dev/null +++ b/gen/ts/flyteidl2/cacheservice/cacheservice_pb.ts @@ -0,0 +1,506 @@ +// @generated by protoc-gen-es v2.2.5 with parameter "target=ts,import_extension=.ts" +// @generated from file flyteidl2/cacheservice/cacheservice.proto (package flyteidl2.cacheservice, syntax proto3) +/* eslint-disable */ + +import type { GenFile, GenMessage, GenService } from "@bufbuild/protobuf/codegenv1"; +import { fileDesc, messageDesc, serviceDesc } from "@bufbuild/protobuf/codegenv1"; +import type { Identifier } from "../core/identifier_pb.ts"; +import { file_flyteidl2_core_identifier } from "../core/identifier_pb.ts"; +import type { LiteralMap } from "../core/literals_pb.ts"; +import { file_flyteidl2_core_literals } from "../core/literals_pb.ts"; +import type { Duration, Timestamp } from "@bufbuild/protobuf/wkt"; +import { file_google_protobuf_duration, file_google_protobuf_timestamp } from "@bufbuild/protobuf/wkt"; +import type { Message } from 
"@bufbuild/protobuf"; + +/** + * Describes the file flyteidl2/cacheservice/cacheservice.proto. + */ +export const file_flyteidl2_cacheservice_cacheservice: GenFile = /*@__PURE__*/ + fileDesc("CilmbHl0ZWlkbDIvY2FjaGVzZXJ2aWNlL2NhY2hlc2VydmljZS5wcm90bxIWZmx5dGVpZGwyLmNhY2hlc2VydmljZSKDAQoOS2V5TWFwTWV0YWRhdGESQgoGdmFsdWVzGAEgAygLMjIuZmx5dGVpZGwyLmNhY2hlc2VydmljZS5LZXlNYXBNZXRhZGF0YS5WYWx1ZXNFbnRyeRotCgtWYWx1ZXNFbnRyeRILCgNrZXkYASABKAkSDQoFdmFsdWUYAiABKAk6AjgBIt8BCghNZXRhZGF0YRI1ChFzb3VyY2VfaWRlbnRpZmllchgBIAEoCzIaLmZseXRlaWRsMi5jb3JlLklkZW50aWZpZXISNwoHa2V5X21hcBgCIAEoCzImLmZseXRlaWRsMi5jYWNoZXNlcnZpY2UuS2V5TWFwTWV0YWRhdGESLgoKY3JlYXRlZF9hdBgDIAEoCzIaLmdvb2dsZS5wcm90b2J1Zi5UaW1lc3RhbXASMwoPbGFzdF91cGRhdGVkX2F0GAQgASgLMhouZ29vZ2xlLnByb3RvYnVmLlRpbWVzdGFtcCKZAQoMQ2FjaGVkT3V0cHV0EjUKD291dHB1dF9saXRlcmFscxgBIAEoCzIaLmZseXRlaWRsMi5jb3JlLkxpdGVyYWxNYXBIABIUCgpvdXRwdXRfdXJpGAIgASgJSAASMgoIbWV0YWRhdGEYAyABKAsyIC5mbHl0ZWlkbDIuY2FjaGVzZXJ2aWNlLk1ldGFkYXRhQggKBm91dHB1dCIeCg9HZXRDYWNoZVJlcXVlc3QSCwoDa2V5GAEgASgJIkgKEEdldENhY2hlUmVzcG9uc2USNAoGb3V0cHV0GAEgASgLMiQuZmx5dGVpZGwyLmNhY2hlc2VydmljZS5DYWNoZWRPdXRwdXQiZQoPT3ZlcndyaXRlT3V0cHV0EhEKCW92ZXJ3cml0ZRgBIAEoCBITCgtkZWxldGVfYmxvYhgCIAEoCBIqCgdtYXhfYWdlGAMgASgLMhkuZ29vZ2xlLnByb3RvYnVmLkR1cmF0aW9uIpABCg9QdXRDYWNoZVJlcXVlc3QSCwoDa2V5GAEgASgJEjQKBm91dHB1dBgCIAEoCzIkLmZseXRlaWRsMi5jYWNoZXNlcnZpY2UuQ2FjaGVkT3V0cHV0EjoKCW92ZXJ3cml0ZRgDIAEoCzInLmZseXRlaWRsMi5jYWNoZXNlcnZpY2UuT3ZlcndyaXRlT3V0cHV0IhIKEFB1dENhY2hlUmVzcG9uc2UiIQoSRGVsZXRlQ2FjaGVSZXF1ZXN0EgsKA2tleRgBIAEoCSIVChNEZWxldGVDYWNoZVJlc3BvbnNlIpMBCgtSZXNlcnZhdGlvbhILCgNrZXkYASABKAkSEAoIb3duZXJfaWQYAiABKAkSNQoSaGVhcnRiZWF0X2ludGVydmFsGAMgASgLMhkuZ29vZ2xlLnByb3RvYnVmLkR1cmF0aW9uEi4KCmV4cGlyZXNfYXQYBCABKAsyGi5nb29nbGUucHJvdG9idWYuVGltZXN0YW1wInUKHUdldE9yRXh0ZW5kUmVzZXJ2YXRpb25SZXF1ZXN0EgsKA2tleRgBIAEoCRIQCghvd25lcl9pZBgCIAEoCRI1ChJoZWFydGJlYXRfaW50ZXJ2YWwYAyABKAsyGS5nb29nbGUucHJvdG9idWYuRHVyYXRpb24iWgoeR2V0T3JFeHRlbmRSZXNlcnZhdGlvblJlc3BvbnNlEjgKC3Jlc2VydmF0aW9uGAEgASgLMiMuZmx5dGVpZGwyLmNh
Y2hlc2VydmljZS5SZXNlcnZhdGlvbiI6ChlSZWxlYXNlUmVzZXJ2YXRpb25SZXF1ZXN0EgsKA2tleRgBIAEoCRIQCghvd25lcl9pZBgCIAEoCSIcChpSZWxlYXNlUmVzZXJ2YXRpb25SZXNwb25zZTKsBAoMQ2FjaGVTZXJ2aWNlElgKA0dldBInLmZseXRlaWRsMi5jYWNoZXNlcnZpY2UuR2V0Q2FjaGVSZXF1ZXN0GiguZmx5dGVpZGwyLmNhY2hlc2VydmljZS5HZXRDYWNoZVJlc3BvbnNlElgKA1B1dBInLmZseXRlaWRsMi5jYWNoZXNlcnZpY2UuUHV0Q2FjaGVSZXF1ZXN0GiguZmx5dGVpZGwyLmNhY2hlc2VydmljZS5QdXRDYWNoZVJlc3BvbnNlEmEKBkRlbGV0ZRIqLmZseXRlaWRsMi5jYWNoZXNlcnZpY2UuRGVsZXRlQ2FjaGVSZXF1ZXN0GisuZmx5dGVpZGwyLmNhY2hlc2VydmljZS5EZWxldGVDYWNoZVJlc3BvbnNlEocBChZHZXRPckV4dGVuZFJlc2VydmF0aW9uEjUuZmx5dGVpZGwyLmNhY2hlc2VydmljZS5HZXRPckV4dGVuZFJlc2VydmF0aW9uUmVxdWVzdBo2LmZseXRlaWRsMi5jYWNoZXNlcnZpY2UuR2V0T3JFeHRlbmRSZXNlcnZhdGlvblJlc3BvbnNlEnsKElJlbGVhc2VSZXNlcnZhdGlvbhIxLmZseXRlaWRsMi5jYWNoZXNlcnZpY2UuUmVsZWFzZVJlc2VydmF0aW9uUmVxdWVzdBoyLmZseXRlaWRsMi5jYWNoZXNlcnZpY2UuUmVsZWFzZVJlc2VydmF0aW9uUmVzcG9uc2VC5gEKGmNvbS5mbHl0ZWlkbDIuY2FjaGVzZXJ2aWNlQhFDYWNoZXNlcnZpY2VQcm90b0gCUAFaOmdpdGh1Yi5jb20vZmx5dGVvcmcvZmx5dGUvdjIvZ2VuL2dvL2ZseXRlaWRsMi9jYWNoZXNlcnZpY2WiAgNGQ1iqAhZGbHl0ZWlkbDIuQ2FjaGVzZXJ2aWNlygIWRmx5dGVpZGwyXENhY2hlc2VydmljZeICIkZseXRlaWRsMlxDYWNoZXNlcnZpY2VcR1BCTWV0YWRhdGHqAhdGbHl0ZWlkbDI6OkNhY2hlc2VydmljZWIGcHJvdG8z", [file_flyteidl2_core_identifier, file_flyteidl2_core_literals, file_google_protobuf_duration, file_google_protobuf_timestamp]); + +/** + * + * Additional metadata as key-value pairs + * + * @generated from message flyteidl2.cacheservice.KeyMapMetadata + */ +export type KeyMapMetadata = Message<"flyteidl2.cacheservice.KeyMapMetadata"> & { + /** + * Additional metadata as key-value pairs + * + * @generated from field: map values = 1; + */ + values: { [key: string]: string }; +}; + +/** + * Describes the message flyteidl2.cacheservice.KeyMapMetadata. + * Use `create(KeyMapMetadataSchema)` to create a new message. 
+ */ +export const KeyMapMetadataSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_cacheservice_cacheservice, 0); + +/** + * + * Metadata for cached outputs, including the source identifier and timestamps. + * + * @generated from message flyteidl2.cacheservice.Metadata + */ +export type Metadata = Message<"flyteidl2.cacheservice.Metadata"> & { + /** + * Source task or workflow identifier + * + * @generated from field: flyteidl2.core.Identifier source_identifier = 1; + */ + sourceIdentifier?: Identifier; + + /** + * Additional metadata as key-value pairs + * + * @generated from field: flyteidl2.cacheservice.KeyMapMetadata key_map = 2; + */ + keyMap?: KeyMapMetadata; + + /** + * Creation timestamp + * + * @generated from field: google.protobuf.Timestamp created_at = 3; + */ + createdAt?: Timestamp; + + /** + * Last update timestamp + * + * @generated from field: google.protobuf.Timestamp last_updated_at = 4; + */ + lastUpdatedAt?: Timestamp; +}; + +/** + * Describes the message flyteidl2.cacheservice.Metadata. + * Use `create(MetadataSchema)` to create a new message. + */ +export const MetadataSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_cacheservice_cacheservice, 1); + +/** + * + * Represents cached output, either as literals or an URI, with associated metadata. 
+ * + * @generated from message flyteidl2.cacheservice.CachedOutput + */ +export type CachedOutput = Message<"flyteidl2.cacheservice.CachedOutput"> & { + /** + * @generated from oneof flyteidl2.cacheservice.CachedOutput.output + */ + output: { + /** + * Output literals + * + * @generated from field: flyteidl2.core.LiteralMap output_literals = 1; + */ + value: LiteralMap; + case: "outputLiterals"; + } | { + /** + * URI to output data + * + * @generated from field: string output_uri = 2; + */ + value: string; + case: "outputUri"; + } | { case: undefined; value?: undefined }; + + /** + * Associated metadata + * + * @generated from field: flyteidl2.cacheservice.Metadata metadata = 3; + */ + metadata?: Metadata; +}; + +/** + * Describes the message flyteidl2.cacheservice.CachedOutput. + * Use `create(CachedOutputSchema)` to create a new message. + */ +export const CachedOutputSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_cacheservice_cacheservice, 2); + +/** + * + * Request to retrieve cached data by key. + * + * @generated from message flyteidl2.cacheservice.GetCacheRequest + */ +export type GetCacheRequest = Message<"flyteidl2.cacheservice.GetCacheRequest"> & { + /** + * Cache key + * + * @generated from field: string key = 1; + */ + key: string; +}; + +/** + * Describes the message flyteidl2.cacheservice.GetCacheRequest. + * Use `create(GetCacheRequestSchema)` to create a new message. + */ +export const GetCacheRequestSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_cacheservice_cacheservice, 3); + +/** + * + * Response with cached data for a given key. + * + * @generated from message flyteidl2.cacheservice.GetCacheResponse + */ +export type GetCacheResponse = Message<"flyteidl2.cacheservice.GetCacheResponse"> & { + /** + * Cached output + * + * @generated from field: flyteidl2.cacheservice.CachedOutput output = 1; + */ + output?: CachedOutput; +}; + +/** + * Describes the message flyteidl2.cacheservice.GetCacheResponse. 
+ * Use `create(GetCacheResponseSchema)` to create a new message. + */ +export const GetCacheResponseSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_cacheservice_cacheservice, 4); + +/** + * @generated from message flyteidl2.cacheservice.OverwriteOutput + */ +export type OverwriteOutput = Message<"flyteidl2.cacheservice.OverwriteOutput"> & { + /** + * Overwrite flag + * + * @generated from field: bool overwrite = 1; + */ + overwrite: boolean; + + /** + * Delete existing blob + * + * @generated from field: bool delete_blob = 2; + */ + deleteBlob: boolean; + + /** + * Maximum age of the cached output since last update + * + * @generated from field: google.protobuf.Duration max_age = 3; + */ + maxAge?: Duration; +}; + +/** + * Describes the message flyteidl2.cacheservice.OverwriteOutput. + * Use `create(OverwriteOutputSchema)` to create a new message. + */ +export const OverwriteOutputSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_cacheservice_cacheservice, 5); + +/** + * + * Request to store/update cached data by key. + * + * @generated from message flyteidl2.cacheservice.PutCacheRequest + */ +export type PutCacheRequest = Message<"flyteidl2.cacheservice.PutCacheRequest"> & { + /** + * Cache key + * + * @generated from field: string key = 1; + */ + key: string; + + /** + * Output to cache + * + * @generated from field: flyteidl2.cacheservice.CachedOutput output = 2; + */ + output?: CachedOutput; + + /** + * Overwrite flag if exists + * + * @generated from field: flyteidl2.cacheservice.OverwriteOutput overwrite = 3; + */ + overwrite?: OverwriteOutput; +}; + +/** + * Describes the message flyteidl2.cacheservice.PutCacheRequest. + * Use `create(PutCacheRequestSchema)` to create a new message. + */ +export const PutCacheRequestSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_cacheservice_cacheservice, 6); + +/** + * + * Response message of cache store/update operation. 
+ * + * Empty, success indicated by no errors + * + * @generated from message flyteidl2.cacheservice.PutCacheResponse + */ +export type PutCacheResponse = Message<"flyteidl2.cacheservice.PutCacheResponse"> & { +}; + +/** + * Describes the message flyteidl2.cacheservice.PutCacheResponse. + * Use `create(PutCacheResponseSchema)` to create a new message. + */ +export const PutCacheResponseSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_cacheservice_cacheservice, 7); + +/** + * + * Request to delete cached data by key. + * + * @generated from message flyteidl2.cacheservice.DeleteCacheRequest + */ +export type DeleteCacheRequest = Message<"flyteidl2.cacheservice.DeleteCacheRequest"> & { + /** + * Cache key + * + * @generated from field: string key = 1; + */ + key: string; +}; + +/** + * Describes the message flyteidl2.cacheservice.DeleteCacheRequest. + * Use `create(DeleteCacheRequestSchema)` to create a new message. + */ +export const DeleteCacheRequestSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_cacheservice_cacheservice, 8); + +/** + * + * Response message of cache deletion operation. + * + * Empty, success indicated by no errors + * + * @generated from message flyteidl2.cacheservice.DeleteCacheResponse + */ +export type DeleteCacheResponse = Message<"flyteidl2.cacheservice.DeleteCacheResponse"> & { +}; + +/** + * Describes the message flyteidl2.cacheservice.DeleteCacheResponse. + * Use `create(DeleteCacheResponseSchema)` to create a new message. + */ +export const DeleteCacheResponseSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_cacheservice_cacheservice, 9); + +/** + * A reservation including owner, heartbeat interval, expiration timestamp, and various metadata. 
+ * + * @generated from message flyteidl2.cacheservice.Reservation + */ +export type Reservation = Message<"flyteidl2.cacheservice.Reservation"> & { + /** + * The unique ID for the reservation - same as the cache key + * + * @generated from field: string key = 1; + */ + key: string; + + /** + * The unique ID of the owner for the reservation + * + * @generated from field: string owner_id = 2; + */ + ownerId: string; + + /** + * Requested reservation extension heartbeat interval + * + * @generated from field: google.protobuf.Duration heartbeat_interval = 3; + */ + heartbeatInterval?: Duration; + + /** + * Expiration timestamp of this reservation + * + * @generated from field: google.protobuf.Timestamp expires_at = 4; + */ + expiresAt?: Timestamp; +}; + +/** + * Describes the message flyteidl2.cacheservice.Reservation. + * Use `create(ReservationSchema)` to create a new message. + */ +export const ReservationSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_cacheservice_cacheservice, 10); + +/** + * + * Request to get or extend a reservation for a cache key + * + * @generated from message flyteidl2.cacheservice.GetOrExtendReservationRequest + */ +export type GetOrExtendReservationRequest = Message<"flyteidl2.cacheservice.GetOrExtendReservationRequest"> & { + /** + * The unique ID for the reservation - same as the cache key + * + * @generated from field: string key = 1; + */ + key: string; + + /** + * The unique ID of the owner for the reservation + * + * @generated from field: string owner_id = 2; + */ + ownerId: string; + + /** + * Requested reservation extension heartbeat interval + * + * @generated from field: google.protobuf.Duration heartbeat_interval = 3; + */ + heartbeatInterval?: Duration; +}; + +/** + * Describes the message flyteidl2.cacheservice.GetOrExtendReservationRequest. + * Use `create(GetOrExtendReservationRequestSchema)` to create a new message. 
+ */ +export const GetOrExtendReservationRequestSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_cacheservice_cacheservice, 11); + +/** + * + * Request to get or extend a reservation for a cache key + * + * @generated from message flyteidl2.cacheservice.GetOrExtendReservationResponse + */ +export type GetOrExtendReservationResponse = Message<"flyteidl2.cacheservice.GetOrExtendReservationResponse"> & { + /** + * The reservation that was created or extended + * + * @generated from field: flyteidl2.cacheservice.Reservation reservation = 1; + */ + reservation?: Reservation; +}; + +/** + * Describes the message flyteidl2.cacheservice.GetOrExtendReservationResponse. + * Use `create(GetOrExtendReservationResponseSchema)` to create a new message. + */ +export const GetOrExtendReservationResponseSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_cacheservice_cacheservice, 12); + +/** + * + * Request to release the reservation for a cache key + * + * @generated from message flyteidl2.cacheservice.ReleaseReservationRequest + */ +export type ReleaseReservationRequest = Message<"flyteidl2.cacheservice.ReleaseReservationRequest"> & { + /** + * The unique ID for the reservation - same as the cache key + * + * @generated from field: string key = 1; + */ + key: string; + + /** + * The unique ID of the owner for the reservation + * + * @generated from field: string owner_id = 2; + */ + ownerId: string; +}; + +/** + * Describes the message flyteidl2.cacheservice.ReleaseReservationRequest. + * Use `create(ReleaseReservationRequestSchema)` to create a new message. + */ +export const ReleaseReservationRequestSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_cacheservice_cacheservice, 13); + +/** + * + * Response message of release reservation operation. 
+ * + * Empty, success indicated by no errors + * + * @generated from message flyteidl2.cacheservice.ReleaseReservationResponse + */ +export type ReleaseReservationResponse = Message<"flyteidl2.cacheservice.ReleaseReservationResponse"> & { +}; + +/** + * Describes the message flyteidl2.cacheservice.ReleaseReservationResponse. + * Use `create(ReleaseReservationResponseSchema)` to create a new message. + */ +export const ReleaseReservationResponseSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_cacheservice_cacheservice, 14); + +/** + * + * CacheService defines operations for cache management including retrieval, storage, and deletion of cached task/workflow outputs. + * + * @generated from service flyteidl2.cacheservice.CacheService + */ +export const CacheService: GenService<{ + /** + * Retrieves cached data by key. + * + * @generated from rpc flyteidl2.cacheservice.CacheService.Get + */ + get: { + methodKind: "unary"; + input: typeof GetCacheRequestSchema; + output: typeof GetCacheResponseSchema; + }, + /** + * Stores or updates cached data by key. + * + * @generated from rpc flyteidl2.cacheservice.CacheService.Put + */ + put: { + methodKind: "unary"; + input: typeof PutCacheRequestSchema; + output: typeof PutCacheResponseSchema; + }, + /** + * Deletes cached data by key. 
+ * + * @generated from rpc flyteidl2.cacheservice.CacheService.Delete + */ + delete: { + methodKind: "unary"; + input: typeof DeleteCacheRequestSchema; + output: typeof DeleteCacheResponseSchema; + }, + /** + * Get or extend a reservation for a cache key + * + * @generated from rpc flyteidl2.cacheservice.CacheService.GetOrExtendReservation + */ + getOrExtendReservation: { + methodKind: "unary"; + input: typeof GetOrExtendReservationRequestSchema; + output: typeof GetOrExtendReservationResponseSchema; + }, + /** + * Release the reservation for a cache key + * + * @generated from rpc flyteidl2.cacheservice.CacheService.ReleaseReservation + */ + releaseReservation: { + methodKind: "unary"; + input: typeof ReleaseReservationRequestSchema; + output: typeof ReleaseReservationResponseSchema; + }, +}> = /*@__PURE__*/ + serviceDesc(file_flyteidl2_cacheservice_cacheservice, 0); + diff --git a/gen/ts/flyteidl2/cacheservice/v2/cacheservice_pb.ts b/gen/ts/flyteidl2/cacheservice/v2/cacheservice_pb.ts new file mode 100644 index 0000000000..82b274e027 --- /dev/null +++ b/gen/ts/flyteidl2/cacheservice/v2/cacheservice_pb.ts @@ -0,0 +1,249 @@ +// @generated by protoc-gen-es v2.2.5 with parameter "target=ts,import_extension=.ts" +// @generated from file flyteidl2/cacheservice/v2/cacheservice.proto (package flyteidl2.cacheservice.v2, syntax proto3) +/* eslint-disable */ + +import type { GenFile, GenMessage, GenService } from "@bufbuild/protobuf/codegenv1"; +import { fileDesc, messageDesc, serviceDesc } from "@bufbuild/protobuf/codegenv1"; +import { file_buf_validate_validate } from "../../../buf/validate/validate_pb.ts"; +import type { DeleteCacheRequest as DeleteCacheRequest$1, DeleteCacheResponseSchema, GetCacheRequest as GetCacheRequest$1, GetCacheResponseSchema, GetOrExtendReservationRequest as GetOrExtendReservationRequest$1, GetOrExtendReservationResponseSchema, PutCacheRequest as PutCacheRequest$1, PutCacheResponseSchema, ReleaseReservationRequest as 
ReleaseReservationRequest$1, ReleaseReservationResponseSchema } from "../cacheservice_pb.ts"; +import { file_flyteidl2_cacheservice_cacheservice } from "../cacheservice_pb.ts"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file flyteidl2/cacheservice/v2/cacheservice.proto. + */ +export const file_flyteidl2_cacheservice_v2_cacheservice: GenFile = /*@__PURE__*/ + fileDesc("CixmbHl0ZWlkbDIvY2FjaGVzZXJ2aWNlL3YyL2NhY2hlc2VydmljZS5wcm90bxIZZmx5dGVpZGwyLmNhY2hlc2VydmljZS52MiJVCgpJZGVudGlmaWVyEhQKA29yZxgBIAEoCUIHukgEcgIQARIYCgdwcm9qZWN0GAIgASgJQge6SARyAhABEhcKBmRvbWFpbhgDIAEoCUIHukgEcgIQASKTAQoPR2V0Q2FjaGVSZXF1ZXN0Ej0KDGJhc2VfcmVxdWVzdBgBIAEoCzInLmZseXRlaWRsMi5jYWNoZXNlcnZpY2UuR2V0Q2FjaGVSZXF1ZXN0EkEKCmlkZW50aWZpZXIYAiABKAsyJS5mbHl0ZWlkbDIuY2FjaGVzZXJ2aWNlLnYyLklkZW50aWZpZXJCBrpIA8gBASKTAQoPUHV0Q2FjaGVSZXF1ZXN0Ej0KDGJhc2VfcmVxdWVzdBgBIAEoCzInLmZseXRlaWRsMi5jYWNoZXNlcnZpY2UuUHV0Q2FjaGVSZXF1ZXN0EkEKCmlkZW50aWZpZXIYAiABKAsyJS5mbHl0ZWlkbDIuY2FjaGVzZXJ2aWNlLnYyLklkZW50aWZpZXJCBrpIA8gBASKZAQoSRGVsZXRlQ2FjaGVSZXF1ZXN0EkAKDGJhc2VfcmVxdWVzdBgBIAEoCzIqLmZseXRlaWRsMi5jYWNoZXNlcnZpY2UuRGVsZXRlQ2FjaGVSZXF1ZXN0EkEKCmlkZW50aWZpZXIYAiABKAsyJS5mbHl0ZWlkbDIuY2FjaGVzZXJ2aWNlLnYyLklkZW50aWZpZXJCBrpIA8gBASKvAQodR2V0T3JFeHRlbmRSZXNlcnZhdGlvblJlcXVlc3QSSwoMYmFzZV9yZXF1ZXN0GAEgASgLMjUuZmx5dGVpZGwyLmNhY2hlc2VydmljZS5HZXRPckV4dGVuZFJlc2VydmF0aW9uUmVxdWVzdBJBCgppZGVudGlmaWVyGAIgASgLMiUuZmx5dGVpZGwyLmNhY2hlc2VydmljZS52Mi5JZGVudGlmaWVyQga6SAPIAQEipwEKGVJlbGVhc2VSZXNlcnZhdGlvblJlcXVlc3QSRwoMYmFzZV9yZXF1ZXN0GAEgASgLMjEuZmx5dGVpZGwyLmNhY2hlc2VydmljZS5SZWxlYXNlUmVzZXJ2YXRpb25SZXF1ZXN0EkEKCmlkZW50aWZpZXIYAiABKAsyJS5mbHl0ZWlkbDIuY2FjaGVzZXJ2aWNlLnYyLklkZW50aWZpZXJCBrpIA8gBATK7BAoMQ2FjaGVTZXJ2aWNlElsKA0dldBIqLmZseXRlaWRsMi5jYWNoZXNlcnZpY2UudjIuR2V0Q2FjaGVSZXF1ZXN0GiguZmx5dGVpZGwyLmNhY2hlc2VydmljZS5HZXRDYWNoZVJlc3BvbnNlElsKA1B1dBIqLmZseXRlaWRsMi5jYWNoZXNlcnZpY2UudjIuUHV0Q2FjaGVSZXF1ZXN0GiguZmx5dGVpZGwyLmNhY2hlc2VydmljZS5QdXRDYWNoZVJlc3BvbnNlEmQKBkRlbGV0ZRItLmZseXRlaWRsMi5jYWNoZXN
lcnZpY2UudjIuRGVsZXRlQ2FjaGVSZXF1ZXN0GisuZmx5dGVpZGwyLmNhY2hlc2VydmljZS5EZWxldGVDYWNoZVJlc3BvbnNlEooBChZHZXRPckV4dGVuZFJlc2VydmF0aW9uEjguZmx5dGVpZGwyLmNhY2hlc2VydmljZS52Mi5HZXRPckV4dGVuZFJlc2VydmF0aW9uUmVxdWVzdBo2LmZseXRlaWRsMi5jYWNoZXNlcnZpY2UuR2V0T3JFeHRlbmRSZXNlcnZhdGlvblJlc3BvbnNlEn4KElJlbGVhc2VSZXNlcnZhdGlvbhI0LmZseXRlaWRsMi5jYWNoZXNlcnZpY2UudjIuUmVsZWFzZVJlc2VydmF0aW9uUmVxdWVzdBoyLmZseXRlaWRsMi5jYWNoZXNlcnZpY2UuUmVsZWFzZVJlc2VydmF0aW9uUmVzcG9uc2VC+QEKHWNvbS5mbHl0ZWlkbDIuY2FjaGVzZXJ2aWNlLnYyQhFDYWNoZXNlcnZpY2VQcm90b0gCUAFaPWdpdGh1Yi5jb20vZmx5dGVvcmcvZmx5dGUvdjIvZ2VuL2dvL2ZseXRlaWRsMi9jYWNoZXNlcnZpY2UvdjKiAgNGQ1iqAhlGbHl0ZWlkbDIuQ2FjaGVzZXJ2aWNlLlYyygIZRmx5dGVpZGwyXENhY2hlc2VydmljZVxWMuICJUZseXRlaWRsMlxDYWNoZXNlcnZpY2VcVjJcR1BCTWV0YWRhdGHqAhtGbHl0ZWlkbDI6OkNhY2hlc2VydmljZTo6VjJiBnByb3RvMw", [file_buf_validate_validate, file_flyteidl2_cacheservice_cacheservice]); + +/** + * + * Identifier for cache operations, including org, project, and domain. + * This is used to scope cache operations to specific organizational contexts. + * + * @generated from message flyteidl2.cacheservice.v2.Identifier + */ +export type Identifier = Message<"flyteidl2.cacheservice.v2.Identifier"> & { + /** + * Organization identifier + * + * @generated from field: string org = 1; + */ + org: string; + + /** + * Project identifier + * + * @generated from field: string project = 2; + */ + project: string; + + /** + * Domain identifier + * + * @generated from field: string domain = 3; + */ + domain: string; +}; + +/** + * Describes the message flyteidl2.cacheservice.v2.Identifier. + * Use `create(IdentifierSchema)` to create a new message. + */ +export const IdentifierSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_cacheservice_v2_cacheservice, 0); + +/** + * + * Request to retrieve cached data by key. 
+ * + * @generated from message flyteidl2.cacheservice.v2.GetCacheRequest + */ +export type GetCacheRequest = Message<"flyteidl2.cacheservice.v2.GetCacheRequest"> & { + /** + * @generated from field: flyteidl2.cacheservice.GetCacheRequest base_request = 1; + */ + baseRequest?: GetCacheRequest$1; + + /** + * Identifier for the cache operation + * + * @generated from field: flyteidl2.cacheservice.v2.Identifier identifier = 2; + */ + identifier?: Identifier; +}; + +/** + * Describes the message flyteidl2.cacheservice.v2.GetCacheRequest. + * Use `create(GetCacheRequestSchema)` to create a new message. + */ +export const GetCacheRequestSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_cacheservice_v2_cacheservice, 1); + +/** + * + * Request to store/update cached data by key. + * + * @generated from message flyteidl2.cacheservice.v2.PutCacheRequest + */ +export type PutCacheRequest = Message<"flyteidl2.cacheservice.v2.PutCacheRequest"> & { + /** + * @generated from field: flyteidl2.cacheservice.PutCacheRequest base_request = 1; + */ + baseRequest?: PutCacheRequest$1; + + /** + * Identifier for the cache operation + * + * @generated from field: flyteidl2.cacheservice.v2.Identifier identifier = 2; + */ + identifier?: Identifier; +}; + +/** + * Describes the message flyteidl2.cacheservice.v2.PutCacheRequest. + * Use `create(PutCacheRequestSchema)` to create a new message. + */ +export const PutCacheRequestSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_cacheservice_v2_cacheservice, 2); + +/** + * + * Request to delete cached data by key. 
+ * + * @generated from message flyteidl2.cacheservice.v2.DeleteCacheRequest + */ +export type DeleteCacheRequest = Message<"flyteidl2.cacheservice.v2.DeleteCacheRequest"> & { + /** + * @generated from field: flyteidl2.cacheservice.DeleteCacheRequest base_request = 1; + */ + baseRequest?: DeleteCacheRequest$1; + + /** + * Identifier for the cache operation + * + * @generated from field: flyteidl2.cacheservice.v2.Identifier identifier = 2; + */ + identifier?: Identifier; +}; + +/** + * Describes the message flyteidl2.cacheservice.v2.DeleteCacheRequest. + * Use `create(DeleteCacheRequestSchema)` to create a new message. + */ +export const DeleteCacheRequestSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_cacheservice_v2_cacheservice, 3); + +/** + * + * Request to get or extend a reservation for a cache key + * + * @generated from message flyteidl2.cacheservice.v2.GetOrExtendReservationRequest + */ +export type GetOrExtendReservationRequest = Message<"flyteidl2.cacheservice.v2.GetOrExtendReservationRequest"> & { + /** + * @generated from field: flyteidl2.cacheservice.GetOrExtendReservationRequest base_request = 1; + */ + baseRequest?: GetOrExtendReservationRequest$1; + + /** + * Identifier for the cache operation + * + * @generated from field: flyteidl2.cacheservice.v2.Identifier identifier = 2; + */ + identifier?: Identifier; +}; + +/** + * Describes the message flyteidl2.cacheservice.v2.GetOrExtendReservationRequest. + * Use `create(GetOrExtendReservationRequestSchema)` to create a new message. 
+ */ +export const GetOrExtendReservationRequestSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_cacheservice_v2_cacheservice, 4); + +/** + * + * Request to release the reservation for a cache key + * + * @generated from message flyteidl2.cacheservice.v2.ReleaseReservationRequest + */ +export type ReleaseReservationRequest = Message<"flyteidl2.cacheservice.v2.ReleaseReservationRequest"> & { + /** + * @generated from field: flyteidl2.cacheservice.ReleaseReservationRequest base_request = 1; + */ + baseRequest?: ReleaseReservationRequest$1; + + /** + * Identifier for the cache operation + * + * @generated from field: flyteidl2.cacheservice.v2.Identifier identifier = 2; + */ + identifier?: Identifier; +}; + +/** + * Describes the message flyteidl2.cacheservice.v2.ReleaseReservationRequest. + * Use `create(ReleaseReservationRequestSchema)` to create a new message. + */ +export const ReleaseReservationRequestSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_cacheservice_v2_cacheservice, 5); + +/** + * + * CacheService defines operations for cache management including retrieval, storage, and deletion of cached task/workflow outputs. + * + * @generated from service flyteidl2.cacheservice.v2.CacheService + */ +export const CacheService: GenService<{ + /** + * Retrieves cached data by key. + * + * @generated from rpc flyteidl2.cacheservice.v2.CacheService.Get + */ + get: { + methodKind: "unary"; + input: typeof GetCacheRequestSchema; + output: typeof GetCacheResponseSchema; + }, + /** + * Stores or updates cached data by key. + * + * @generated from rpc flyteidl2.cacheservice.v2.CacheService.Put + */ + put: { + methodKind: "unary"; + input: typeof PutCacheRequestSchema; + output: typeof PutCacheResponseSchema; + }, + /** + * Deletes cached data by key. 
+ * + * @generated from rpc flyteidl2.cacheservice.v2.CacheService.Delete + */ + delete: { + methodKind: "unary"; + input: typeof DeleteCacheRequestSchema; + output: typeof DeleteCacheResponseSchema; + }, + /** + * Get or extend a reservation for a cache key + * + * @generated from rpc flyteidl2.cacheservice.v2.CacheService.GetOrExtendReservation + */ + getOrExtendReservation: { + methodKind: "unary"; + input: typeof GetOrExtendReservationRequestSchema; + output: typeof GetOrExtendReservationResponseSchema; + }, + /** + * Release the reservation for a cache key + * + * @generated from rpc flyteidl2.cacheservice.v2.CacheService.ReleaseReservation + */ + releaseReservation: { + methodKind: "unary"; + input: typeof ReleaseReservationRequestSchema; + output: typeof ReleaseReservationResponseSchema; + }, +}> = /*@__PURE__*/ + serviceDesc(file_flyteidl2_cacheservice_v2_cacheservice, 0); + diff --git a/gen/ts/flyteidl2/common/configuration_pb.ts b/gen/ts/flyteidl2/common/configuration_pb.ts new file mode 100644 index 0000000000..a8b5c1f8bb --- /dev/null +++ b/gen/ts/flyteidl2/common/configuration_pb.ts @@ -0,0 +1,68 @@ +// @generated by protoc-gen-es v2.2.5 with parameter "target=ts,import_extension=.ts" +// @generated from file flyteidl2/common/configuration.proto (package flyteidl2.common, syntax proto3) +/* eslint-disable */ + +import type { GenEnum, GenFile } from "@bufbuild/protobuf/codegenv1"; +import { enumDesc, fileDesc } from "@bufbuild/protobuf/codegenv1"; + +/** + * Describes the file flyteidl2/common/configuration.proto. 
+ */ +export const file_flyteidl2_common_configuration: GenFile = /*@__PURE__*/ + fileDesc("CiRmbHl0ZWlkbDIvY29tbW9uL2NvbmZpZ3VyYXRpb24ucHJvdG8SEGZseXRlaWRsMi5jb21tb24qbAoQQXR0cmlidXRlc1NvdXJjZRIWChJTT1VSQ0VfVU5TUEVDSUZJRUQQABIKCgZHTE9CQUwQARIKCgZET01BSU4QAhILCgdQUk9KRUNUEAMSEgoOUFJPSkVDVF9ET01BSU4QBBIHCgNPUkcQBULDAQoUY29tLmZseXRlaWRsMi5jb21tb25CEkNvbmZpZ3VyYXRpb25Qcm90b0gCUAFaNGdpdGh1Yi5jb20vZmx5dGVvcmcvZmx5dGUvdjIvZ2VuL2dvL2ZseXRlaWRsMi9jb21tb26iAgNGQ1iqAhBGbHl0ZWlkbDIuQ29tbW9uygIQRmx5dGVpZGwyXENvbW1vbuICHEZseXRlaWRsMlxDb21tb25cR1BCTWV0YWRhdGHqAhFGbHl0ZWlkbDI6OkNvbW1vbmIGcHJvdG8z"); + +/** + * The source of an attribute. We may have other sources in the future. + * + * @generated from enum flyteidl2.common.AttributesSource + */ +export enum AttributesSource { + /** + * The source is unspecified. + * + * @generated from enum value: SOURCE_UNSPECIFIED = 0; + */ + SOURCE_UNSPECIFIED = 0, + + /** + * The configuration is a global configuration. + * + * @generated from enum value: GLOBAL = 1; + */ + GLOBAL = 1, + + /** + * The configuration is a domain configuration. + * + * @generated from enum value: DOMAIN = 2; + */ + DOMAIN = 2, + + /** + * The configuration is a project configuration. + * + * @generated from enum value: PROJECT = 3; + */ + PROJECT = 3, + + /** + * The configuration is a project-domain configuration. + * + * @generated from enum value: PROJECT_DOMAIN = 4; + */ + PROJECT_DOMAIN = 4, + + /** + * The configuration is a org configuration. + * + * @generated from enum value: ORG = 5; + */ + ORG = 5, +} + +/** + * Describes the enum flyteidl2.common.AttributesSource. 
+ */ +export const AttributesSourceSchema: GenEnum = /*@__PURE__*/ + enumDesc(file_flyteidl2_common_configuration, 0); + diff --git a/gen/ts/flyteidl2/core/errors_pb.ts b/gen/ts/flyteidl2/core/errors_pb.ts new file mode 100644 index 0000000000..e30f657cff --- /dev/null +++ b/gen/ts/flyteidl2/core/errors_pb.ts @@ -0,0 +1,104 @@ +// @generated by protoc-gen-es v2.2.5 with parameter "target=ts,import_extension=.ts" +// @generated from file flyteidl2/core/errors.proto (package flyteidl2.core, syntax proto3) +/* eslint-disable */ + +import type { GenEnum, GenFile, GenMessage } from "@bufbuild/protobuf/codegenv1"; +import { enumDesc, fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv1"; +import type { ExecutionError_ErrorKind } from "./execution_pb.ts"; +import { file_flyteidl2_core_execution } from "./execution_pb.ts"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file flyteidl2/core/errors.proto. + */ +export const file_flyteidl2_core_errors: GenFile = /*@__PURE__*/ + fileDesc("ChtmbHl0ZWlkbDIvY29yZS9lcnJvcnMucHJvdG8SDmZseXRlaWRsMi5jb3JlIsoBCg5Db250YWluZXJFcnJvchIMCgRjb2RlGAEgASgJEg8KB21lc3NhZ2UYAiABKAkSMQoEa2luZBgDIAEoDjIjLmZseXRlaWRsMi5jb3JlLkNvbnRhaW5lckVycm9yLktpbmQSOAoGb3JpZ2luGAQgASgOMiguZmx5dGVpZGwyLmNvcmUuRXhlY3V0aW9uRXJyb3IuRXJyb3JLaW5kIiwKBEtpbmQSEwoPTk9OX1JFQ09WRVJBQkxFEAASDwoLUkVDT1ZFUkFCTEUQASI+Cg1FcnJvckRvY3VtZW50Ei0KBWVycm9yGAEgASgLMh4uZmx5dGVpZGwyLmNvcmUuQ29udGFpbmVyRXJyb3JCsAEKEmNvbS5mbHl0ZWlkbDIuY29yZUILRXJyb3JzUHJvdG9IAlABWjJnaXRodWIuY29tL2ZseXRlb3JnL2ZseXRlL3YyL2dlbi9nby9mbHl0ZWlkbDIvY29yZaICA0ZDWKoCDkZseXRlaWRsMi5Db3JlygIORmx5dGVpZGwyXENvcmXiAhpGbHl0ZWlkbDJcQ29yZVxHUEJNZXRhZGF0YeoCD0ZseXRlaWRsMjo6Q29yZWIGcHJvdG8z", [file_flyteidl2_core_execution]); + +/** + * Error message to propagate detailed errors from container executions to the execution + * engine. 
+ * + * @generated from message flyteidl2.core.ContainerError + */ +export type ContainerError = Message<"flyteidl2.core.ContainerError"> & { + /** + * A simplified code for errors, so that we can provide a glossary of all possible errors. + * + * @generated from field: string code = 1; + */ + code: string; + + /** + * A detailed error message. + * + * @generated from field: string message = 2; + */ + message: string; + + /** + * An abstract error kind for this error. Defaults to Non_Recoverable if not specified. + * + * @generated from field: flyteidl2.core.ContainerError.Kind kind = 3; + */ + kind: ContainerError_Kind; + + /** + * Defines the origin of the error (system, user, unknown). + * + * @generated from field: flyteidl2.core.ExecutionError.ErrorKind origin = 4; + */ + origin: ExecutionError_ErrorKind; +}; + +/** + * Describes the message flyteidl2.core.ContainerError. + * Use `create(ContainerErrorSchema)` to create a new message. + */ +export const ContainerErrorSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_core_errors, 0); + +/** + * Defines a generic error type that dictates the behavior of the retry strategy. + * + * @generated from enum flyteidl2.core.ContainerError.Kind + */ +export enum ContainerError_Kind { + /** + * @generated from enum value: NON_RECOVERABLE = 0; + */ + NON_RECOVERABLE = 0, + + /** + * @generated from enum value: RECOVERABLE = 1; + */ + RECOVERABLE = 1, +} + +/** + * Describes the enum flyteidl2.core.ContainerError.Kind. + */ +export const ContainerError_KindSchema: GenEnum = /*@__PURE__*/ + enumDesc(file_flyteidl2_core_errors, 0, 0); + +/** + * Defines the errors.pb file format the container can produce to communicate + * failure reasons to the execution engine. + * + * @generated from message flyteidl2.core.ErrorDocument + */ +export type ErrorDocument = Message<"flyteidl2.core.ErrorDocument"> & { + /** + * The error raised during execution. 
+ * + * @generated from field: flyteidl2.core.ContainerError error = 1; + */ + error?: ContainerError; +}; + +/** + * Describes the message flyteidl2.core.ErrorDocument. + * Use `create(ErrorDocumentSchema)` to create a new message. + */ +export const ErrorDocumentSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_core_errors, 1); + diff --git a/gen/ts/flyteidl2/datacatalog/datacatalog_pb.ts b/gen/ts/flyteidl2/datacatalog/datacatalog_pb.ts new file mode 100644 index 0000000000..7a52621550 --- /dev/null +++ b/gen/ts/flyteidl2/datacatalog/datacatalog_pb.ts @@ -0,0 +1,1299 @@ +// @generated by protoc-gen-es v2.2.5 with parameter "target=ts,import_extension=.ts" +// @generated from file flyteidl2/datacatalog/datacatalog.proto (package flyteidl2.datacatalog, syntax proto3) +/* eslint-disable */ + +import type { GenEnum, GenFile, GenMessage, GenService } from "@bufbuild/protobuf/codegenv1"; +import { enumDesc, fileDesc, messageDesc, serviceDesc } from "@bufbuild/protobuf/codegenv1"; +import type { Literal } from "../core/literals_pb.ts"; +import { file_flyteidl2_core_literals } from "../core/literals_pb.ts"; +import type { Duration, Timestamp } from "@bufbuild/protobuf/wkt"; +import { file_google_protobuf_duration, file_google_protobuf_timestamp } from "@bufbuild/protobuf/wkt"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file flyteidl2/datacatalog/datacatalog.proto. 
+ */ +export const file_flyteidl2_datacatalog_datacatalog: GenFile = /*@__PURE__*/ + fileDesc("CidmbHl0ZWlkbDIvZGF0YWNhdGFsb2cvZGF0YWNhdGFsb2cucHJvdG8SFWZseXRlaWRsMi5kYXRhY2F0YWxvZyJHChRDcmVhdGVEYXRhc2V0UmVxdWVzdBIvCgdkYXRhc2V0GAEgASgLMh4uZmx5dGVpZGwyLmRhdGFjYXRhbG9nLkRhdGFzZXQiFwoVQ3JlYXRlRGF0YXNldFJlc3BvbnNlIkYKEUdldERhdGFzZXRSZXF1ZXN0EjEKB2RhdGFzZXQYASABKAsyIC5mbHl0ZWlkbDIuZGF0YWNhdGFsb2cuRGF0YXNldElEIkUKEkdldERhdGFzZXRSZXNwb25zZRIvCgdkYXRhc2V0GAEgASgLMh4uZmx5dGVpZGwyLmRhdGFjYXRhbG9nLkRhdGFzZXQiggEKEkdldEFydGlmYWN0UmVxdWVzdBIxCgdkYXRhc2V0GAEgASgLMiAuZmx5dGVpZGwyLmRhdGFjYXRhbG9nLkRhdGFzZXRJRBIVCgthcnRpZmFjdF9pZBgCIAEoCUgAEhIKCHRhZ19uYW1lGAMgASgJSABCDgoMcXVlcnlfaGFuZGxlIkgKE0dldEFydGlmYWN0UmVzcG9uc2USMQoIYXJ0aWZhY3QYASABKAsyHy5mbHl0ZWlkbDIuZGF0YWNhdGFsb2cuQXJ0aWZhY3QiSgoVQ3JlYXRlQXJ0aWZhY3RSZXF1ZXN0EjEKCGFydGlmYWN0GAEgASgLMh8uZmx5dGVpZGwyLmRhdGFjYXRhbG9nLkFydGlmYWN0IhgKFkNyZWF0ZUFydGlmYWN0UmVzcG9uc2UiOAoNQWRkVGFnUmVxdWVzdBInCgN0YWcYASABKAsyGi5mbHl0ZWlkbDIuZGF0YWNhdGFsb2cuVGFnIhAKDkFkZFRhZ1Jlc3BvbnNlIsABChRMaXN0QXJ0aWZhY3RzUmVxdWVzdBIxCgdkYXRhc2V0GAEgASgLMiAuZmx5dGVpZGwyLmRhdGFjYXRhbG9nLkRhdGFzZXRJRBI3CgZmaWx0ZXIYAiABKAsyJy5mbHl0ZWlkbDIuZGF0YWNhdGFsb2cuRmlsdGVyRXhwcmVzc2lvbhI8CgpwYWdpbmF0aW9uGAMgASgLMiguZmx5dGVpZGwyLmRhdGFjYXRhbG9nLlBhZ2luYXRpb25PcHRpb25zIl8KFUxpc3RBcnRpZmFjdHNSZXNwb25zZRIyCglhcnRpZmFjdHMYASADKAsyHy5mbHl0ZWlkbDIuZGF0YWNhdGFsb2cuQXJ0aWZhY3QSEgoKbmV4dF90b2tlbhgCIAEoCSKMAQoTTGlzdERhdGFzZXRzUmVxdWVzdBI3CgZmaWx0ZXIYASABKAsyJy5mbHl0ZWlkbDIuZGF0YWNhdGFsb2cuRmlsdGVyRXhwcmVzc2lvbhI8CgpwYWdpbmF0aW9uGAIgASgLMiguZmx5dGVpZGwyLmRhdGFjYXRhbG9nLlBhZ2luYXRpb25PcHRpb25zIlwKFExpc3REYXRhc2V0c1Jlc3BvbnNlEjAKCGRhdGFzZXRzGAEgAygLMh4uZmx5dGVpZGwyLmRhdGFjYXRhbG9nLkRhdGFzZXQSEgoKbmV4dF90b2tlbhgCIAEoCSLrAQoVVXBkYXRlQXJ0aWZhY3RSZXF1ZXN0EjEKB2RhdGFzZXQYASABKAsyIC5mbHl0ZWlkbDIuZGF0YWNhdGFsb2cuRGF0YXNldElEEhUKC2FydGlmYWN0X2lkGAIgASgJSAASEgoIdGFnX25hbWUYAyABKAlIABIxCgRkYXRhGAQgAygLMiMuZmx5dGVpZGwyLmRhdGFjYXRhbG9nLkFydGlmYWN0RGF0YRIxCghtZXRhZGF0YRgFIAEoCzIfLmZseXRlaWRsMi5kYXRhY2F0Y
WxvZy5NZXRhZGF0YUIOCgxxdWVyeV9oYW5kbGUiLQoWVXBkYXRlQXJ0aWZhY3RSZXNwb25zZRITCgthcnRpZmFjdF9pZBgBIAEoCSJXCg1SZXNlcnZhdGlvbklEEjQKCmRhdGFzZXRfaWQYASABKAsyIC5mbHl0ZWlkbDIuZGF0YWNhdGFsb2cuRGF0YXNldElEEhAKCHRhZ19uYW1lGAIgASgJIqYBCh1HZXRPckV4dGVuZFJlc2VydmF0aW9uUmVxdWVzdBI8Cg5yZXNlcnZhdGlvbl9pZBgBIAEoCzIkLmZseXRlaWRsMi5kYXRhY2F0YWxvZy5SZXNlcnZhdGlvbklEEhAKCG93bmVyX2lkGAIgASgJEjUKEmhlYXJ0YmVhdF9pbnRlcnZhbBgDIAEoCzIZLmdvb2dsZS5wcm90b2J1Zi5EdXJhdGlvbiL3AQoLUmVzZXJ2YXRpb24SPAoOcmVzZXJ2YXRpb25faWQYASABKAsyJC5mbHl0ZWlkbDIuZGF0YWNhdGFsb2cuUmVzZXJ2YXRpb25JRBIQCghvd25lcl9pZBgCIAEoCRI1ChJoZWFydGJlYXRfaW50ZXJ2YWwYAyABKAsyGS5nb29nbGUucHJvdG9idWYuRHVyYXRpb24SLgoKZXhwaXJlc19hdBgEIAEoCzIaLmdvb2dsZS5wcm90b2J1Zi5UaW1lc3RhbXASMQoIbWV0YWRhdGEYBiABKAsyHy5mbHl0ZWlkbDIuZGF0YWNhdGFsb2cuTWV0YWRhdGEiWQoeR2V0T3JFeHRlbmRSZXNlcnZhdGlvblJlc3BvbnNlEjcKC3Jlc2VydmF0aW9uGAEgASgLMiIuZmx5dGVpZGwyLmRhdGFjYXRhbG9nLlJlc2VydmF0aW9uImsKGVJlbGVhc2VSZXNlcnZhdGlvblJlcXVlc3QSPAoOcmVzZXJ2YXRpb25faWQYASABKAsyJC5mbHl0ZWlkbDIuZGF0YWNhdGFsb2cuUmVzZXJ2YXRpb25JRBIQCghvd25lcl9pZBgCIAEoCSIcChpSZWxlYXNlUmVzZXJ2YXRpb25SZXNwb25zZSKBAQoHRGF0YXNldBIsCgJpZBgBIAEoCzIgLmZseXRlaWRsMi5kYXRhY2F0YWxvZy5EYXRhc2V0SUQSMQoIbWV0YWRhdGEYAiABKAsyHy5mbHl0ZWlkbDIuZGF0YWNhdGFsb2cuTWV0YWRhdGESFQoNcGFydGl0aW9uS2V5cxgDIAMoCSInCglQYXJ0aXRpb24SCwoDa2V5GAEgASgJEg0KBXZhbHVlGAIgASgJImYKCURhdGFzZXRJRBIPCgdwcm9qZWN0GAEgASgJEgwKBG5hbWUYAiABKAkSDgoGZG9tYWluGAMgASgJEg8KB3ZlcnNpb24YBCABKAkSDAoEVVVJRBgFIAEoCRILCgNvcmcYBiABKAkivwIKCEFydGlmYWN0EgoKAmlkGAEgASgJEjEKB2RhdGFzZXQYAiABKAsyIC5mbHl0ZWlkbDIuZGF0YWNhdGFsb2cuRGF0YXNldElEEjEKBGRhdGEYAyADKAsyIy5mbHl0ZWlkbDIuZGF0YWNhdGFsb2cuQXJ0aWZhY3REYXRhEjEKCG1ldGFkYXRhGAQgASgLMh8uZmx5dGVpZGwyLmRhdGFjYXRhbG9nLk1ldGFkYXRhEjQKCnBhcnRpdGlvbnMYBSADKAsyIC5mbHl0ZWlkbDIuZGF0YWNhdGFsb2cuUGFydGl0aW9uEigKBHRhZ3MYBiADKAsyGi5mbHl0ZWlkbDIuZGF0YWNhdGFsb2cuVGFnEi4KCmNyZWF0ZWRfYXQYByABKAsyGi5nb29nbGUucHJvdG9idWYuVGltZXN0YW1wIkQKDEFydGlmYWN0RGF0YRIMCgRuYW1lGAEgASgJEiYKBXZhbHVlGAIgASgLMhcuZmx5dGVpZGwyLmNvcmUuTGl0ZXJhbCJbCgNUYWcSDAoEbmFtZRgBI
AEoCRITCgthcnRpZmFjdF9pZBgCIAEoCRIxCgdkYXRhc2V0GAMgASgLMiAuZmx5dGVpZGwyLmRhdGFjYXRhbG9nLkRhdGFzZXRJRCJ3CghNZXRhZGF0YRI8CgdrZXlfbWFwGAEgAygLMisuZmx5dGVpZGwyLmRhdGFjYXRhbG9nLk1ldGFkYXRhLktleU1hcEVudHJ5Gi0KC0tleU1hcEVudHJ5EgsKA2tleRgBIAEoCRINCgV2YWx1ZRgCIAEoCToCOAEiUAoQRmlsdGVyRXhwcmVzc2lvbhI8CgdmaWx0ZXJzGAEgAygLMisuZmx5dGVpZGwyLmRhdGFjYXRhbG9nLlNpbmdsZVByb3BlcnR5RmlsdGVyIrsDChRTaW5nbGVQcm9wZXJ0eUZpbHRlchI+Cgp0YWdfZmlsdGVyGAEgASgLMiguZmx5dGVpZGwyLmRhdGFjYXRhbG9nLlRhZ1Byb3BlcnR5RmlsdGVySAASSgoQcGFydGl0aW9uX2ZpbHRlchgCIAEoCzIuLmZseXRlaWRsMi5kYXRhY2F0YWxvZy5QYXJ0aXRpb25Qcm9wZXJ0eUZpbHRlckgAEkgKD2FydGlmYWN0X2ZpbHRlchgDIAEoCzItLmZseXRlaWRsMi5kYXRhY2F0YWxvZy5BcnRpZmFjdFByb3BlcnR5RmlsdGVySAASRgoOZGF0YXNldF9maWx0ZXIYBCABKAsyLC5mbHl0ZWlkbDIuZGF0YWNhdGFsb2cuRGF0YXNldFByb3BlcnR5RmlsdGVySAASUAoIb3BlcmF0b3IYCiABKA4yPi5mbHl0ZWlkbDIuZGF0YWNhdGFsb2cuU2luZ2xlUHJvcGVydHlGaWx0ZXIuQ29tcGFyaXNvbk9wZXJhdG9yIiAKEkNvbXBhcmlzb25PcGVyYXRvchIKCgZFUVVBTFMQAEIRCg9wcm9wZXJ0eV9maWx0ZXIiOwoWQXJ0aWZhY3RQcm9wZXJ0eUZpbHRlchIVCgthcnRpZmFjdF9pZBgBIAEoCUgAQgoKCHByb3BlcnR5IjMKEVRhZ1Byb3BlcnR5RmlsdGVyEhIKCHRhZ19uYW1lGAEgASgJSABCCgoIcHJvcGVydHkiXQoXUGFydGl0aW9uUHJvcGVydHlGaWx0ZXISNgoHa2V5X3ZhbBgBIAEoCzIjLmZseXRlaWRsMi5kYXRhY2F0YWxvZy5LZXlWYWx1ZVBhaXJIAEIKCghwcm9wZXJ0eSIqCgxLZXlWYWx1ZVBhaXISCwoDa2V5GAEgASgJEg0KBXZhbHVlGAIgASgJInoKFURhdGFzZXRQcm9wZXJ0eUZpbHRlchIRCgdwcm9qZWN0GAEgASgJSAASDgoEbmFtZRgCIAEoCUgAEhAKBmRvbWFpbhgDIAEoCUgAEhEKB3ZlcnNpb24YBCABKAlIABINCgNvcmcYBSABKAlIAEIKCghwcm9wZXJ0eSKFAgoRUGFnaW5hdGlvbk9wdGlvbnMSDQoFbGltaXQYASABKA0SDQoFdG9rZW4YAiABKAkSQQoHc29ydEtleRgDIAEoDjIwLmZseXRlaWRsMi5kYXRhY2F0YWxvZy5QYWdpbmF0aW9uT3B0aW9ucy5Tb3J0S2V5EkUKCXNvcnRPcmRlchgEIAEoDjIyLmZseXRlaWRsMi5kYXRhY2F0YWxvZy5QYWdpbmF0aW9uT3B0aW9ucy5Tb3J0T3JkZXIiKgoJU29ydE9yZGVyEg4KCkRFU0NFTkRJTkcQABINCglBU0NFTkRJTkcQASIcCgdTb3J0S2V5EhEKDUNSRUFUSU9OX1RJTUUQADLPCAoLRGF0YUNhdGFsb2cSagoNQ3JlYXRlRGF0YXNldBIrLmZseXRlaWRsMi5kYXRhY2F0YWxvZy5DcmVhdGVEYXRhc2V0UmVxdWVzdBosLmZseXRlaWRsMi5kYXRhY2F0YWxvZy5DcmVhdGVEYXRhc2V0UmVzcG9uc2USYQoKR2V0RGF0Y
XNldBIoLmZseXRlaWRsMi5kYXRhY2F0YWxvZy5HZXREYXRhc2V0UmVxdWVzdBopLmZseXRlaWRsMi5kYXRhY2F0YWxvZy5HZXREYXRhc2V0UmVzcG9uc2USbQoOQ3JlYXRlQXJ0aWZhY3QSLC5mbHl0ZWlkbDIuZGF0YWNhdGFsb2cuQ3JlYXRlQXJ0aWZhY3RSZXF1ZXN0Gi0uZmx5dGVpZGwyLmRhdGFjYXRhbG9nLkNyZWF0ZUFydGlmYWN0UmVzcG9uc2USZAoLR2V0QXJ0aWZhY3QSKS5mbHl0ZWlkbDIuZGF0YWNhdGFsb2cuR2V0QXJ0aWZhY3RSZXF1ZXN0GiouZmx5dGVpZGwyLmRhdGFjYXRhbG9nLkdldEFydGlmYWN0UmVzcG9uc2USVQoGQWRkVGFnEiQuZmx5dGVpZGwyLmRhdGFjYXRhbG9nLkFkZFRhZ1JlcXVlc3QaJS5mbHl0ZWlkbDIuZGF0YWNhdGFsb2cuQWRkVGFnUmVzcG9uc2USagoNTGlzdEFydGlmYWN0cxIrLmZseXRlaWRsMi5kYXRhY2F0YWxvZy5MaXN0QXJ0aWZhY3RzUmVxdWVzdBosLmZseXRlaWRsMi5kYXRhY2F0YWxvZy5MaXN0QXJ0aWZhY3RzUmVzcG9uc2USZwoMTGlzdERhdGFzZXRzEiouZmx5dGVpZGwyLmRhdGFjYXRhbG9nLkxpc3REYXRhc2V0c1JlcXVlc3QaKy5mbHl0ZWlkbDIuZGF0YWNhdGFsb2cuTGlzdERhdGFzZXRzUmVzcG9uc2USbQoOVXBkYXRlQXJ0aWZhY3QSLC5mbHl0ZWlkbDIuZGF0YWNhdGFsb2cuVXBkYXRlQXJ0aWZhY3RSZXF1ZXN0Gi0uZmx5dGVpZGwyLmRhdGFjYXRhbG9nLlVwZGF0ZUFydGlmYWN0UmVzcG9uc2UShQEKFkdldE9yRXh0ZW5kUmVzZXJ2YXRpb24SNC5mbHl0ZWlkbDIuZGF0YWNhdGFsb2cuR2V0T3JFeHRlbmRSZXNlcnZhdGlvblJlcXVlc3QaNS5mbHl0ZWlkbDIuZGF0YWNhdGFsb2cuR2V0T3JFeHRlbmRSZXNlcnZhdGlvblJlc3BvbnNlEnkKElJlbGVhc2VSZXNlcnZhdGlvbhIwLmZseXRlaWRsMi5kYXRhY2F0YWxvZy5SZWxlYXNlUmVzZXJ2YXRpb25SZXF1ZXN0GjEuZmx5dGVpZGwyLmRhdGFjYXRhbG9nLlJlbGVhc2VSZXNlcnZhdGlvblJlc3BvbnNlQt8BChljb20uZmx5dGVpZGwyLmRhdGFjYXRhbG9nQhBEYXRhY2F0YWxvZ1Byb3RvSAJQAVo5Z2l0aHViLmNvbS9mbHl0ZW9yZy9mbHl0ZS92Mi9nZW4vZ28vZmx5dGVpZGwyL2RhdGFjYXRhbG9nogIDRkRYqgIVRmx5dGVpZGwyLkRhdGFjYXRhbG9nygIVRmx5dGVpZGwyXERhdGFjYXRhbG9n4gIhRmx5dGVpZGwyXERhdGFjYXRhbG9nXEdQQk1ldGFkYXRh6gIWRmx5dGVpZGwyOjpEYXRhY2F0YWxvZ2IGcHJvdG8z", [file_flyteidl2_core_literals, file_google_protobuf_duration, file_google_protobuf_timestamp]); + +/** + * + * Request message for creating a Dataset. 
+ * + * @generated from message flyteidl2.datacatalog.CreateDatasetRequest + */ +export type CreateDatasetRequest = Message<"flyteidl2.datacatalog.CreateDatasetRequest"> & { + /** + * @generated from field: flyteidl2.datacatalog.Dataset dataset = 1; + */ + dataset?: Dataset; +}; + +/** + * Describes the message flyteidl2.datacatalog.CreateDatasetRequest. + * Use `create(CreateDatasetRequestSchema)` to create a new message. + */ +export const CreateDatasetRequestSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_datacatalog_datacatalog, 0); + +/** + * + * Response message for creating a Dataset + * + * @generated from message flyteidl2.datacatalog.CreateDatasetResponse + */ +export type CreateDatasetResponse = Message<"flyteidl2.datacatalog.CreateDatasetResponse"> & { +}; + +/** + * Describes the message flyteidl2.datacatalog.CreateDatasetResponse. + * Use `create(CreateDatasetResponseSchema)` to create a new message. + */ +export const CreateDatasetResponseSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_datacatalog_datacatalog, 1); + +/** + * + * Request message for retrieving a Dataset. The Dataset is retrieved by it's unique identifier + * which is a combination of several fields. + * + * @generated from message flyteidl2.datacatalog.GetDatasetRequest + */ +export type GetDatasetRequest = Message<"flyteidl2.datacatalog.GetDatasetRequest"> & { + /** + * @generated from field: flyteidl2.datacatalog.DatasetID dataset = 1; + */ + dataset?: DatasetID; +}; + +/** + * Describes the message flyteidl2.datacatalog.GetDatasetRequest. + * Use `create(GetDatasetRequestSchema)` to create a new message. + */ +export const GetDatasetRequestSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_datacatalog_datacatalog, 2); + +/** + * + * Response message for retrieving a Dataset. The response will include the metadata for the + * Dataset. 
+ * + * @generated from message flyteidl2.datacatalog.GetDatasetResponse + */ +export type GetDatasetResponse = Message<"flyteidl2.datacatalog.GetDatasetResponse"> & { + /** + * @generated from field: flyteidl2.datacatalog.Dataset dataset = 1; + */ + dataset?: Dataset; +}; + +/** + * Describes the message flyteidl2.datacatalog.GetDatasetResponse. + * Use `create(GetDatasetResponseSchema)` to create a new message. + */ +export const GetDatasetResponseSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_datacatalog_datacatalog, 3); + +/** + * + * Request message for retrieving an Artifact. Retrieve an artifact based on a query handle that + * can be one of artifact_id or tag. The result returned will include the artifact data and metadata + * associated with the artifact. + * + * @generated from message flyteidl2.datacatalog.GetArtifactRequest + */ +export type GetArtifactRequest = Message<"flyteidl2.datacatalog.GetArtifactRequest"> & { + /** + * @generated from field: flyteidl2.datacatalog.DatasetID dataset = 1; + */ + dataset?: DatasetID; + + /** + * @generated from oneof flyteidl2.datacatalog.GetArtifactRequest.query_handle + */ + queryHandle: { + /** + * @generated from field: string artifact_id = 2; + */ + value: string; + case: "artifactId"; + } | { + /** + * @generated from field: string tag_name = 3; + */ + value: string; + case: "tagName"; + } | { case: undefined; value?: undefined }; +}; + +/** + * Describes the message flyteidl2.datacatalog.GetArtifactRequest. + * Use `create(GetArtifactRequestSchema)` to create a new message. + */ +export const GetArtifactRequestSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_datacatalog_datacatalog, 4); + +/** + * + * Response message for retrieving an Artifact. The result returned will include the artifact data + * and metadata associated with the artifact. 
+ * + * @generated from message flyteidl2.datacatalog.GetArtifactResponse + */ +export type GetArtifactResponse = Message<"flyteidl2.datacatalog.GetArtifactResponse"> & { + /** + * @generated from field: flyteidl2.datacatalog.Artifact artifact = 1; + */ + artifact?: Artifact; +}; + +/** + * Describes the message flyteidl2.datacatalog.GetArtifactResponse. + * Use `create(GetArtifactResponseSchema)` to create a new message. + */ +export const GetArtifactResponseSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_datacatalog_datacatalog, 5); + +/** + * + * Request message for creating an Artifact and its associated artifact Data. + * + * @generated from message flyteidl2.datacatalog.CreateArtifactRequest + */ +export type CreateArtifactRequest = Message<"flyteidl2.datacatalog.CreateArtifactRequest"> & { + /** + * @generated from field: flyteidl2.datacatalog.Artifact artifact = 1; + */ + artifact?: Artifact; +}; + +/** + * Describes the message flyteidl2.datacatalog.CreateArtifactRequest. + * Use `create(CreateArtifactRequestSchema)` to create a new message. + */ +export const CreateArtifactRequestSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_datacatalog_datacatalog, 6); + +/** + * + * Response message for creating an Artifact. + * + * @generated from message flyteidl2.datacatalog.CreateArtifactResponse + */ +export type CreateArtifactResponse = Message<"flyteidl2.datacatalog.CreateArtifactResponse"> & { +}; + +/** + * Describes the message flyteidl2.datacatalog.CreateArtifactResponse. + * Use `create(CreateArtifactResponseSchema)` to create a new message. + */ +export const CreateArtifactResponseSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_datacatalog_datacatalog, 7); + +/** + * + * Request message for tagging an Artifact. 
+ * + * @generated from message flyteidl2.datacatalog.AddTagRequest + */ +export type AddTagRequest = Message<"flyteidl2.datacatalog.AddTagRequest"> & { + /** + * @generated from field: flyteidl2.datacatalog.Tag tag = 1; + */ + tag?: Tag; +}; + +/** + * Describes the message flyteidl2.datacatalog.AddTagRequest. + * Use `create(AddTagRequestSchema)` to create a new message. + */ +export const AddTagRequestSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_datacatalog_datacatalog, 8); + +/** + * + * Response message for tagging an Artifact. + * + * @generated from message flyteidl2.datacatalog.AddTagResponse + */ +export type AddTagResponse = Message<"flyteidl2.datacatalog.AddTagResponse"> & { +}; + +/** + * Describes the message flyteidl2.datacatalog.AddTagResponse. + * Use `create(AddTagResponseSchema)` to create a new message. + */ +export const AddTagResponseSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_datacatalog_datacatalog, 9); + +/** + * List the artifacts that belong to the Dataset, optionally filtered using filtered expression. + * + * @generated from message flyteidl2.datacatalog.ListArtifactsRequest + */ +export type ListArtifactsRequest = Message<"flyteidl2.datacatalog.ListArtifactsRequest"> & { + /** + * Use a datasetID for which you want to retrieve the artifacts + * + * @generated from field: flyteidl2.datacatalog.DatasetID dataset = 1; + */ + dataset?: DatasetID; + + /** + * Apply the filter expression to this query + * + * @generated from field: flyteidl2.datacatalog.FilterExpression filter = 2; + */ + filter?: FilterExpression; + + /** + * Pagination options to get a page of artifacts + * + * @generated from field: flyteidl2.datacatalog.PaginationOptions pagination = 3; + */ + pagination?: PaginationOptions; +}; + +/** + * Describes the message flyteidl2.datacatalog.ListArtifactsRequest. + * Use `create(ListArtifactsRequestSchema)` to create a new message. 
+ */ +export const ListArtifactsRequestSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_datacatalog_datacatalog, 10); + +/** + * Response to list artifacts + * + * @generated from message flyteidl2.datacatalog.ListArtifactsResponse + */ +export type ListArtifactsResponse = Message<"flyteidl2.datacatalog.ListArtifactsResponse"> & { + /** + * The list of artifacts + * + * @generated from field: repeated flyteidl2.datacatalog.Artifact artifacts = 1; + */ + artifacts: Artifact[]; + + /** + * Token to use to request the next page, pass this into the next requests PaginationOptions + * + * @generated from field: string next_token = 2; + */ + nextToken: string; +}; + +/** + * Describes the message flyteidl2.datacatalog.ListArtifactsResponse. + * Use `create(ListArtifactsResponseSchema)` to create a new message. + */ +export const ListArtifactsResponseSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_datacatalog_datacatalog, 11); + +/** + * List the datasets for the given query + * + * @generated from message flyteidl2.datacatalog.ListDatasetsRequest + */ +export type ListDatasetsRequest = Message<"flyteidl2.datacatalog.ListDatasetsRequest"> & { + /** + * Apply the filter expression to this query + * + * @generated from field: flyteidl2.datacatalog.FilterExpression filter = 1; + */ + filter?: FilterExpression; + + /** + * Pagination options to get a page of datasets + * + * @generated from field: flyteidl2.datacatalog.PaginationOptions pagination = 2; + */ + pagination?: PaginationOptions; +}; + +/** + * Describes the message flyteidl2.datacatalog.ListDatasetsRequest. + * Use `create(ListDatasetsRequestSchema)` to create a new message. 
+ */ +export const ListDatasetsRequestSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_datacatalog_datacatalog, 12); + +/** + * List the datasets response with token for next pagination + * + * @generated from message flyteidl2.datacatalog.ListDatasetsResponse + */ +export type ListDatasetsResponse = Message<"flyteidl2.datacatalog.ListDatasetsResponse"> & { + /** + * The list of datasets + * + * @generated from field: repeated flyteidl2.datacatalog.Dataset datasets = 1; + */ + datasets: Dataset[]; + + /** + * Token to use to request the next page, pass this into the next requests PaginationOptions + * + * @generated from field: string next_token = 2; + */ + nextToken: string; +}; + +/** + * Describes the message flyteidl2.datacatalog.ListDatasetsResponse. + * Use `create(ListDatasetsResponseSchema)` to create a new message. + */ +export const ListDatasetsResponseSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_datacatalog_datacatalog, 13); + +/** + * + * Request message for updating an Artifact and overwriting its associated ArtifactData. + * + * @generated from message flyteidl2.datacatalog.UpdateArtifactRequest + */ +export type UpdateArtifactRequest = Message<"flyteidl2.datacatalog.UpdateArtifactRequest"> & { + /** + * ID of dataset the artifact is associated with + * + * @generated from field: flyteidl2.datacatalog.DatasetID dataset = 1; + */ + dataset?: DatasetID; + + /** + * Either ID of artifact or name of tag to retrieve existing artifact from + * + * @generated from oneof flyteidl2.datacatalog.UpdateArtifactRequest.query_handle + */ + queryHandle: { + /** + * @generated from field: string artifact_id = 2; + */ + value: string; + case: "artifactId"; + } | { + /** + * @generated from field: string tag_name = 3; + */ + value: string; + case: "tagName"; + } | { case: undefined; value?: undefined }; + + /** + * List of data to overwrite stored artifact data with. 
Must contain ALL data for updated Artifact as any missing + * ArtifactData entries will be removed from the underlying blob storage and database. + * + * @generated from field: repeated flyteidl2.datacatalog.ArtifactData data = 4; + */ + data: ArtifactData[]; + + /** + * Update execution metadata(including execution domain, name, node, project data) when overwriting cache + * + * @generated from field: flyteidl2.datacatalog.Metadata metadata = 5; + */ + metadata?: Metadata; +}; + +/** + * Describes the message flyteidl2.datacatalog.UpdateArtifactRequest. + * Use `create(UpdateArtifactRequestSchema)` to create a new message. + */ +export const UpdateArtifactRequestSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_datacatalog_datacatalog, 14); + +/** + * + * Response message for updating an Artifact. + * + * @generated from message flyteidl2.datacatalog.UpdateArtifactResponse + */ +export type UpdateArtifactResponse = Message<"flyteidl2.datacatalog.UpdateArtifactResponse"> & { + /** + * The unique ID of the artifact updated + * + * @generated from field: string artifact_id = 1; + */ + artifactId: string; +}; + +/** + * Describes the message flyteidl2.datacatalog.UpdateArtifactResponse. + * Use `create(UpdateArtifactResponseSchema)` to create a new message. + */ +export const UpdateArtifactResponseSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_datacatalog_datacatalog, 15); + +/** + * + * ReservationID message that is composed of several string fields. 
+ * + * @generated from message flyteidl2.datacatalog.ReservationID + */ +export type ReservationID = Message<"flyteidl2.datacatalog.ReservationID"> & { + /** + * The unique ID for the reserved dataset + * + * @generated from field: flyteidl2.datacatalog.DatasetID dataset_id = 1; + */ + datasetId?: DatasetID; + + /** + * The specific artifact tag for the reservation + * + * @generated from field: string tag_name = 2; + */ + tagName: string; +}; + +/** + * Describes the message flyteidl2.datacatalog.ReservationID. + * Use `create(ReservationIDSchema)` to create a new message. + */ +export const ReservationIDSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_datacatalog_datacatalog, 16); + +/** + * Try to acquire or extend an artifact reservation. If an active reservation exists, retrieve that instance. + * + * @generated from message flyteidl2.datacatalog.GetOrExtendReservationRequest + */ +export type GetOrExtendReservationRequest = Message<"flyteidl2.datacatalog.GetOrExtendReservationRequest"> & { + /** + * The unique ID for the reservation + * + * @generated from field: flyteidl2.datacatalog.ReservationID reservation_id = 1; + */ + reservationId?: ReservationID; + + /** + * The unique ID of the owner for the reservation + * + * @generated from field: string owner_id = 2; + */ + ownerId: string; + + /** + * Requested reservation extension heartbeat interval + * + * @generated from field: google.protobuf.Duration heartbeat_interval = 3; + */ + heartbeatInterval?: Duration; +}; + +/** + * Describes the message flyteidl2.datacatalog.GetOrExtendReservationRequest. + * Use `create(GetOrExtendReservationRequestSchema)` to create a new message. + */ +export const GetOrExtendReservationRequestSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_datacatalog_datacatalog, 17); + +/** + * A reservation including owner, heartbeat interval, expiration timestamp, and various metadata. 
+ * + * @generated from message flyteidl2.datacatalog.Reservation + */ +export type Reservation = Message<"flyteidl2.datacatalog.Reservation"> & { + /** + * The unique ID for the reservation + * + * @generated from field: flyteidl2.datacatalog.ReservationID reservation_id = 1; + */ + reservationId?: ReservationID; + + /** + * The unique ID of the owner for the reservation + * + * @generated from field: string owner_id = 2; + */ + ownerId: string; + + /** + * Recommended heartbeat interval to extend reservation + * + * @generated from field: google.protobuf.Duration heartbeat_interval = 3; + */ + heartbeatInterval?: Duration; + + /** + * Expiration timestamp of this reservation + * + * @generated from field: google.protobuf.Timestamp expires_at = 4; + */ + expiresAt?: Timestamp; + + /** + * Free-form metadata associated with the artifact + * + * @generated from field: flyteidl2.datacatalog.Metadata metadata = 6; + */ + metadata?: Metadata; +}; + +/** + * Describes the message flyteidl2.datacatalog.Reservation. + * Use `create(ReservationSchema)` to create a new message. + */ +export const ReservationSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_datacatalog_datacatalog, 18); + +/** + * Response including either a newly minted reservation or the existing reservation + * + * @generated from message flyteidl2.datacatalog.GetOrExtendReservationResponse + */ +export type GetOrExtendReservationResponse = Message<"flyteidl2.datacatalog.GetOrExtendReservationResponse"> & { + /** + * The reservation to be acquired or extended + * + * @generated from field: flyteidl2.datacatalog.Reservation reservation = 1; + */ + reservation?: Reservation; +}; + +/** + * Describes the message flyteidl2.datacatalog.GetOrExtendReservationResponse. + * Use `create(GetOrExtendReservationResponseSchema)` to create a new message. 
+ */ +export const GetOrExtendReservationResponseSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_datacatalog_datacatalog, 19); + +/** + * Request to release reservation + * + * @generated from message flyteidl2.datacatalog.ReleaseReservationRequest + */ +export type ReleaseReservationRequest = Message<"flyteidl2.datacatalog.ReleaseReservationRequest"> & { + /** + * The unique ID for the reservation + * + * @generated from field: flyteidl2.datacatalog.ReservationID reservation_id = 1; + */ + reservationId?: ReservationID; + + /** + * The unique ID of the owner for the reservation + * + * @generated from field: string owner_id = 2; + */ + ownerId: string; +}; + +/** + * Describes the message flyteidl2.datacatalog.ReleaseReservationRequest. + * Use `create(ReleaseReservationRequestSchema)` to create a new message. + */ +export const ReleaseReservationRequestSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_datacatalog_datacatalog, 20); + +/** + * Response to release reservation + * + * @generated from message flyteidl2.datacatalog.ReleaseReservationResponse + */ +export type ReleaseReservationResponse = Message<"flyteidl2.datacatalog.ReleaseReservationResponse"> & { +}; + +/** + * Describes the message flyteidl2.datacatalog.ReleaseReservationResponse. + * Use `create(ReleaseReservationResponseSchema)` to create a new message. + */ +export const ReleaseReservationResponseSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_datacatalog_datacatalog, 21); + +/** + * + * Dataset message. It is uniquely identified by DatasetID. 
+ * + * @generated from message flyteidl2.datacatalog.Dataset + */ +export type Dataset = Message<"flyteidl2.datacatalog.Dataset"> & { + /** + * @generated from field: flyteidl2.datacatalog.DatasetID id = 1; + */ + id?: DatasetID; + + /** + * @generated from field: flyteidl2.datacatalog.Metadata metadata = 2; + */ + metadata?: Metadata; + + /** + * @generated from field: repeated string partitionKeys = 3; + */ + partitionKeys: string[]; +}; + +/** + * Describes the message flyteidl2.datacatalog.Dataset. + * Use `create(DatasetSchema)` to create a new message. + */ +export const DatasetSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_datacatalog_datacatalog, 22); + +/** + * + * An artifact could have multiple partitions and each partition can have an arbitrary string key/value pair + * + * @generated from message flyteidl2.datacatalog.Partition + */ +export type Partition = Message<"flyteidl2.datacatalog.Partition"> & { + /** + * @generated from field: string key = 1; + */ + key: string; + + /** + * @generated from field: string value = 2; + */ + value: string; +}; + +/** + * Describes the message flyteidl2.datacatalog.Partition. + * Use `create(PartitionSchema)` to create a new message. + */ +export const PartitionSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_datacatalog_datacatalog, 23); + +/** + * + * DatasetID message that is composed of several string fields. + * + * @generated from message flyteidl2.datacatalog.DatasetID + */ +export type DatasetID = Message<"flyteidl2.datacatalog.DatasetID"> & { + /** + * The name of the project + * + * @generated from field: string project = 1; + */ + project: string; + + /** + * The name of the dataset + * + * @generated from field: string name = 2; + */ + name: string; + + /** + * The domain (eg. 
environment) + * + * @generated from field: string domain = 3; + */ + domain: string; + + /** + * Version of the data schema + * + * @generated from field: string version = 4; + */ + version: string; + + /** + * UUID for the dataset (if set the above fields are optional) + * + * @generated from field: string UUID = 5; + */ + UUID: string; + + /** + * Optional, org key applied to the resource. + * + * @generated from field: string org = 6; + */ + org: string; +}; + +/** + * Describes the message flyteidl2.datacatalog.DatasetID. + * Use `create(DatasetIDSchema)` to create a new message. + */ +export const DatasetIDSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_datacatalog_datacatalog, 24); + +/** + * + * Artifact message. It is composed of several string fields. + * + * @generated from message flyteidl2.datacatalog.Artifact + */ +export type Artifact = Message<"flyteidl2.datacatalog.Artifact"> & { + /** + * The unique ID of the artifact + * + * @generated from field: string id = 1; + */ + id: string; + + /** + * The Dataset that the artifact belongs to + * + * @generated from field: flyteidl2.datacatalog.DatasetID dataset = 2; + */ + dataset?: DatasetID; + + /** + * A list of data that is associated with the artifact + * + * @generated from field: repeated flyteidl2.datacatalog.ArtifactData data = 3; + */ + data: ArtifactData[]; + + /** + * Free-form metadata associated with the artifact + * + * @generated from field: flyteidl2.datacatalog.Metadata metadata = 4; + */ + metadata?: Metadata; + + /** + * @generated from field: repeated flyteidl2.datacatalog.Partition partitions = 5; + */ + partitions: Partition[]; + + /** + * @generated from field: repeated flyteidl2.datacatalog.Tag tags = 6; + */ + tags: Tag[]; + + /** + * creation timestamp of artifact, autogenerated by service + * + * @generated from field: google.protobuf.Timestamp created_at = 7; + */ + createdAt?: Timestamp; +}; + +/** + * Describes the message flyteidl2.datacatalog.Artifact. 
+ * Use `create(ArtifactSchema)` to create a new message. + */ +export const ArtifactSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_datacatalog_datacatalog, 25); + +/** + * + * ArtifactData that belongs to an artifact + * + * @generated from message flyteidl2.datacatalog.ArtifactData + */ +export type ArtifactData = Message<"flyteidl2.datacatalog.ArtifactData"> & { + /** + * @generated from field: string name = 1; + */ + name: string; + + /** + * @generated from field: flyteidl2.core.Literal value = 2; + */ + value?: Literal; +}; + +/** + * Describes the message flyteidl2.datacatalog.ArtifactData. + * Use `create(ArtifactDataSchema)` to create a new message. + */ +export const ArtifactDataSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_datacatalog_datacatalog, 26); + +/** + * + * Tag message that is unique to a Dataset. It is associated to a single artifact and + * can be retrieved by name later. + * + * @generated from message flyteidl2.datacatalog.Tag + */ +export type Tag = Message<"flyteidl2.datacatalog.Tag"> & { + /** + * Name of tag + * + * @generated from field: string name = 1; + */ + name: string; + + /** + * The tagged artifact + * + * @generated from field: string artifact_id = 2; + */ + artifactId: string; + + /** + * The Dataset that this tag belongs to + * + * @generated from field: flyteidl2.datacatalog.DatasetID dataset = 3; + */ + dataset?: DatasetID; +}; + +/** + * Describes the message flyteidl2.datacatalog.Tag. + * Use `create(TagSchema)` to create a new message. 
+ */ +export const TagSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_datacatalog_datacatalog, 27); + +/** + * + * Metadata representation for artifacts and datasets + * + * @generated from message flyteidl2.datacatalog.Metadata + */ +export type Metadata = Message<"flyteidl2.datacatalog.Metadata"> & { + /** + * key map is a dictionary of key/val strings that represent metadata + * + * @generated from field: map key_map = 1; + */ + keyMap: { [key: string]: string }; +}; + +/** + * Describes the message flyteidl2.datacatalog.Metadata. + * Use `create(MetadataSchema)` to create a new message. + */ +export const MetadataSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_datacatalog_datacatalog, 28); + +/** + * Filter expression that is composed of a combination of single filters + * + * @generated from message flyteidl2.datacatalog.FilterExpression + */ +export type FilterExpression = Message<"flyteidl2.datacatalog.FilterExpression"> & { + /** + * @generated from field: repeated flyteidl2.datacatalog.SinglePropertyFilter filters = 1; + */ + filters: SinglePropertyFilter[]; +}; + +/** + * Describes the message flyteidl2.datacatalog.FilterExpression. + * Use `create(FilterExpressionSchema)` to create a new message. + */ +export const FilterExpressionSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_datacatalog_datacatalog, 29); + +/** + * A single property to filter on. 
+ * + * @generated from message flyteidl2.datacatalog.SinglePropertyFilter + */ +export type SinglePropertyFilter = Message<"flyteidl2.datacatalog.SinglePropertyFilter"> & { + /** + * @generated from oneof flyteidl2.datacatalog.SinglePropertyFilter.property_filter + */ + propertyFilter: { + /** + * @generated from field: flyteidl2.datacatalog.TagPropertyFilter tag_filter = 1; + */ + value: TagPropertyFilter; + case: "tagFilter"; + } | { + /** + * @generated from field: flyteidl2.datacatalog.PartitionPropertyFilter partition_filter = 2; + */ + value: PartitionPropertyFilter; + case: "partitionFilter"; + } | { + /** + * @generated from field: flyteidl2.datacatalog.ArtifactPropertyFilter artifact_filter = 3; + */ + value: ArtifactPropertyFilter; + case: "artifactFilter"; + } | { + /** + * @generated from field: flyteidl2.datacatalog.DatasetPropertyFilter dataset_filter = 4; + */ + value: DatasetPropertyFilter; + case: "datasetFilter"; + } | { case: undefined; value?: undefined }; + + /** + * field 10 in case we add more entities to query + * + * @generated from field: flyteidl2.datacatalog.SinglePropertyFilter.ComparisonOperator operator = 10; + */ + operator: SinglePropertyFilter_ComparisonOperator; +}; + +/** + * Describes the message flyteidl2.datacatalog.SinglePropertyFilter. + * Use `create(SinglePropertyFilterSchema)` to create a new message. + */ +export const SinglePropertyFilterSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_datacatalog_datacatalog, 30); + +/** + * as use-cases come up we can add more operators, ex: gte, like, not eq etc. + * + * @generated from enum flyteidl2.datacatalog.SinglePropertyFilter.ComparisonOperator + */ +export enum SinglePropertyFilter_ComparisonOperator { + /** + * @generated from enum value: EQUALS = 0; + */ + EQUALS = 0, +} + +/** + * Describes the enum flyteidl2.datacatalog.SinglePropertyFilter.ComparisonOperator. 
+ */ +export const SinglePropertyFilter_ComparisonOperatorSchema: GenEnum = /*@__PURE__*/ + enumDesc(file_flyteidl2_datacatalog_datacatalog, 30, 0); + +/** + * Artifact properties we can filter by + * + * @generated from message flyteidl2.datacatalog.ArtifactPropertyFilter + */ +export type ArtifactPropertyFilter = Message<"flyteidl2.datacatalog.ArtifactPropertyFilter"> & { + /** + * oneof because we can add more properties in the future + * + * @generated from oneof flyteidl2.datacatalog.ArtifactPropertyFilter.property + */ + property: { + /** + * @generated from field: string artifact_id = 1; + */ + value: string; + case: "artifactId"; + } | { case: undefined; value?: undefined }; +}; + +/** + * Describes the message flyteidl2.datacatalog.ArtifactPropertyFilter. + * Use `create(ArtifactPropertyFilterSchema)` to create a new message. + */ +export const ArtifactPropertyFilterSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_datacatalog_datacatalog, 31); + +/** + * Tag properties we can filter by + * + * @generated from message flyteidl2.datacatalog.TagPropertyFilter + */ +export type TagPropertyFilter = Message<"flyteidl2.datacatalog.TagPropertyFilter"> & { + /** + * @generated from oneof flyteidl2.datacatalog.TagPropertyFilter.property + */ + property: { + /** + * @generated from field: string tag_name = 1; + */ + value: string; + case: "tagName"; + } | { case: undefined; value?: undefined }; +}; + +/** + * Describes the message flyteidl2.datacatalog.TagPropertyFilter. + * Use `create(TagPropertyFilterSchema)` to create a new message. 
+ */ +export const TagPropertyFilterSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_datacatalog_datacatalog, 32); + +/** + * Partition properties we can filter by + * + * @generated from message flyteidl2.datacatalog.PartitionPropertyFilter + */ +export type PartitionPropertyFilter = Message<"flyteidl2.datacatalog.PartitionPropertyFilter"> & { + /** + * @generated from oneof flyteidl2.datacatalog.PartitionPropertyFilter.property + */ + property: { + /** + * @generated from field: flyteidl2.datacatalog.KeyValuePair key_val = 1; + */ + value: KeyValuePair; + case: "keyVal"; + } | { case: undefined; value?: undefined }; +}; + +/** + * Describes the message flyteidl2.datacatalog.PartitionPropertyFilter. + * Use `create(PartitionPropertyFilterSchema)` to create a new message. + */ +export const PartitionPropertyFilterSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_datacatalog_datacatalog, 33); + +/** + * @generated from message flyteidl2.datacatalog.KeyValuePair + */ +export type KeyValuePair = Message<"flyteidl2.datacatalog.KeyValuePair"> & { + /** + * @generated from field: string key = 1; + */ + key: string; + + /** + * @generated from field: string value = 2; + */ + value: string; +}; + +/** + * Describes the message flyteidl2.datacatalog.KeyValuePair. + * Use `create(KeyValuePairSchema)` to create a new message. 
+ */ +export const KeyValuePairSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_datacatalog_datacatalog, 34); + +/** + * Dataset properties we can filter by + * + * @generated from message flyteidl2.datacatalog.DatasetPropertyFilter + */ +export type DatasetPropertyFilter = Message<"flyteidl2.datacatalog.DatasetPropertyFilter"> & { + /** + * @generated from oneof flyteidl2.datacatalog.DatasetPropertyFilter.property + */ + property: { + /** + * @generated from field: string project = 1; + */ + value: string; + case: "project"; + } | { + /** + * @generated from field: string name = 2; + */ + value: string; + case: "name"; + } | { + /** + * @generated from field: string domain = 3; + */ + value: string; + case: "domain"; + } | { + /** + * @generated from field: string version = 4; + */ + value: string; + case: "version"; + } | { + /** + * Optional, org key applied to the dataset. + * + * @generated from field: string org = 5; + */ + value: string; + case: "org"; + } | { case: undefined; value?: undefined }; +}; + +/** + * Describes the message flyteidl2.datacatalog.DatasetPropertyFilter. + * Use `create(DatasetPropertyFilterSchema)` to create a new message. 
+ */ +export const DatasetPropertyFilterSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_datacatalog_datacatalog, 35); + +/** + * Pagination options for making list requests + * + * @generated from message flyteidl2.datacatalog.PaginationOptions + */ +export type PaginationOptions = Message<"flyteidl2.datacatalog.PaginationOptions"> & { + /** + * the max number of results to return + * + * @generated from field: uint32 limit = 1; + */ + limit: number; + + /** + * the token to pass to fetch the next page + * + * @generated from field: string token = 2; + */ + token: string; + + /** + * the property that we want to sort the results by + * + * @generated from field: flyteidl2.datacatalog.PaginationOptions.SortKey sortKey = 3; + */ + sortKey: PaginationOptions_SortKey; + + /** + * the sort order of the results + * + * @generated from field: flyteidl2.datacatalog.PaginationOptions.SortOrder sortOrder = 4; + */ + sortOrder: PaginationOptions_SortOrder; +}; + +/** + * Describes the message flyteidl2.datacatalog.PaginationOptions. + * Use `create(PaginationOptionsSchema)` to create a new message. + */ +export const PaginationOptionsSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_datacatalog_datacatalog, 36); + +/** + * @generated from enum flyteidl2.datacatalog.PaginationOptions.SortOrder + */ +export enum PaginationOptions_SortOrder { + /** + * @generated from enum value: DESCENDING = 0; + */ + DESCENDING = 0, + + /** + * @generated from enum value: ASCENDING = 1; + */ + ASCENDING = 1, +} + +/** + * Describes the enum flyteidl2.datacatalog.PaginationOptions.SortOrder. 
+ */ +export const PaginationOptions_SortOrderSchema: GenEnum = /*@__PURE__*/ + enumDesc(file_flyteidl2_datacatalog_datacatalog, 36, 0); + +/** + * @generated from enum flyteidl2.datacatalog.PaginationOptions.SortKey + */ +export enum PaginationOptions_SortKey { + /** + * @generated from enum value: CREATION_TIME = 0; + */ + CREATION_TIME = 0, +} + +/** + * Describes the enum flyteidl2.datacatalog.PaginationOptions.SortKey. + */ +export const PaginationOptions_SortKeySchema: GenEnum = /*@__PURE__*/ + enumDesc(file_flyteidl2_datacatalog_datacatalog, 36, 1); + +/** + * + * Data Catalog service definition + * Data Catalog is a service for indexing parameterized, strongly-typed data artifacts across revisions. + * Artifacts are associated with a Dataset, and can be tagged for retrieval. + * + * @generated from service flyteidl2.datacatalog.DataCatalog + */ +export const DataCatalog: GenService<{ + /** + * Create a new Dataset. Datasets are unique based on the DatasetID. Datasets are logical groupings of artifacts. + * Each dataset can have one or more artifacts + * + * @generated from rpc flyteidl2.datacatalog.DataCatalog.CreateDataset + */ + createDataset: { + methodKind: "unary"; + input: typeof CreateDatasetRequestSchema; + output: typeof CreateDatasetResponseSchema; + }, + /** + * Get a Dataset by the DatasetID. This returns the Dataset with the associated metadata. + * + * @generated from rpc flyteidl2.datacatalog.DataCatalog.GetDataset + */ + getDataset: { + methodKind: "unary"; + input: typeof GetDatasetRequestSchema; + output: typeof GetDatasetResponseSchema; + }, + /** + * Create an artifact and the artifact data associated with it. 
An artifact can be a hive partition or arbitrary + * files or data values + * + * @generated from rpc flyteidl2.datacatalog.DataCatalog.CreateArtifact + */ + createArtifact: { + methodKind: "unary"; + input: typeof CreateArtifactRequestSchema; + output: typeof CreateArtifactResponseSchema; + }, + /** + * Retrieve an artifact by an identifying handle. This returns an artifact along with the artifact data. + * + * @generated from rpc flyteidl2.datacatalog.DataCatalog.GetArtifact + */ + getArtifact: { + methodKind: "unary"; + input: typeof GetArtifactRequestSchema; + output: typeof GetArtifactResponseSchema; + }, + /** + * Associate a tag with an artifact. Tags are unique within a Dataset. + * + * @generated from rpc flyteidl2.datacatalog.DataCatalog.AddTag + */ + addTag: { + methodKind: "unary"; + input: typeof AddTagRequestSchema; + output: typeof AddTagResponseSchema; + }, + /** + * Return a paginated list of artifacts + * + * @generated from rpc flyteidl2.datacatalog.DataCatalog.ListArtifacts + */ + listArtifacts: { + methodKind: "unary"; + input: typeof ListArtifactsRequestSchema; + output: typeof ListArtifactsResponseSchema; + }, + /** + * Return a paginated list of datasets + * + * @generated from rpc flyteidl2.datacatalog.DataCatalog.ListDatasets + */ + listDatasets: { + methodKind: "unary"; + input: typeof ListDatasetsRequestSchema; + output: typeof ListDatasetsResponseSchema; + }, + /** + * Updates an existing artifact, overwriting the stored artifact data in the underlying blob storage. + * + * @generated from rpc flyteidl2.datacatalog.DataCatalog.UpdateArtifact + */ + updateArtifact: { + methodKind: "unary"; + input: typeof UpdateArtifactRequestSchema; + output: typeof UpdateArtifactResponseSchema; + }, + /** + * Attempts to get or extend a reservation for the corresponding artifact. If one already exists + * (ie. another entity owns the reservation) then that reservation is retrieved. 
+ * Once you acquire a reservation, you need to periodically extend the reservation with an + * identical call. If the reservation is not extended before the defined expiration, it may be + * acquired by another task. + * Note: We may have multiple concurrent tasks with the same signature and the same input that + * try to populate the same artifact at the same time. Thus with reservation, only one task can + * run at a time, until the reservation expires. + * Note: If task A does not extend the reservation in time and the reservation expires, another + * task B may take over the reservation, resulting in two tasks A and B running in parallel. So + * a third task C may get the Artifact from A or B, whichever writes last. + * + * @generated from rpc flyteidl2.datacatalog.DataCatalog.GetOrExtendReservation + */ + getOrExtendReservation: { + methodKind: "unary"; + input: typeof GetOrExtendReservationRequestSchema; + output: typeof GetOrExtendReservationResponseSchema; + }, + /** + * Release the reservation when the task holding the spot fails so that the other tasks + * can grab the spot. 
+ * + * @generated from rpc flyteidl2.datacatalog.DataCatalog.ReleaseReservation + */ + releaseReservation: { + methodKind: "unary"; + input: typeof ReleaseReservationRequestSchema; + output: typeof ReleaseReservationResponseSchema; + }, +}> = /*@__PURE__*/ + serviceDesc(file_flyteidl2_datacatalog_datacatalog, 0); + diff --git a/gen/ts/flyteidl2/event/cloudevents_pb.ts b/gen/ts/flyteidl2/event/cloudevents_pb.ts new file mode 100644 index 0000000000..a57da06ef4 --- /dev/null +++ b/gen/ts/flyteidl2/event/cloudevents_pb.ts @@ -0,0 +1,217 @@ +// @generated by protoc-gen-es v2.2.5 with parameter "target=ts,import_extension=.ts" +// @generated from file flyteidl2/event/cloudevents.proto (package flyteidl2.event, syntax proto3) +/* eslint-disable */ + +import type { GenFile, GenMessage } from "@bufbuild/protobuf/codegenv1"; +import { fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv1"; +import type { ArtifactID } from "../core/artifact_id_pb.ts"; +import { file_flyteidl2_core_artifact_id } from "../core/artifact_id_pb.ts"; +import type { Identifier, TaskExecutionIdentifier, WorkflowExecutionIdentifier } from "../core/identifier_pb.ts"; +import { file_flyteidl2_core_identifier } from "../core/identifier_pb.ts"; +import type { TypedInterface } from "../core/interface_pb.ts"; +import { file_flyteidl2_core_interface } from "../core/interface_pb.ts"; +import type { NodeExecutionEvent, TaskExecutionEvent, WorkflowExecutionEvent } from "./event_pb.ts"; +import { file_flyteidl2_event_event } from "./event_pb.ts"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file flyteidl2/event/cloudevents.proto. 
+ */ +export const file_flyteidl2_event_cloudevents: GenFile = /*@__PURE__*/ + fileDesc("CiFmbHl0ZWlkbDIvZXZlbnQvY2xvdWRldmVudHMucHJvdG8SD2ZseXRlaWRsMi5ldmVudCLPAwobQ2xvdWRFdmVudFdvcmtmbG93RXhlY3V0aW9uEjoKCXJhd19ldmVudBgBIAEoCzInLmZseXRlaWRsMi5ldmVudC5Xb3JrZmxvd0V4ZWN1dGlvbkV2ZW50EjgKEG91dHB1dF9pbnRlcmZhY2UYAiABKAsyHi5mbHl0ZWlkbDIuY29yZS5UeXBlZEludGVyZmFjZRIwCgxhcnRpZmFjdF9pZHMYAyADKAsyGi5mbHl0ZWlkbDIuY29yZS5BcnRpZmFjdElEEkgKE3JlZmVyZW5jZV9leGVjdXRpb24YBCABKAsyKy5mbHl0ZWlkbDIuY29yZS5Xb3JrZmxvd0V4ZWN1dGlvbklkZW50aWZpZXISEQoJcHJpbmNpcGFsGAUgASgJEjIKDmxhdW5jaF9wbGFuX2lkGAYgASgLMhouZmx5dGVpZGwyLmNvcmUuSWRlbnRpZmllchJICgZsYWJlbHMYByADKAsyOC5mbHl0ZWlkbDIuZXZlbnQuQ2xvdWRFdmVudFdvcmtmbG93RXhlY3V0aW9uLkxhYmVsc0VudHJ5Gi0KC0xhYmVsc0VudHJ5EgsKA2tleRgBIAEoCRINCgV2YWx1ZRgCIAEoCToCOAEiuAMKF0Nsb3VkRXZlbnROb2RlRXhlY3V0aW9uEjYKCXJhd19ldmVudBgBIAEoCzIjLmZseXRlaWRsMi5ldmVudC5Ob2RlRXhlY3V0aW9uRXZlbnQSPQoMdGFza19leGVjX2lkGAIgASgLMicuZmx5dGVpZGwyLmNvcmUuVGFza0V4ZWN1dGlvbklkZW50aWZpZXISOAoQb3V0cHV0X2ludGVyZmFjZRgDIAEoCzIeLmZseXRlaWRsMi5jb3JlLlR5cGVkSW50ZXJmYWNlEjAKDGFydGlmYWN0X2lkcxgEIAMoCzIaLmZseXRlaWRsMi5jb3JlLkFydGlmYWN0SUQSEQoJcHJpbmNpcGFsGAUgASgJEjIKDmxhdW5jaF9wbGFuX2lkGAYgASgLMhouZmx5dGVpZGwyLmNvcmUuSWRlbnRpZmllchJECgZsYWJlbHMYByADKAsyNC5mbHl0ZWlkbDIuZXZlbnQuQ2xvdWRFdmVudE5vZGVFeGVjdXRpb24uTGFiZWxzRW50cnkaLQoLTGFiZWxzRW50cnkSCwoDa2V5GAEgASgJEg0KBXZhbHVlGAIgASgJOgI4ASLGAQoXQ2xvdWRFdmVudFRhc2tFeGVjdXRpb24SNgoJcmF3X2V2ZW50GAEgASgLMiMuZmx5dGVpZGwyLmV2ZW50LlRhc2tFeGVjdXRpb25FdmVudBJECgZsYWJlbHMYAiADKAsyNC5mbHl0ZWlkbDIuZXZlbnQuQ2xvdWRFdmVudFRhc2tFeGVjdXRpb24uTGFiZWxzRW50cnkaLQoLTGFiZWxzRW50cnkSCwoDa2V5GAEgASgJEg0KBXZhbHVlGAIgASgJOgI4ASKiAgoYQ2xvdWRFdmVudEV4ZWN1dGlvblN0YXJ0EkEKDGV4ZWN1dGlvbl9pZBgBIAEoCzIrLmZseXRlaWRsMi5jb3JlLldvcmtmbG93RXhlY3V0aW9uSWRlbnRpZmllchIyCg5sYXVuY2hfcGxhbl9pZBgCIAEoCzIaLmZseXRlaWRsMi5jb3JlLklkZW50aWZpZXISLwoLd29ya2Zsb3dfaWQYAyABKAsyGi5mbHl0ZWlkbDIuY29yZS5JZGVudGlmaWVyEjAKDGFydGlmYWN0X2lkcxgEIAMoCzIaLmZseXRlaWRsMi5jb3JlLkFydGlmYWN0SUQSGQoRYXJ0aWZhY3RfdHJhY2tlcnMYBSA
DKAkSEQoJcHJpbmNpcGFsGAYgASgJQrsBChNjb20uZmx5dGVpZGwyLmV2ZW50QhBDbG91ZGV2ZW50c1Byb3RvSAJQAVozZ2l0aHViLmNvbS9mbHl0ZW9yZy9mbHl0ZS92Mi9nZW4vZ28vZmx5dGVpZGwyL2V2ZW50ogIDRkVYqgIPRmx5dGVpZGwyLkV2ZW50ygIPRmx5dGVpZGwyXEV2ZW504gIbRmx5dGVpZGwyXEV2ZW50XEdQQk1ldGFkYXRh6gIQRmx5dGVpZGwyOjpFdmVudGIGcHJvdG8z", [file_flyteidl2_core_artifact_id, file_flyteidl2_core_identifier, file_flyteidl2_core_interface, file_flyteidl2_event_event]); + +/** + * This is the cloud event parallel to the raw WorkflowExecutionEvent message. It's filled in with additional + * information that downstream consumers may find useful. + * + * @generated from message flyteidl2.event.CloudEventWorkflowExecution + */ +export type CloudEventWorkflowExecution = Message<"flyteidl2.event.CloudEventWorkflowExecution"> & { + /** + * @generated from field: flyteidl2.event.WorkflowExecutionEvent raw_event = 1; + */ + rawEvent?: WorkflowExecutionEvent; + + /** + * @generated from field: flyteidl2.core.TypedInterface output_interface = 2; + */ + outputInterface?: TypedInterface; + + /** + * The following are ExecutionMetadata fields + * We can't have the ExecutionMetadata object directly because of import cycle + * + * @generated from field: repeated flyteidl2.core.ArtifactID artifact_ids = 3; + */ + artifactIds: ArtifactID[]; + + /** + * @generated from field: flyteidl2.core.WorkflowExecutionIdentifier reference_execution = 4; + */ + referenceExecution?: WorkflowExecutionIdentifier; + + /** + * @generated from field: string principal = 5; + */ + principal: string; + + /** + * The ID of the LP that generated the execution that generated the Artifact. + * Here for provenance information. + * Launch plan IDs are easier to get than workflow IDs so we'll use these for now. 
+ * + * @generated from field: flyteidl2.core.Identifier launch_plan_id = 6; + */ + launchPlanId?: Identifier; + + /** + * We can't have the ExecutionMetadata object directly because of import cycle + * + * @generated from field: map labels = 7; + */ + labels: { [key: string]: string }; +}; + +/** + * Describes the message flyteidl2.event.CloudEventWorkflowExecution. + * Use `create(CloudEventWorkflowExecutionSchema)` to create a new message. + */ +export const CloudEventWorkflowExecutionSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_event_cloudevents, 0); + +/** + * @generated from message flyteidl2.event.CloudEventNodeExecution + */ +export type CloudEventNodeExecution = Message<"flyteidl2.event.CloudEventNodeExecution"> & { + /** + * @generated from field: flyteidl2.event.NodeExecutionEvent raw_event = 1; + */ + rawEvent?: NodeExecutionEvent; + + /** + * The relevant task execution if applicable + * + * @generated from field: flyteidl2.core.TaskExecutionIdentifier task_exec_id = 2; + */ + taskExecId?: TaskExecutionIdentifier; + + /** + * The typed interface for the task that produced the event. + * + * @generated from field: flyteidl2.core.TypedInterface output_interface = 3; + */ + outputInterface?: TypedInterface; + + /** + * The following are ExecutionMetadata fields + * We can't have the ExecutionMetadata object directly because of import cycle + * + * @generated from field: repeated flyteidl2.core.ArtifactID artifact_ids = 4; + */ + artifactIds: ArtifactID[]; + + /** + * @generated from field: string principal = 5; + */ + principal: string; + + /** + * The ID of the LP that generated the execution that generated the Artifact. + * Here for provenance information. + * Launch plan IDs are easier to get than workflow IDs so we'll use these for now. 
+ * + * @generated from field: flyteidl2.core.Identifier launch_plan_id = 6; + */ + launchPlanId?: Identifier; + + /** + * We can't have the ExecutionMetadata object directly because of import cycle + * + * @generated from field: map labels = 7; + */ + labels: { [key: string]: string }; +}; + +/** + * Describes the message flyteidl2.event.CloudEventNodeExecution. + * Use `create(CloudEventNodeExecutionSchema)` to create a new message. + */ +export const CloudEventNodeExecutionSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_event_cloudevents, 1); + +/** + * @generated from message flyteidl2.event.CloudEventTaskExecution + */ +export type CloudEventTaskExecution = Message<"flyteidl2.event.CloudEventTaskExecution"> & { + /** + * @generated from field: flyteidl2.event.TaskExecutionEvent raw_event = 1; + */ + rawEvent?: TaskExecutionEvent; + + /** + * We can't have the ExecutionMetadata object directly because of import cycle + * + * @generated from field: map labels = 2; + */ + labels: { [key: string]: string }; +}; + +/** + * Describes the message flyteidl2.event.CloudEventTaskExecution. + * Use `create(CloudEventTaskExecutionSchema)` to create a new message. + */ +export const CloudEventTaskExecutionSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_event_cloudevents, 2); + +/** + * This event is to be sent by Admin after it creates an execution. + * + * @generated from message flyteidl2.event.CloudEventExecutionStart + */ +export type CloudEventExecutionStart = Message<"flyteidl2.event.CloudEventExecutionStart"> & { + /** + * The execution created. + * + * @generated from field: flyteidl2.core.WorkflowExecutionIdentifier execution_id = 1; + */ + executionId?: WorkflowExecutionIdentifier; + + /** + * The launch plan used. 
+ * + * @generated from field: flyteidl2.core.Identifier launch_plan_id = 2; + */ + launchPlanId?: Identifier; + + /** + * @generated from field: flyteidl2.core.Identifier workflow_id = 3; + */ + workflowId?: Identifier; + + /** + * Artifact inputs to the workflow execution for which we have the full Artifact ID. These are likely the result of artifact queries that are run. + * + * @generated from field: repeated flyteidl2.core.ArtifactID artifact_ids = 4; + */ + artifactIds: ArtifactID[]; + + /** + * Artifact inputs to the workflow execution for which we only have the tracking bit that's installed into the Literal's metadata by the Artifact service. + * + * @generated from field: repeated string artifact_trackers = 5; + */ + artifactTrackers: string[]; + + /** + * @generated from field: string principal = 6; + */ + principal: string; +}; + +/** + * Describes the message flyteidl2.event.CloudEventExecutionStart. + * Use `create(CloudEventExecutionStartSchema)` to create a new message. + */ +export const CloudEventExecutionStartSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_event_cloudevents, 3); + diff --git a/gen/ts/flyteidl2/event/event_pb.ts b/gen/ts/flyteidl2/event/event_pb.ts new file mode 100644 index 0000000000..f4abccaf5b --- /dev/null +++ b/gen/ts/flyteidl2/event/event_pb.ts @@ -0,0 +1,818 @@ +// @generated by protoc-gen-es v2.2.5 with parameter "target=ts,import_extension=.ts" +// @generated from file flyteidl2/event/event.proto (package flyteidl2.event, syntax proto3) +/* eslint-disable */ + +import type { GenEnum, GenFile, GenMessage } from "@bufbuild/protobuf/codegenv1"; +import { enumDesc, fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv1"; +import type { CatalogCacheStatus, CatalogMetadata, CatalogReservation_Status } from "../core/catalog_pb.ts"; +import { file_flyteidl2_core_catalog } from "../core/catalog_pb.ts"; +import type { ExecutionError, LogContext, NodeExecution_Phase, TaskExecution_Phase, TaskLog, 
WorkflowExecution_Phase } from "../core/execution_pb.ts"; +import { file_flyteidl2_core_execution } from "../core/execution_pb.ts"; +import type { Identifier, NodeExecutionIdentifier, TaskExecutionIdentifier, WorkflowExecutionIdentifier } from "../core/identifier_pb.ts"; +import { file_flyteidl2_core_identifier } from "../core/identifier_pb.ts"; +import type { LiteralMap } from "../core/literals_pb.ts"; +import { file_flyteidl2_core_literals } from "../core/literals_pb.ts"; +import type { Timestamp } from "@bufbuild/protobuf/wkt"; +import { file_google_protobuf_struct, file_google_protobuf_timestamp } from "@bufbuild/protobuf/wkt"; +import type { JsonObject, Message } from "@bufbuild/protobuf"; + +/** + * Describes the file flyteidl2/event/event.proto. + */ +export const file_flyteidl2_event_event: GenFile = /*@__PURE__*/ + fileDesc("ChtmbHl0ZWlkbDIvZXZlbnQvZXZlbnQucHJvdG8SD2ZseXRlaWRsMi5ldmVudCLkAgoWV29ya2Zsb3dFeGVjdXRpb25FdmVudBJBCgxleGVjdXRpb25faWQYASABKAsyKy5mbHl0ZWlkbDIuY29yZS5Xb3JrZmxvd0V4ZWN1dGlvbklkZW50aWZpZXISEwoLcHJvZHVjZXJfaWQYAiABKAkSNgoFcGhhc2UYAyABKA4yJy5mbHl0ZWlkbDIuY29yZS5Xb3JrZmxvd0V4ZWN1dGlvbi5QaGFzZRIvCgtvY2N1cnJlZF9hdBgEIAEoCzIaLmdvb2dsZS5wcm90b2J1Zi5UaW1lc3RhbXASFAoKb3V0cHV0X3VyaRgFIAEoCUgAEi8KBWVycm9yGAYgASgLMh4uZmx5dGVpZGwyLmNvcmUuRXhlY3V0aW9uRXJyb3JIABIxCgtvdXRwdXRfZGF0YRgHIAEoCzIaLmZseXRlaWRsMi5jb3JlLkxpdGVyYWxNYXBIAEIPCg1vdXRwdXRfcmVzdWx0IowIChJOb2RlRXhlY3V0aW9uRXZlbnQSMwoCaWQYASABKAsyJy5mbHl0ZWlkbDIuY29yZS5Ob2RlRXhlY3V0aW9uSWRlbnRpZmllchITCgtwcm9kdWNlcl9pZBgCIAEoCRIyCgVwaGFzZRgDIAEoDjIjLmZseXRlaWRsMi5jb3JlLk5vZGVFeGVjdXRpb24uUGhhc2USLwoLb2NjdXJyZWRfYXQYBCABKAsyGi5nb29nbGUucHJvdG9idWYuVGltZXN0YW1wEhMKCWlucHV0X3VyaRgFIAEoCUgAEjAKCmlucHV0X2RhdGEYFCABKAsyGi5mbHl0ZWlkbDIuY29yZS5MaXRlcmFsTWFwSAASFAoKb3V0cHV0X3VyaRgGIAEoCUgBEi8KBWVycm9yGAcgASgLMh4uZmx5dGVpZGwyLmNvcmUuRXhlY3V0aW9uRXJyb3JIARIxCgtvdXRwdXRfZGF0YRgPIAEoCzIaLmZseXRlaWRsMi5jb3JlLkxpdGVyYWxNYXBIARJHChZ3b3JrZmxvd19ub2RlX21ldGFkYXRhGAggASgLMiUuZmx5dGVpZGwyLmV2ZW50LldvcmtmbG93Tm9kZU1ldGFkYX
RhSAISPwoSdGFza19ub2RlX21ldGFkYXRhGA4gASgLMiEuZmx5dGVpZGwyLmV2ZW50LlRhc2tOb2RlTWV0YWRhdGFIAhJKChRwYXJlbnRfdGFza19tZXRhZGF0YRgJIAEoCzIsLmZseXRlaWRsMi5ldmVudC5QYXJlbnRUYXNrRXhlY3V0aW9uTWV0YWRhdGESSgoUcGFyZW50X25vZGVfbWV0YWRhdGEYCiABKAsyLC5mbHl0ZWlkbDIuZXZlbnQuUGFyZW50Tm9kZUV4ZWN1dGlvbk1ldGFkYXRhEhMKC3JldHJ5X2dyb3VwGAsgASgJEhQKDHNwZWNfbm9kZV9pZBgMIAEoCRIRCglub2RlX25hbWUYDSABKAkSFQoNZXZlbnRfdmVyc2lvbhgQIAEoBRIRCglpc19wYXJlbnQYESABKAgSEgoKaXNfZHluYW1pYxgSIAEoCBIQCghkZWNrX3VyaRgTIAEoCRIvCgtyZXBvcnRlZF9hdBgVIAEoCzIaLmdvb2dsZS5wcm90b2J1Zi5UaW1lc3RhbXASEAoIaXNfYXJyYXkYFiABKAgSMQoNdGFyZ2V0X2VudGl0eRgXIAEoCzIaLmZseXRlaWRsMi5jb3JlLklkZW50aWZpZXISGwoTaXNfaW5fZHluYW1pY19jaGFpbhgYIAEoCBIQCghpc19lYWdlchgZIAEoCEINCgtpbnB1dF92YWx1ZUIPCg1vdXRwdXRfcmVzdWx0QhEKD3RhcmdldF9tZXRhZGF0YSJZChRXb3JrZmxvd05vZGVNZXRhZGF0YRJBCgxleGVjdXRpb25faWQYASABKAsyKy5mbHl0ZWlkbDIuY29yZS5Xb3JrZmxvd0V4ZWN1dGlvbklkZW50aWZpZXIi4QEKEFRhc2tOb2RlTWV0YWRhdGESOAoMY2FjaGVfc3RhdHVzGAEgASgOMiIuZmx5dGVpZGwyLmNvcmUuQ2F0YWxvZ0NhY2hlU3RhdHVzEjQKC2NhdGFsb2dfa2V5GAIgASgLMh8uZmx5dGVpZGwyLmNvcmUuQ2F0YWxvZ01ldGFkYXRhEkUKEnJlc2VydmF0aW9uX3N0YXR1cxgDIAEoDjIpLmZseXRlaWRsMi5jb3JlLkNhdGFsb2dSZXNlcnZhdGlvbi5TdGF0dXMSFgoOY2hlY2twb2ludF91cmkYBCABKAkiUgobUGFyZW50VGFza0V4ZWN1dGlvbk1ldGFkYXRhEjMKAmlkGAEgASgLMicuZmx5dGVpZGwyLmNvcmUuVGFza0V4ZWN1dGlvbklkZW50aWZpZXIiLgobUGFyZW50Tm9kZUV4ZWN1dGlvbk1ldGFkYXRhEg8KB25vZGVfaWQYASABKAkiTgoLRXZlbnRSZWFzb24SDgoGcmVhc29uGAEgASgJEi8KC29jY3VycmVkX2F0GAIgASgLMhouZ29vZ2xlLnByb3RvYnVmLlRpbWVzdGFtcCLzBgoSVGFza0V4ZWN1dGlvbkV2ZW50EisKB3Rhc2tfaWQYASABKAsyGi5mbHl0ZWlkbDIuY29yZS5JZGVudGlmaWVyEkkKGHBhcmVudF9ub2RlX2V4ZWN1dGlvbl9pZBgCIAEoCzInLmZseXRlaWRsMi5jb3JlLk5vZGVFeGVjdXRpb25JZGVudGlmaWVyEhUKDXJldHJ5X2F0dGVtcHQYAyABKA0SMgoFcGhhc2UYBCABKA4yIy5mbHl0ZWlkbDIuY29yZS5UYXNrRXhlY3V0aW9uLlBoYXNlEhMKC3Byb2R1Y2VyX2lkGAUgASgJEiUKBGxvZ3MYBiADKAsyFy5mbHl0ZWlkbDIuY29yZS5UYXNrTG9nEi8KC29jY3VycmVkX2F0GAcgASgLMhouZ29vZ2xlLnByb3RvYnVmLlRpbWVzdGFtcBITCglpbnB1dF91cmkYCCABKAlIABIwCgppbnB1dF9kYXRhGBMgASgLMhouZmx5dGVpZGwyLmNvcmUuTGl0ZX
JhbE1hcEgAEhQKCm91dHB1dF91cmkYCSABKAlIARIvCgVlcnJvchgKIAEoCzIeLmZseXRlaWRsMi5jb3JlLkV4ZWN1dGlvbkVycm9ySAESMQoLb3V0cHV0X2RhdGEYESABKAsyGi5mbHl0ZWlkbDIuY29yZS5MaXRlcmFsTWFwSAESLAoLY3VzdG9tX2luZm8YCyABKAsyFy5nb29nbGUucHJvdG9idWYuU3RydWN0EhUKDXBoYXNlX3ZlcnNpb24YDCABKA0SEgoGcmVhc29uGA0gASgJQgIYARItCgdyZWFzb25zGBUgAygLMhwuZmx5dGVpZGwyLmV2ZW50LkV2ZW50UmVhc29uEhEKCXRhc2tfdHlwZRgOIAEoCRI4CghtZXRhZGF0YRgQIAEoCzImLmZseXRlaWRsMi5ldmVudC5UYXNrRXhlY3V0aW9uTWV0YWRhdGESFQoNZXZlbnRfdmVyc2lvbhgSIAEoBRIvCgtyZXBvcnRlZF9hdBgUIAEoCzIaLmdvb2dsZS5wcm90b2J1Zi5UaW1lc3RhbXASLwoLbG9nX2NvbnRleHQYFiABKAsyGi5mbHl0ZWlkbDIuY29yZS5Mb2dDb250ZXh0Qg0KC2lucHV0X3ZhbHVlQg8KDW91dHB1dF9yZXN1bHQioQMKFEV4dGVybmFsUmVzb3VyY2VJbmZvEhMKC2V4dGVybmFsX2lkGAEgASgJEg0KBWluZGV4GAIgASgNEhUKDXJldHJ5X2F0dGVtcHQYAyABKA0SMgoFcGhhc2UYBCABKA4yIy5mbHl0ZWlkbDIuY29yZS5UYXNrRXhlY3V0aW9uLlBoYXNlEjgKDGNhY2hlX3N0YXR1cxgFIAEoDjIiLmZseXRlaWRsMi5jb3JlLkNhdGFsb2dDYWNoZVN0YXR1cxIlCgRsb2dzGAYgAygLMhcuZmx5dGVpZGwyLmNvcmUuVGFza0xvZxJHChZ3b3JrZmxvd19ub2RlX21ldGFkYXRhGAcgASgLMiUuZmx5dGVpZGwyLmV2ZW50LldvcmtmbG93Tm9kZU1ldGFkYXRhSAASLAoLY3VzdG9tX2luZm8YCCABKAsyFy5nb29nbGUucHJvdG9idWYuU3RydWN0Ei8KC2xvZ19jb250ZXh0GAkgASgLMhouZmx5dGVpZGwyLmNvcmUuTG9nQ29udGV4dEIRCg90YXJnZXRfbWV0YWRhdGEiPwoQUmVzb3VyY2VQb29sSW5mbxIYChBhbGxvY2F0aW9uX3Rva2VuGAEgASgJEhEKCW5hbWVzcGFjZRgCIAEoCSLLAgoVVGFza0V4ZWN1dGlvbk1ldGFkYXRhEhYKDmdlbmVyYXRlZF9uYW1lGAEgASgJEkEKEmV4dGVybmFsX3Jlc291cmNlcxgCIAMoCzIlLmZseXRlaWRsMi5ldmVudC5FeHRlcm5hbFJlc291cmNlSW5mbxI9ChJyZXNvdXJjZV9wb29sX2luZm8YAyADKAsyIS5mbHl0ZWlkbDIuZXZlbnQuUmVzb3VyY2VQb29sSW5mbxIZChFwbHVnaW5faWRlbnRpZmllchgEIAEoCRJMCg5pbnN0YW5jZV9jbGFzcxgQIAEoDjI0LmZseXRlaWRsMi5ldmVudC5UYXNrRXhlY3V0aW9uTWV0YWRhdGEuSW5zdGFuY2VDbGFzcyIvCg1JbnN0YW5jZUNsYXNzEgsKB0RFRkFVTFQQABIRCg1JTlRFUlJVUFRJQkxFEAFCtQEKE2NvbS5mbHl0ZWlkbDIuZXZlbnRCCkV2ZW50UHJvdG9IAlABWjNnaXRodWIuY29tL2ZseXRlb3JnL2ZseXRlL3YyL2dlbi9nby9mbHl0ZWlkbDIvZXZlbnSiAgNGRViqAg9GbHl0ZWlkbDIuRXZlbnTKAg9GbHl0ZWlkbDJcRXZlbnTiAhtGbHl0ZWlkbDJcRXZlbnRcR1BCTWV0YWRhdGHqAhBGbHl0ZWlkbDI6OkV2ZW50Yg
Zwcm90bzM", [file_flyteidl2_core_catalog, file_flyteidl2_core_execution, file_flyteidl2_core_identifier, file_flyteidl2_core_literals, file_google_protobuf_struct, file_google_protobuf_timestamp]); + +/** + * @generated from message flyteidl2.event.WorkflowExecutionEvent + */ +export type WorkflowExecutionEvent = Message<"flyteidl2.event.WorkflowExecutionEvent"> & { + /** + * Workflow execution id + * + * @generated from field: flyteidl2.core.WorkflowExecutionIdentifier execution_id = 1; + */ + executionId?: WorkflowExecutionIdentifier; + + /** + * the id of the originator (Propeller) of the event + * + * @generated from field: string producer_id = 2; + */ + producerId: string; + + /** + * @generated from field: flyteidl2.core.WorkflowExecution.Phase phase = 3; + */ + phase: WorkflowExecution_Phase; + + /** + * This timestamp represents when the original event occurred, it is generated + * by the executor of the workflow. + * + * @generated from field: google.protobuf.Timestamp occurred_at = 4; + */ + occurredAt?: Timestamp; + + /** + * @generated from oneof flyteidl2.event.WorkflowExecutionEvent.output_result + */ + outputResult: { + /** + * URL to the output of the execution, it encodes all the information + * including Cloud source provider. ie., s3://... + * + * @generated from field: string output_uri = 5; + */ + value: string; + case: "outputUri"; + } | { + /** + * Error information for the execution + * + * @generated from field: flyteidl2.core.ExecutionError error = 6; + */ + value: ExecutionError; + case: "error"; + } | { + /** + * Raw output data produced by this workflow execution. + * + * @generated from field: flyteidl2.core.LiteralMap output_data = 7; + */ + value: LiteralMap; + case: "outputData"; + } | { case: undefined; value?: undefined }; +}; + +/** + * Describes the message flyteidl2.event.WorkflowExecutionEvent. + * Use `create(WorkflowExecutionEventSchema)` to create a new message. 
+ */ +export const WorkflowExecutionEventSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_event_event, 0); + +/** + * @generated from message flyteidl2.event.NodeExecutionEvent + */ +export type NodeExecutionEvent = Message<"flyteidl2.event.NodeExecutionEvent"> & { + /** + * Unique identifier for this node execution + * + * @generated from field: flyteidl2.core.NodeExecutionIdentifier id = 1; + */ + id?: NodeExecutionIdentifier; + + /** + * the id of the originator (Propeller) of the event + * + * @generated from field: string producer_id = 2; + */ + producerId: string; + + /** + * @generated from field: flyteidl2.core.NodeExecution.Phase phase = 3; + */ + phase: NodeExecution_Phase; + + /** + * This timestamp represents when the original event occurred, it is generated + * by the executor of the node. + * + * @generated from field: google.protobuf.Timestamp occurred_at = 4; + */ + occurredAt?: Timestamp; + + /** + * @generated from oneof flyteidl2.event.NodeExecutionEvent.input_value + */ + inputValue: { + /** + * @generated from field: string input_uri = 5; + */ + value: string; + case: "inputUri"; + } | { + /** + * Raw input data consumed by this node execution. + * + * @generated from field: flyteidl2.core.LiteralMap input_data = 20; + */ + value: LiteralMap; + case: "inputData"; + } | { case: undefined; value?: undefined }; + + /** + * @generated from oneof flyteidl2.event.NodeExecutionEvent.output_result + */ + outputResult: { + /** + * URL to the output of the execution, it encodes all the information + * including Cloud source provider. ie., s3://... + * + * @generated from field: string output_uri = 6; + */ + value: string; + case: "outputUri"; + } | { + /** + * Error information for the execution + * + * @generated from field: flyteidl2.core.ExecutionError error = 7; + */ + value: ExecutionError; + case: "error"; + } | { + /** + * Raw output data produced by this node execution. 
+ * + * @generated from field: flyteidl2.core.LiteralMap output_data = 15; + */ + value: LiteralMap; + case: "outputData"; + } | { case: undefined; value?: undefined }; + + /** + * Additional metadata to do with this event's node target based + * on the node type + * + * @generated from oneof flyteidl2.event.NodeExecutionEvent.target_metadata + */ + targetMetadata: { + /** + * @generated from field: flyteidl2.event.WorkflowNodeMetadata workflow_node_metadata = 8; + */ + value: WorkflowNodeMetadata; + case: "workflowNodeMetadata"; + } | { + /** + * @generated from field: flyteidl2.event.TaskNodeMetadata task_node_metadata = 14; + */ + value: TaskNodeMetadata; + case: "taskNodeMetadata"; + } | { case: undefined; value?: undefined }; + + /** + * [To be deprecated] Specifies which task (if any) launched this node. + * + * @generated from field: flyteidl2.event.ParentTaskExecutionMetadata parent_task_metadata = 9; + */ + parentTaskMetadata?: ParentTaskExecutionMetadata; + + /** + * Specifies the parent node of the current node execution. Node executions at level zero will not have a parent node. + * + * @generated from field: flyteidl2.event.ParentNodeExecutionMetadata parent_node_metadata = 10; + */ + parentNodeMetadata?: ParentNodeExecutionMetadata; + + /** + * Retry group to indicate grouping of nodes by retries + * + * @generated from field: string retry_group = 11; + */ + retryGroup: string; + + /** + * Identifier of the node in the original workflow/graph + * This maps to value of WorkflowTemplate.nodes[X].id + * + * @generated from field: string spec_node_id = 12; + */ + specNodeId: string; + + /** + * Friendly readable name for the node + * + * @generated from field: string node_name = 13; + */ + nodeName: string; + + /** + * @generated from field: int32 event_version = 16; + */ + eventVersion: number; + + /** + * Whether this node launched a subworkflow. 
+ * + * @generated from field: bool is_parent = 17; + */ + isParent: boolean; + + /** + * Whether this node yielded a dynamic workflow. + * + * @generated from field: bool is_dynamic = 18; + */ + isDynamic: boolean; + + /** + * String location uniquely identifying where the deck HTML file is + * NativeUrl specifies the url in the format of the configured storage provider (e.g. s3://my-bucket/randomstring/suffix.tar) + * + * @generated from field: string deck_uri = 19; + */ + deckUri: string; + + /** + * This timestamp represents the instant when the event was reported by the executing framework. For example, + * when first processing a node the `occurred_at` timestamp should be the instant propeller makes progress, so when + * literal inputs are initially copied. The event however will not be sent until after the copy completes. + * Extracting both of these timestamps facilitates a more accurate portrayal of the evaluation time-series. + * + * @generated from field: google.protobuf.Timestamp reported_at = 21; + */ + reportedAt?: Timestamp; + + /** + * Indicates if this node is an ArrayNode. + * + * @generated from field: bool is_array = 22; + */ + isArray: boolean; + + /** + * So that Admin doesn't have to rebuild the node execution graph to find the target entity, propeller will fill this + * in optionally - currently this is only filled in for subworkflows. This is the ID of the subworkflow corresponding + * to this node execution. It is difficult to find because Admin only sees one node at a time. A subworkflow could be + * nested multiple layers deep, and you'd need to access the correct workflow template to know the target subworkflow. + * + * @generated from field: flyteidl2.core.Identifier target_entity = 23; + */ + targetEntity?: Identifier; + + /** + * Tasks and subworkflows (but not launch plans) that are run within a dynamic task are effectively independent of + * the tasks that are registered in Admin's db. 
Confusingly, they are often identical, but sometimes they are not + * even registered at all. Similar to the target_entity field, at the time Admin receives this event, it has no idea + * if the relevant execution entity is was registered, or dynamic. This field indicates that the target_entity ID, + * as well as task IDs in any corresponding Task Executions, should not be used to looked up the task in Admin's db. + * + * @generated from field: bool is_in_dynamic_chain = 24; + */ + isInDynamicChain: boolean; + + /** + * Whether this node launched an eager task. + * + * @generated from field: bool is_eager = 25; + */ + isEager: boolean; +}; + +/** + * Describes the message flyteidl2.event.NodeExecutionEvent. + * Use `create(NodeExecutionEventSchema)` to create a new message. + */ +export const NodeExecutionEventSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_event_event, 1); + +/** + * For Workflow Nodes we need to send information about the workflow that's launched + * + * @generated from message flyteidl2.event.WorkflowNodeMetadata + */ +export type WorkflowNodeMetadata = Message<"flyteidl2.event.WorkflowNodeMetadata"> & { + /** + * @generated from field: flyteidl2.core.WorkflowExecutionIdentifier execution_id = 1; + */ + executionId?: WorkflowExecutionIdentifier; +}; + +/** + * Describes the message flyteidl2.event.WorkflowNodeMetadata. + * Use `create(WorkflowNodeMetadataSchema)` to create a new message. + */ +export const WorkflowNodeMetadataSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_event_event, 2); + +/** + * @generated from message flyteidl2.event.TaskNodeMetadata + */ +export type TaskNodeMetadata = Message<"flyteidl2.event.TaskNodeMetadata"> & { + /** + * Captures the status of caching for this execution. 
+ * + * @generated from field: flyteidl2.core.CatalogCacheStatus cache_status = 1; + */ + cacheStatus: CatalogCacheStatus; + + /** + * This structure carries the catalog artifact information + * + * @generated from field: flyteidl2.core.CatalogMetadata catalog_key = 2; + */ + catalogKey?: CatalogMetadata; + + /** + * Captures the status of cache reservations for this execution. + * + * @generated from field: flyteidl2.core.CatalogReservation.Status reservation_status = 3; + */ + reservationStatus: CatalogReservation_Status; + + /** + * The latest checkpoint location + * + * @generated from field: string checkpoint_uri = 4; + */ + checkpointUri: string; +}; + +/** + * Describes the message flyteidl2.event.TaskNodeMetadata. + * Use `create(TaskNodeMetadataSchema)` to create a new message. + */ +export const TaskNodeMetadataSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_event_event, 3); + +/** + * @generated from message flyteidl2.event.ParentTaskExecutionMetadata + */ +export type ParentTaskExecutionMetadata = Message<"flyteidl2.event.ParentTaskExecutionMetadata"> & { + /** + * @generated from field: flyteidl2.core.TaskExecutionIdentifier id = 1; + */ + id?: TaskExecutionIdentifier; +}; + +/** + * Describes the message flyteidl2.event.ParentTaskExecutionMetadata. + * Use `create(ParentTaskExecutionMetadataSchema)` to create a new message. + */ +export const ParentTaskExecutionMetadataSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_event_event, 4); + +/** + * @generated from message flyteidl2.event.ParentNodeExecutionMetadata + */ +export type ParentNodeExecutionMetadata = Message<"flyteidl2.event.ParentNodeExecutionMetadata"> & { + /** + * Unique identifier of the parent node id within the execution + * This is value of core.NodeExecutionIdentifier.node_id of the parent node + * + * @generated from field: string node_id = 1; + */ + nodeId: string; +}; + +/** + * Describes the message flyteidl2.event.ParentNodeExecutionMetadata. 
+ * Use `create(ParentNodeExecutionMetadataSchema)` to create a new message. + */ +export const ParentNodeExecutionMetadataSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_event_event, 5); + +/** + * @generated from message flyteidl2.event.EventReason + */ +export type EventReason = Message<"flyteidl2.event.EventReason"> & { + /** + * An explanation for this event + * + * @generated from field: string reason = 1; + */ + reason: string; + + /** + * The time this reason occurred + * + * @generated from field: google.protobuf.Timestamp occurred_at = 2; + */ + occurredAt?: Timestamp; +}; + +/** + * Describes the message flyteidl2.event.EventReason. + * Use `create(EventReasonSchema)` to create a new message. + */ +export const EventReasonSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_event_event, 6); + +/** + * Plugin specific execution event information. For tasks like Python, Hive, Spark, DynamicJob. + * + * @generated from message flyteidl2.event.TaskExecutionEvent + */ +export type TaskExecutionEvent = Message<"flyteidl2.event.TaskExecutionEvent"> & { + /** + * ID of the task. In combination with the retryAttempt this will indicate + * the task execution uniquely for a given parent node execution. 
+ * + * @generated from field: flyteidl2.core.Identifier task_id = 1; + */ + taskId?: Identifier; + + /** + * A task execution is always kicked off by a node execution, the event consumer + * will use the parent_id to relate the task to it's parent node execution + * + * @generated from field: flyteidl2.core.NodeExecutionIdentifier parent_node_execution_id = 2; + */ + parentNodeExecutionId?: NodeExecutionIdentifier; + + /** + * retry attempt number for this task, ie., 2 for the second attempt + * + * @generated from field: uint32 retry_attempt = 3; + */ + retryAttempt: number; + + /** + * Phase associated with the event + * + * @generated from field: flyteidl2.core.TaskExecution.Phase phase = 4; + */ + phase: TaskExecution_Phase; + + /** + * id of the process that sent this event, mainly for trace debugging + * + * @generated from field: string producer_id = 5; + */ + producerId: string; + + /** + * log information for the task execution + * + * @generated from field: repeated flyteidl2.core.TaskLog logs = 6; + */ + logs: TaskLog[]; + + /** + * This timestamp represents when the original event occurred, it is generated + * by the executor of the task. + * + * @generated from field: google.protobuf.Timestamp occurred_at = 7; + */ + occurredAt?: Timestamp; + + /** + * @generated from oneof flyteidl2.event.TaskExecutionEvent.input_value + */ + inputValue: { + /** + * URI of the input file, it encodes all the information + * including Cloud source provider. ie., s3://... + * + * @generated from field: string input_uri = 8; + */ + value: string; + case: "inputUri"; + } | { + /** + * Raw input data consumed by this task execution. 
+ * + * @generated from field: flyteidl2.core.LiteralMap input_data = 19; + */ + value: LiteralMap; + case: "inputData"; + } | { case: undefined; value?: undefined }; + + /** + * @generated from oneof flyteidl2.event.TaskExecutionEvent.output_result + */ + outputResult: { + /** + * URI to the output of the execution, it will be in a format that encodes all the information + * including Cloud source provider. ie., s3://... + * + * @generated from field: string output_uri = 9; + */ + value: string; + case: "outputUri"; + } | { + /** + * Error information for the execution + * + * @generated from field: flyteidl2.core.ExecutionError error = 10; + */ + value: ExecutionError; + case: "error"; + } | { + /** + * Raw output data produced by this task execution. + * + * @generated from field: flyteidl2.core.LiteralMap output_data = 17; + */ + value: LiteralMap; + case: "outputData"; + } | { case: undefined; value?: undefined }; + + /** + * Custom data that the task plugin sends back. This is extensible to allow various plugins in the system. + * + * @generated from field: google.protobuf.Struct custom_info = 11; + */ + customInfo?: JsonObject; + + /** + * Some phases, like RUNNING, can send multiple events with changed metadata (new logs, additional custom_info, etc) + * that should be recorded regardless of the lack of phase change. + * The version field should be incremented when metadata changes across the duration of an individual phase. + * + * @generated from field: uint32 phase_version = 12; + */ + phaseVersion: number; + + /** + * An optional explanation for the phase transition. + * Deprecated: Use reasons instead. + * + * @generated from field: string reason = 13 [deprecated = true]; + * @deprecated + */ + reason: string; + + /** + * An optional list of explanations for the phase transition. + * + * @generated from field: repeated flyteidl2.event.EventReason reasons = 21; + */ + reasons: EventReason[]; + + /** + * A predefined yet extensible Task type identifier. 
If the task definition is already registered in flyte admin + * this type will be identical, but not all task executions necessarily use pre-registered definitions and this + * type is useful to render the task in the UI, filter task executions, etc. + * + * @generated from field: string task_type = 14; + */ + taskType: string; + + /** + * Metadata around how a task was executed. + * + * @generated from field: flyteidl2.event.TaskExecutionMetadata metadata = 16; + */ + metadata?: TaskExecutionMetadata; + + /** + * The event version is used to indicate versioned changes in how data is reported using this + * proto message. For example, event_verison > 0 means that maps tasks report logs using the + * TaskExecutionMetadata ExternalResourceInfo fields for each subtask rather than the TaskLog + * in this message. + * + * @generated from field: int32 event_version = 18; + */ + eventVersion: number; + + /** + * This timestamp represents the instant when the event was reported by the executing framework. For example, a k8s + * pod task may be marked completed at (ie. `occurred_at`) the instant the container running user code completes, + * but this event will not be reported until the pod is marked as completed. Extracting both of these timestamps + * facilitates a more accurate portrayal of the evaluation time-series. + * + * @generated from field: google.protobuf.Timestamp reported_at = 20; + */ + reportedAt?: Timestamp; + + /** + * Contains metadata required to identify logs related to this task execution + * + * @generated from field: flyteidl2.core.LogContext log_context = 22; + */ + logContext?: LogContext; +}; + +/** + * Describes the message flyteidl2.event.TaskExecutionEvent. + * Use `create(TaskExecutionEventSchema)` to create a new message. 
+ */ +export const TaskExecutionEventSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_event_event, 7); + +/** + * This message contains metadata about external resources produced or used by a specific task execution. + * + * @generated from message flyteidl2.event.ExternalResourceInfo + */ +export type ExternalResourceInfo = Message<"flyteidl2.event.ExternalResourceInfo"> & { + /** + * Identifier for an external resource created by this task execution, for example Qubole query ID or presto query ids. + * + * @generated from field: string external_id = 1; + */ + externalId: string; + + /** + * A unique index for the external resource with respect to all external resources for this task. Although the + * identifier may change between task reporting events or retries, this will remain the same to enable aggregating + * information from multiple reports. + * + * @generated from field: uint32 index = 2; + */ + index: number; + + /** + * Retry attempt number for this external resource, ie., 2 for the second attempt + * + * @generated from field: uint32 retry_attempt = 3; + */ + retryAttempt: number; + + /** + * Phase associated with the external resource + * + * @generated from field: flyteidl2.core.TaskExecution.Phase phase = 4; + */ + phase: TaskExecution_Phase; + + /** + * Captures the status of caching for this external resource execution. + * + * @generated from field: flyteidl2.core.CatalogCacheStatus cache_status = 5; + */ + cacheStatus: CatalogCacheStatus; + + /** + * log information for the external resource execution + * + * @generated from field: repeated flyteidl2.core.TaskLog logs = 6; + */ + logs: TaskLog[]; + + /** + * Additional metadata to do with this event's node target based on the node type. We are + * explicitly not including the task_node_metadata here because it is not clear if it is needed. + * If we decide to include in the future, we should deprecate the cache_status field. 
+ * + * @generated from oneof flyteidl2.event.ExternalResourceInfo.target_metadata + */ + targetMetadata: { + /** + * @generated from field: flyteidl2.event.WorkflowNodeMetadata workflow_node_metadata = 7; + */ + value: WorkflowNodeMetadata; + case: "workflowNodeMetadata"; + } | { case: undefined; value?: undefined }; + + /** + * Extensible field for custom, plugin-specific info + * + * @generated from field: google.protobuf.Struct custom_info = 8; + */ + customInfo?: JsonObject; + + /** + * Contains metadata required to identify logs related to this task execution + * + * @generated from field: flyteidl2.core.LogContext log_context = 9; + */ + logContext?: LogContext; +}; + +/** + * Describes the message flyteidl2.event.ExternalResourceInfo. + * Use `create(ExternalResourceInfoSchema)` to create a new message. + */ +export const ExternalResourceInfoSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_event_event, 8); + +/** + * This message holds task execution metadata specific to resource allocation used to manage concurrent + * executions for a project namespace. + * + * @generated from message flyteidl2.event.ResourcePoolInfo + */ +export type ResourcePoolInfo = Message<"flyteidl2.event.ResourcePoolInfo"> & { + /** + * Unique resource ID used to identify this execution when allocating a token. + * + * @generated from field: string allocation_token = 1; + */ + allocationToken: string; + + /** + * Namespace under which this task execution requested an allocation token. + * + * @generated from field: string namespace = 2; + */ + namespace: string; +}; + +/** + * Describes the message flyteidl2.event.ResourcePoolInfo. + * Use `create(ResourcePoolInfoSchema)` to create a new message. + */ +export const ResourcePoolInfoSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_event_event, 9); + +/** + * Holds metadata around how a task was executed. 
+ * As a task transitions across event phases during execution some attributes, such its generated name, generated external resources, + * and more may grow in size but not change necessarily based on the phase transition that sparked the event update. + * Metadata is a container for these attributes across the task execution lifecycle. + * + * @generated from message flyteidl2.event.TaskExecutionMetadata + */ +export type TaskExecutionMetadata = Message<"flyteidl2.event.TaskExecutionMetadata"> & { + /** + * Unique, generated name for this task execution used by the backend. + * + * @generated from field: string generated_name = 1; + */ + generatedName: string; + + /** + * Additional data on external resources on other back-ends or platforms (e.g. Hive, Qubole, etc) launched by this task execution. + * + * @generated from field: repeated flyteidl2.event.ExternalResourceInfo external_resources = 2; + */ + externalResources: ExternalResourceInfo[]; + + /** + * Includes additional data on concurrent resource management used during execution.. + * This is a repeated field because a plugin can request multiple resource allocations during execution. + * + * @generated from field: repeated flyteidl2.event.ResourcePoolInfo resource_pool_info = 3; + */ + resourcePoolInfo: ResourcePoolInfo[]; + + /** + * The identifier of the plugin used to execute this task. + * + * @generated from field: string plugin_identifier = 4; + */ + pluginIdentifier: string; + + /** + * @generated from field: flyteidl2.event.TaskExecutionMetadata.InstanceClass instance_class = 16; + */ + instanceClass: TaskExecutionMetadata_InstanceClass; +}; + +/** + * Describes the message flyteidl2.event.TaskExecutionMetadata. + * Use `create(TaskExecutionMetadataSchema)` to create a new message. + */ +export const TaskExecutionMetadataSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_event_event, 10); + +/** + * Includes the broad category of machine used for this specific task execution. 
+ * + * @generated from enum flyteidl2.event.TaskExecutionMetadata.InstanceClass + */ +export enum TaskExecutionMetadata_InstanceClass { + /** + * The default instance class configured for the flyte application platform. + * + * @generated from enum value: DEFAULT = 0; + */ + DEFAULT = 0, + + /** + * The instance class configured for interruptible tasks. + * + * @generated from enum value: INTERRUPTIBLE = 1; + */ + INTERRUPTIBLE = 1, +} + +/** + * Describes the enum flyteidl2.event.TaskExecutionMetadata.InstanceClass. + */ +export const TaskExecutionMetadata_InstanceClassSchema: GenEnum = /*@__PURE__*/ + enumDesc(file_flyteidl2_event_event, 10, 0); + diff --git a/gen/ts/flyteidl2/plugins/common_pb.ts b/gen/ts/flyteidl2/plugins/common_pb.ts new file mode 100644 index 0000000000..e02ac7ec9a --- /dev/null +++ b/gen/ts/flyteidl2/plugins/common_pb.ts @@ -0,0 +1,82 @@ +// @generated by protoc-gen-es v2.2.5 with parameter "target=ts,import_extension=.ts" +// @generated from file flyteidl2/plugins/common.proto (package flyteidl2.plugins, syntax proto3) +/* eslint-disable */ + +import type { GenEnum, GenFile, GenMessage } from "@bufbuild/protobuf/codegenv1"; +import { enumDesc, fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv1"; +import type { Resources } from "../core/tasks_pb.ts"; +import { file_flyteidl2_core_tasks } from "../core/tasks_pb.ts"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file flyteidl2/plugins/common.proto. 
+ */ +export const file_flyteidl2_plugins_common: GenFile = /*@__PURE__*/ + fileDesc("Ch5mbHl0ZWlkbDIvcGx1Z2lucy9jb21tb24ucHJvdG8SEWZseXRlaWRsMi5wbHVnaW5zIpwBChFDb21tb25SZXBsaWNhU3BlYxIQCghyZXBsaWNhcxgBIAEoBRINCgVpbWFnZRgCIAEoCRIsCglyZXNvdXJjZXMYAyABKAsyGS5mbHl0ZWlkbDIuY29yZS5SZXNvdXJjZXMSOAoOcmVzdGFydF9wb2xpY3kYBCABKA4yIC5mbHl0ZWlkbDIucGx1Z2lucy5SZXN0YXJ0UG9saWN5KmMKDVJlc3RhcnRQb2xpY3kSGAoUUkVTVEFSVF9QT0xJQ1lfTkVWRVIQABIdChlSRVNUQVJUX1BPTElDWV9PTl9GQUlMVVJFEAESGQoVUkVTVEFSVF9QT0xJQ1lfQUxXQVlTEAJCwgEKFWNvbS5mbHl0ZWlkbDIucGx1Z2luc0ILQ29tbW9uUHJvdG9IAlABWjVnaXRodWIuY29tL2ZseXRlb3JnL2ZseXRlL3YyL2dlbi9nby9mbHl0ZWlkbDIvcGx1Z2luc6ICA0ZQWKoCEUZseXRlaWRsMi5QbHVnaW5zygIRRmx5dGVpZGwyXFBsdWdpbnPiAh1GbHl0ZWlkbDJcUGx1Z2luc1xHUEJNZXRhZGF0YeoCEkZseXRlaWRsMjo6UGx1Z2luc2IGcHJvdG8z", [file_flyteidl2_core_tasks]); + +/** + * @generated from message flyteidl2.plugins.CommonReplicaSpec + */ +export type CommonReplicaSpec = Message<"flyteidl2.plugins.CommonReplicaSpec"> & { + /** + * Number of replicas + * + * @generated from field: int32 replicas = 1; + */ + replicas: number; + + /** + * Image used for the replica group + * + * @generated from field: string image = 2; + */ + image: string; + + /** + * Resources required for the replica group + * + * @generated from field: flyteidl2.core.Resources resources = 3; + */ + resources?: Resources; + + /** + * RestartPolicy determines whether pods will be restarted when they exit + * + * @generated from field: flyteidl2.plugins.RestartPolicy restart_policy = 4; + */ + restartPolicy: RestartPolicy; +}; + +/** + * Describes the message flyteidl2.plugins.CommonReplicaSpec. + * Use `create(CommonReplicaSpecSchema)` to create a new message. 
+ */ +export const CommonReplicaSpecSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_plugins_common, 0); + +/** + * @generated from enum flyteidl2.plugins.RestartPolicy + */ +export enum RestartPolicy { + /** + * @generated from enum value: RESTART_POLICY_NEVER = 0; + */ + NEVER = 0, + + /** + * @generated from enum value: RESTART_POLICY_ON_FAILURE = 1; + */ + ON_FAILURE = 1, + + /** + * @generated from enum value: RESTART_POLICY_ALWAYS = 2; + */ + ALWAYS = 2, +} + +/** + * Describes the enum flyteidl2.plugins.RestartPolicy. + */ +export const RestartPolicySchema: GenEnum = /*@__PURE__*/ + enumDesc(file_flyteidl2_plugins_common, 0); + diff --git a/gen/ts/flyteidl2/plugins/kubeflow/common_pb.ts b/gen/ts/flyteidl2/plugins/kubeflow/common_pb.ts new file mode 100644 index 0000000000..cc643853ee --- /dev/null +++ b/gen/ts/flyteidl2/plugins/kubeflow/common_pb.ts @@ -0,0 +1,81 @@ +// @generated by protoc-gen-es v2.2.5 with parameter "target=ts,import_extension=.ts" +// @generated from file flyteidl2/plugins/kubeflow/common.proto (package flyteidl2.plugins.kubeflow, syntax proto3) +/* eslint-disable */ + +import type { GenEnum, GenFile, GenMessage } from "@bufbuild/protobuf/codegenv1"; +import { enumDesc, fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv1"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file flyteidl2/plugins/kubeflow/common.proto. 
+ */ +export const file_flyteidl2_plugins_kubeflow_common: GenFile = /*@__PURE__*/ + fileDesc("CidmbHl0ZWlkbDIvcGx1Z2lucy9rdWJlZmxvdy9jb21tb24ucHJvdG8SGmZseXRlaWRsMi5wbHVnaW5zLmt1YmVmbG93Iq0BCglSdW5Qb2xpY3kSRAoQY2xlYW5fcG9kX3BvbGljeRgBIAEoDjIqLmZseXRlaWRsMi5wbHVnaW5zLmt1YmVmbG93LkNsZWFuUG9kUG9saWN5EiIKGnR0bF9zZWNvbmRzX2FmdGVyX2ZpbmlzaGVkGAIgASgFEh8KF2FjdGl2ZV9kZWFkbGluZV9zZWNvbmRzGAMgASgFEhUKDWJhY2tvZmZfbGltaXQYBCABKAUqYAoOQ2xlYW5Qb2RQb2xpY3kSGAoUQ0xFQU5QT0RfUE9MSUNZX05PTkUQABIbChdDTEVBTlBPRF9QT0xJQ1lfUlVOTklORxABEhcKE0NMRUFOUE9EX1BPTElDWV9BTEwQAkL5AQoeY29tLmZseXRlaWRsMi5wbHVnaW5zLmt1YmVmbG93QgtDb21tb25Qcm90b0gCUAFaPmdpdGh1Yi5jb20vZmx5dGVvcmcvZmx5dGUvdjIvZ2VuL2dvL2ZseXRlaWRsMi9wbHVnaW5zL2t1YmVmbG93ogIDRlBLqgIaRmx5dGVpZGwyLlBsdWdpbnMuS3ViZWZsb3fKAhpGbHl0ZWlkbDJcUGx1Z2luc1xLdWJlZmxvd+ICJkZseXRlaWRsMlxQbHVnaW5zXEt1YmVmbG93XEdQQk1ldGFkYXRh6gIcRmx5dGVpZGwyOjpQbHVnaW5zOjpLdWJlZmxvd2IGcHJvdG8z"); + +/** + * @generated from message flyteidl2.plugins.kubeflow.RunPolicy + */ +export type RunPolicy = Message<"flyteidl2.plugins.kubeflow.RunPolicy"> & { + /** + * Defines the policy to kill pods after the job completes. Default to None. + * + * @generated from field: flyteidl2.plugins.kubeflow.CleanPodPolicy clean_pod_policy = 1; + */ + cleanPodPolicy: CleanPodPolicy; + + /** + * TTL to clean up jobs. Default to infinite. + * + * @generated from field: int32 ttl_seconds_after_finished = 2; + */ + ttlSecondsAfterFinished: number; + + /** + * Specifies the duration in seconds relative to the startTime that the job may be active + * before the system tries to terminate it; value must be positive integer. + * + * @generated from field: int32 active_deadline_seconds = 3; + */ + activeDeadlineSeconds: number; + + /** + * Number of retries before marking this job failed. + * + * @generated from field: int32 backoff_limit = 4; + */ + backoffLimit: number; +}; + +/** + * Describes the message flyteidl2.plugins.kubeflow.RunPolicy. + * Use `create(RunPolicySchema)` to create a new message. 
+ */ +export const RunPolicySchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_plugins_kubeflow_common, 0); + +/** + * @generated from enum flyteidl2.plugins.kubeflow.CleanPodPolicy + */ +export enum CleanPodPolicy { + /** + * @generated from enum value: CLEANPOD_POLICY_NONE = 0; + */ + CLEANPOD_POLICY_NONE = 0, + + /** + * @generated from enum value: CLEANPOD_POLICY_RUNNING = 1; + */ + CLEANPOD_POLICY_RUNNING = 1, + + /** + * @generated from enum value: CLEANPOD_POLICY_ALL = 2; + */ + CLEANPOD_POLICY_ALL = 2, +} + +/** + * Describes the enum flyteidl2.plugins.kubeflow.CleanPodPolicy. + */ +export const CleanPodPolicySchema: GenEnum = /*@__PURE__*/ + enumDesc(file_flyteidl2_plugins_kubeflow_common, 0); + diff --git a/gen/ts/flyteidl2/plugins/kubeflow/mpi_pb.ts b/gen/ts/flyteidl2/plugins/kubeflow/mpi_pb.ts new file mode 100644 index 0000000000..d9ffcc2d6c --- /dev/null +++ b/gen/ts/flyteidl2/plugins/kubeflow/mpi_pb.ts @@ -0,0 +1,125 @@ +// @generated by protoc-gen-es v2.2.5 with parameter "target=ts,import_extension=.ts" +// @generated from file flyteidl2/plugins/kubeflow/mpi.proto (package flyteidl2.plugins.kubeflow, syntax proto3) +/* eslint-disable */ + +import type { GenFile, GenMessage } from "@bufbuild/protobuf/codegenv1"; +import { fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv1"; +import type { Resources } from "../../core/tasks_pb.ts"; +import { file_flyteidl2_core_tasks } from "../../core/tasks_pb.ts"; +import type { CommonReplicaSpec, RestartPolicy } from "../common_pb.ts"; +import { file_flyteidl2_plugins_common } from "../common_pb.ts"; +import type { RunPolicy } from "./common_pb.ts"; +import { file_flyteidl2_plugins_kubeflow_common } from "./common_pb.ts"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file flyteidl2/plugins/kubeflow/mpi.proto. 
+ */ +export const file_flyteidl2_plugins_kubeflow_mpi: GenFile = /*@__PURE__*/ + fileDesc("CiRmbHl0ZWlkbDIvcGx1Z2lucy9rdWJlZmxvdy9tcGkucHJvdG8SGmZseXRlaWRsMi5wbHVnaW5zLmt1YmVmbG93IpgCChpEaXN0cmlidXRlZE1QSVRyYWluaW5nVGFzaxJWCg93b3JrZXJfcmVwbGljYXMYASABKAsyPS5mbHl0ZWlkbDIucGx1Z2lucy5rdWJlZmxvdy5EaXN0cmlidXRlZE1QSVRyYWluaW5nUmVwbGljYVNwZWMSWAoRbGF1bmNoZXJfcmVwbGljYXMYAiABKAsyPS5mbHl0ZWlkbDIucGx1Z2lucy5rdWJlZmxvdy5EaXN0cmlidXRlZE1QSVRyYWluaW5nUmVwbGljYVNwZWMSOQoKcnVuX3BvbGljeRgDIAEoCzIlLmZseXRlaWRsMi5wbHVnaW5zLmt1YmVmbG93LlJ1blBvbGljeRINCgVzbG90cxgEIAEoBSKDAgohRGlzdHJpYnV0ZWRNUElUcmFpbmluZ1JlcGxpY2FTcGVjEhQKCHJlcGxpY2FzGAEgASgFQgIYARIRCgVpbWFnZRgCIAEoCUICGAESMAoJcmVzb3VyY2VzGAMgASgLMhkuZmx5dGVpZGwyLmNvcmUuUmVzb3VyY2VzQgIYARI8Cg5yZXN0YXJ0X3BvbGljeRgEIAEoDjIgLmZseXRlaWRsMi5wbHVnaW5zLlJlc3RhcnRQb2xpY3lCAhgBEg8KB2NvbW1hbmQYBSADKAkSNAoGY29tbW9uGAYgASgLMiQuZmx5dGVpZGwyLnBsdWdpbnMuQ29tbW9uUmVwbGljYVNwZWNC9gEKHmNvbS5mbHl0ZWlkbDIucGx1Z2lucy5rdWJlZmxvd0IITXBpUHJvdG9IAlABWj5naXRodWIuY29tL2ZseXRlb3JnL2ZseXRlL3YyL2dlbi9nby9mbHl0ZWlkbDIvcGx1Z2lucy9rdWJlZmxvd6ICA0ZQS6oCGkZseXRlaWRsMi5QbHVnaW5zLkt1YmVmbG93ygIaRmx5dGVpZGwyXFBsdWdpbnNcS3ViZWZsb3fiAiZGbHl0ZWlkbDJcUGx1Z2luc1xLdWJlZmxvd1xHUEJNZXRhZGF0YeoCHEZseXRlaWRsMjo6UGx1Z2luczo6S3ViZWZsb3diBnByb3RvMw", [file_flyteidl2_core_tasks, file_flyteidl2_plugins_common, file_flyteidl2_plugins_kubeflow_common]); + +/** + * Proto for plugin that enables distributed training using https://github.com/kubeflow/mpi-operator + * + * @generated from message flyteidl2.plugins.kubeflow.DistributedMPITrainingTask + */ +export type DistributedMPITrainingTask = Message<"flyteidl2.plugins.kubeflow.DistributedMPITrainingTask"> & { + /** + * Worker replicas spec + * + * @generated from field: flyteidl2.plugins.kubeflow.DistributedMPITrainingReplicaSpec worker_replicas = 1; + */ + workerReplicas?: DistributedMPITrainingReplicaSpec; + + /** + * Master replicas spec + * + * @generated from field: flyteidl2.plugins.kubeflow.DistributedMPITrainingReplicaSpec 
launcher_replicas = 2; + */ + launcherReplicas?: DistributedMPITrainingReplicaSpec; + + /** + * RunPolicy encapsulates various runtime policies of the distributed training + * job, for example how to clean up resources and how long the job can stay + * active. + * + * @generated from field: flyteidl2.plugins.kubeflow.RunPolicy run_policy = 3; + */ + runPolicy?: RunPolicy; + + /** + * Number of slots per worker + * + * @generated from field: int32 slots = 4; + */ + slots: number; +}; + +/** + * Describes the message flyteidl2.plugins.kubeflow.DistributedMPITrainingTask. + * Use `create(DistributedMPITrainingTaskSchema)` to create a new message. + */ +export const DistributedMPITrainingTaskSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_plugins_kubeflow_mpi, 0); + +/** + * Replica specification for distributed MPI training + * + * @generated from message flyteidl2.plugins.kubeflow.DistributedMPITrainingReplicaSpec + */ +export type DistributedMPITrainingReplicaSpec = Message<"flyteidl2.plugins.kubeflow.DistributedMPITrainingReplicaSpec"> & { + /** + * 1~4 deprecated. Use common instead. 
+ * Number of replicas + * + * @generated from field: int32 replicas = 1 [deprecated = true]; + * @deprecated + */ + replicas: number; + + /** + * Image used for the replica group + * + * @generated from field: string image = 2 [deprecated = true]; + * @deprecated + */ + image: string; + + /** + * Resources required for the replica group + * + * @generated from field: flyteidl2.core.Resources resources = 3 [deprecated = true]; + * @deprecated + */ + resources?: Resources; + + /** + * Restart policy determines whether pods will be restarted when they exit + * + * @generated from field: flyteidl2.plugins.RestartPolicy restart_policy = 4 [deprecated = true]; + * @deprecated + */ + restartPolicy: RestartPolicy; + + /** + * MPI sometimes requires different command set for different replica groups + * + * @generated from field: repeated string command = 5; + */ + command: string[]; + + /** + * The common replica spec + * + * @generated from field: flyteidl2.plugins.CommonReplicaSpec common = 6; + */ + common?: CommonReplicaSpec; +}; + +/** + * Describes the message flyteidl2.plugins.kubeflow.DistributedMPITrainingReplicaSpec. + * Use `create(DistributedMPITrainingReplicaSpecSchema)` to create a new message. 
+ */ +export const DistributedMPITrainingReplicaSpecSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_plugins_kubeflow_mpi, 1); + diff --git a/gen/ts/flyteidl2/plugins/kubeflow/pytorch_pb.ts b/gen/ts/flyteidl2/plugins/kubeflow/pytorch_pb.ts new file mode 100644 index 0000000000..19d06e07b1 --- /dev/null +++ b/gen/ts/flyteidl2/plugins/kubeflow/pytorch_pb.ts @@ -0,0 +1,156 @@ +// @generated by protoc-gen-es v2.2.5 with parameter "target=ts,import_extension=.ts" +// @generated from file flyteidl2/plugins/kubeflow/pytorch.proto (package flyteidl2.plugins.kubeflow, syntax proto3) +/* eslint-disable */ + +import type { GenFile, GenMessage } from "@bufbuild/protobuf/codegenv1"; +import { fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv1"; +import type { Resources } from "../../core/tasks_pb.ts"; +import { file_flyteidl2_core_tasks } from "../../core/tasks_pb.ts"; +import type { CommonReplicaSpec, RestartPolicy } from "../common_pb.ts"; +import { file_flyteidl2_plugins_common } from "../common_pb.ts"; +import type { RunPolicy } from "./common_pb.ts"; +import { file_flyteidl2_plugins_kubeflow_common } from "./common_pb.ts"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file flyteidl2/plugins/kubeflow/pytorch.proto. 
+ */ +export const file_flyteidl2_plugins_kubeflow_pytorch: GenFile = /*@__PURE__*/ + fileDesc("CihmbHl0ZWlkbDIvcGx1Z2lucy9rdWJlZmxvdy9weXRvcmNoLnByb3RvEhpmbHl0ZWlkbDIucGx1Z2lucy5rdWJlZmxvdyJ/Cg1FbGFzdGljQ29uZmlnEhQKDHJkenZfYmFja2VuZBgBIAEoCRIUCgxtaW5fcmVwbGljYXMYAiABKAUSFAoMbWF4X3JlcGxpY2FzGAMgASgFEhYKDm5wcm9jX3Blcl9ub2RlGAQgASgFEhQKDG1heF9yZXN0YXJ0cxgFIAEoBSLWAgoeRGlzdHJpYnV0ZWRQeVRvcmNoVHJhaW5pbmdUYXNrEloKD3dvcmtlcl9yZXBsaWNhcxgBIAEoCzJBLmZseXRlaWRsMi5wbHVnaW5zLmt1YmVmbG93LkRpc3RyaWJ1dGVkUHlUb3JjaFRyYWluaW5nUmVwbGljYVNwZWMSWgoPbWFzdGVyX3JlcGxpY2FzGAIgASgLMkEuZmx5dGVpZGwyLnBsdWdpbnMua3ViZWZsb3cuRGlzdHJpYnV0ZWRQeVRvcmNoVHJhaW5pbmdSZXBsaWNhU3BlYxI5CgpydW5fcG9saWN5GAMgASgLMiUuZmx5dGVpZGwyLnBsdWdpbnMua3ViZWZsb3cuUnVuUG9saWN5EkEKDmVsYXN0aWNfY29uZmlnGAQgASgLMikuZmx5dGVpZGwyLnBsdWdpbnMua3ViZWZsb3cuRWxhc3RpY0NvbmZpZyL2AQolRGlzdHJpYnV0ZWRQeVRvcmNoVHJhaW5pbmdSZXBsaWNhU3BlYxIUCghyZXBsaWNhcxgBIAEoBUICGAESEQoFaW1hZ2UYAiABKAlCAhgBEjAKCXJlc291cmNlcxgDIAEoCzIZLmZseXRlaWRsMi5jb3JlLlJlc291cmNlc0ICGAESPAoOcmVzdGFydF9wb2xpY3kYBCABKA4yIC5mbHl0ZWlkbDIucGx1Z2lucy5SZXN0YXJ0UG9saWN5QgIYARI0CgZjb21tb24YBSABKAsyJC5mbHl0ZWlkbDIucGx1Z2lucy5Db21tb25SZXBsaWNhU3BlY0L6AQoeY29tLmZseXRlaWRsMi5wbHVnaW5zLmt1YmVmbG93QgxQeXRvcmNoUHJvdG9IAlABWj5naXRodWIuY29tL2ZseXRlb3JnL2ZseXRlL3YyL2dlbi9nby9mbHl0ZWlkbDIvcGx1Z2lucy9rdWJlZmxvd6ICA0ZQS6oCGkZseXRlaWRsMi5QbHVnaW5zLkt1YmVmbG93ygIaRmx5dGVpZGwyXFBsdWdpbnNcS3ViZWZsb3fiAiZGbHl0ZWlkbDJcUGx1Z2luc1xLdWJlZmxvd1xHUEJNZXRhZGF0YeoCHEZseXRlaWRsMjo6UGx1Z2luczo6S3ViZWZsb3diBnByb3RvMw", [file_flyteidl2_core_tasks, file_flyteidl2_plugins_common, file_flyteidl2_plugins_kubeflow_common]); + +/** + * Custom proto for torch elastic config for distributed training using + * https://github.com/kubeflow/training-operator/blob/master/pkg/apis/kubeflow.org/v1/pytorch_types.go + * + * @generated from message flyteidl2.plugins.kubeflow.ElasticConfig + */ +export type ElasticConfig = Message<"flyteidl2.plugins.kubeflow.ElasticConfig"> & { + /** + * @generated from field: string 
rdzv_backend = 1; + */ + rdzvBackend: string; + + /** + * @generated from field: int32 min_replicas = 2; + */ + minReplicas: number; + + /** + * @generated from field: int32 max_replicas = 3; + */ + maxReplicas: number; + + /** + * @generated from field: int32 nproc_per_node = 4; + */ + nprocPerNode: number; + + /** + * @generated from field: int32 max_restarts = 5; + */ + maxRestarts: number; +}; + +/** + * Describes the message flyteidl2.plugins.kubeflow.ElasticConfig. + * Use `create(ElasticConfigSchema)` to create a new message. + */ +export const ElasticConfigSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_plugins_kubeflow_pytorch, 0); + +/** + * Proto for plugin that enables distributed training using https://github.com/kubeflow/pytorch-operator + * + * @generated from message flyteidl2.plugins.kubeflow.DistributedPyTorchTrainingTask + */ +export type DistributedPyTorchTrainingTask = Message<"flyteidl2.plugins.kubeflow.DistributedPyTorchTrainingTask"> & { + /** + * Worker replicas spec + * + * @generated from field: flyteidl2.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpec worker_replicas = 1; + */ + workerReplicas?: DistributedPyTorchTrainingReplicaSpec; + + /** + * Master replicas spec, master replicas can only have 1 replica + * + * @generated from field: flyteidl2.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpec master_replicas = 2; + */ + masterReplicas?: DistributedPyTorchTrainingReplicaSpec; + + /** + * RunPolicy encapsulates various runtime policies of the distributed training + * job, for example how to clean up resources and how long the job can stay + * active. 
+ * + * @generated from field: flyteidl2.plugins.kubeflow.RunPolicy run_policy = 3; + */ + runPolicy?: RunPolicy; + + /** + * config for an elastic pytorch job + * + * @generated from field: flyteidl2.plugins.kubeflow.ElasticConfig elastic_config = 4; + */ + elasticConfig?: ElasticConfig; +}; + +/** + * Describes the message flyteidl2.plugins.kubeflow.DistributedPyTorchTrainingTask. + * Use `create(DistributedPyTorchTrainingTaskSchema)` to create a new message. + */ +export const DistributedPyTorchTrainingTaskSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_plugins_kubeflow_pytorch, 1); + +/** + * @generated from message flyteidl2.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpec + */ +export type DistributedPyTorchTrainingReplicaSpec = Message<"flyteidl2.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpec"> & { + /** + * 1~4 deprecated. Use common instead. + * Number of replicas + * + * @generated from field: int32 replicas = 1 [deprecated = true]; + * @deprecated + */ + replicas: number; + + /** + * Image used for the replica group + * + * @generated from field: string image = 2 [deprecated = true]; + * @deprecated + */ + image: string; + + /** + * Resources required for the replica group + * + * @generated from field: flyteidl2.core.Resources resources = 3 [deprecated = true]; + * @deprecated + */ + resources?: Resources; + + /** + * Restart policy determines whether pods will be restarted when they exit + * + * @generated from field: flyteidl2.plugins.RestartPolicy restart_policy = 4 [deprecated = true]; + * @deprecated + */ + restartPolicy: RestartPolicy; + + /** + * The common replica spec + * + * @generated from field: flyteidl2.plugins.CommonReplicaSpec common = 5; + */ + common?: CommonReplicaSpec; +}; + +/** + * Describes the message flyteidl2.plugins.kubeflow.DistributedPyTorchTrainingReplicaSpec. + * Use `create(DistributedPyTorchTrainingReplicaSpecSchema)` to create a new message. 
+ */ +export const DistributedPyTorchTrainingReplicaSpecSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_plugins_kubeflow_pytorch, 2); + diff --git a/gen/ts/flyteidl2/plugins/kubeflow/tensorflow_pb.ts b/gen/ts/flyteidl2/plugins/kubeflow/tensorflow_pb.ts new file mode 100644 index 0000000000..d9e186f5e4 --- /dev/null +++ b/gen/ts/flyteidl2/plugins/kubeflow/tensorflow_pb.ts @@ -0,0 +1,123 @@ +// @generated by protoc-gen-es v2.2.5 with parameter "target=ts,import_extension=.ts" +// @generated from file flyteidl2/plugins/kubeflow/tensorflow.proto (package flyteidl2.plugins.kubeflow, syntax proto3) +/* eslint-disable */ + +import type { GenFile, GenMessage } from "@bufbuild/protobuf/codegenv1"; +import { fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv1"; +import type { Resources } from "../../core/tasks_pb.ts"; +import { file_flyteidl2_core_tasks } from "../../core/tasks_pb.ts"; +import type { CommonReplicaSpec, RestartPolicy } from "../common_pb.ts"; +import { file_flyteidl2_plugins_common } from "../common_pb.ts"; +import type { RunPolicy } from "./common_pb.ts"; +import { file_flyteidl2_plugins_kubeflow_common } from "./common_pb.ts"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file flyteidl2/plugins/kubeflow/tensorflow.proto. 
+ */ +export const file_flyteidl2_plugins_kubeflow_tensorflow: GenFile = /*@__PURE__*/ + fileDesc("CitmbHl0ZWlkbDIvcGx1Z2lucy9rdWJlZmxvdy90ZW5zb3JmbG93LnByb3RvEhpmbHl0ZWlkbDIucGx1Z2lucy5rdWJlZmxvdyLYAwohRGlzdHJpYnV0ZWRUZW5zb3JmbG93VHJhaW5pbmdUYXNrEl0KD3dvcmtlcl9yZXBsaWNhcxgBIAEoCzJELmZseXRlaWRsMi5wbHVnaW5zLmt1YmVmbG93LkRpc3RyaWJ1dGVkVGVuc29yZmxvd1RyYWluaW5nUmVwbGljYVNwZWMSWQoLcHNfcmVwbGljYXMYAiABKAsyRC5mbHl0ZWlkbDIucGx1Z2lucy5rdWJlZmxvdy5EaXN0cmlidXRlZFRlbnNvcmZsb3dUcmFpbmluZ1JlcGxpY2FTcGVjElwKDmNoaWVmX3JlcGxpY2FzGAMgASgLMkQuZmx5dGVpZGwyLnBsdWdpbnMua3ViZWZsb3cuRGlzdHJpYnV0ZWRUZW5zb3JmbG93VHJhaW5pbmdSZXBsaWNhU3BlYxI5CgpydW5fcG9saWN5GAQgASgLMiUuZmx5dGVpZGwyLnBsdWdpbnMua3ViZWZsb3cuUnVuUG9saWN5EmAKEmV2YWx1YXRvcl9yZXBsaWNhcxgFIAEoCzJELmZseXRlaWRsMi5wbHVnaW5zLmt1YmVmbG93LkRpc3RyaWJ1dGVkVGVuc29yZmxvd1RyYWluaW5nUmVwbGljYVNwZWMi+QEKKERpc3RyaWJ1dGVkVGVuc29yZmxvd1RyYWluaW5nUmVwbGljYVNwZWMSFAoIcmVwbGljYXMYASABKAVCAhgBEhEKBWltYWdlGAIgASgJQgIYARIwCglyZXNvdXJjZXMYAyABKAsyGS5mbHl0ZWlkbDIuY29yZS5SZXNvdXJjZXNCAhgBEjwKDnJlc3RhcnRfcG9saWN5GAQgASgOMiAuZmx5dGVpZGwyLnBsdWdpbnMuUmVzdGFydFBvbGljeUICGAESNAoGY29tbW9uGAUgASgLMiQuZmx5dGVpZGwyLnBsdWdpbnMuQ29tbW9uUmVwbGljYVNwZWNC/QEKHmNvbS5mbHl0ZWlkbDIucGx1Z2lucy5rdWJlZmxvd0IPVGVuc29yZmxvd1Byb3RvSAJQAVo+Z2l0aHViLmNvbS9mbHl0ZW9yZy9mbHl0ZS92Mi9nZW4vZ28vZmx5dGVpZGwyL3BsdWdpbnMva3ViZWZsb3eiAgNGUEuqAhpGbHl0ZWlkbDIuUGx1Z2lucy5LdWJlZmxvd8oCGkZseXRlaWRsMlxQbHVnaW5zXEt1YmVmbG934gImRmx5dGVpZGwyXFBsdWdpbnNcS3ViZWZsb3dcR1BCTWV0YWRhdGHqAhxGbHl0ZWlkbDI6OlBsdWdpbnM6Okt1YmVmbG93YgZwcm90bzM", [file_flyteidl2_core_tasks, file_flyteidl2_plugins_common, file_flyteidl2_plugins_kubeflow_common]); + +/** + * Proto for plugin that enables distributed training using https://github.com/kubeflow/tf-operator + * + * @generated from message flyteidl2.plugins.kubeflow.DistributedTensorflowTrainingTask + */ +export type DistributedTensorflowTrainingTask = Message<"flyteidl2.plugins.kubeflow.DistributedTensorflowTrainingTask"> & { + /** + * Worker replicas spec + * + * 
@generated from field: flyteidl2.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpec worker_replicas = 1; + */ + workerReplicas?: DistributedTensorflowTrainingReplicaSpec; + + /** + * Parameter server replicas spec + * + * @generated from field: flyteidl2.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpec ps_replicas = 2; + */ + psReplicas?: DistributedTensorflowTrainingReplicaSpec; + + /** + * Chief replicas spec + * + * @generated from field: flyteidl2.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpec chief_replicas = 3; + */ + chiefReplicas?: DistributedTensorflowTrainingReplicaSpec; + + /** + * RunPolicy encapsulates various runtime policies of the distributed training + * job, for example how to clean up resources and how long the job can stay + * active. + * + * @generated from field: flyteidl2.plugins.kubeflow.RunPolicy run_policy = 4; + */ + runPolicy?: RunPolicy; + + /** + * Evaluator replicas spec + * + * @generated from field: flyteidl2.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpec evaluator_replicas = 5; + */ + evaluatorReplicas?: DistributedTensorflowTrainingReplicaSpec; +}; + +/** + * Describes the message flyteidl2.plugins.kubeflow.DistributedTensorflowTrainingTask. + * Use `create(DistributedTensorflowTrainingTaskSchema)` to create a new message. + */ +export const DistributedTensorflowTrainingTaskSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_plugins_kubeflow_tensorflow, 0); + +/** + * @generated from message flyteidl2.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpec + */ +export type DistributedTensorflowTrainingReplicaSpec = Message<"flyteidl2.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpec"> & { + /** + * 1~4 deprecated. Use common instead. 
+ * Number of replicas + * + * @generated from field: int32 replicas = 1 [deprecated = true]; + * @deprecated + */ + replicas: number; + + /** + * Image used for the replica group + * + * @generated from field: string image = 2 [deprecated = true]; + * @deprecated + */ + image: string; + + /** + * Resources required for the replica group + * + * @generated from field: flyteidl2.core.Resources resources = 3 [deprecated = true]; + * @deprecated + */ + resources?: Resources; + + /** + * Restart policy determines whether pods will be restarted when they exit + * + * @generated from field: flyteidl2.plugins.RestartPolicy restart_policy = 4 [deprecated = true]; + * @deprecated + */ + restartPolicy: RestartPolicy; + + /** + * The common replica spec + * + * @generated from field: flyteidl2.plugins.CommonReplicaSpec common = 5; + */ + common?: CommonReplicaSpec; +}; + +/** + * Describes the message flyteidl2.plugins.kubeflow.DistributedTensorflowTrainingReplicaSpec. + * Use `create(DistributedTensorflowTrainingReplicaSpecSchema)` to create a new message. + */ +export const DistributedTensorflowTrainingReplicaSpecSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_plugins_kubeflow_tensorflow, 1); + diff --git a/gen/ts/flyteidl2/plugins/mpi_pb.ts b/gen/ts/flyteidl2/plugins/mpi_pb.ts new file mode 100644 index 0000000000..94cfb540fd --- /dev/null +++ b/gen/ts/flyteidl2/plugins/mpi_pb.ts @@ -0,0 +1,52 @@ +// @generated by protoc-gen-es v2.2.5 with parameter "target=ts,import_extension=.ts" +// @generated from file flyteidl2/plugins/mpi.proto (package flyteidl2.plugins, syntax proto3) +/* eslint-disable */ + +import type { GenFile, GenMessage } from "@bufbuild/protobuf/codegenv1"; +import { fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv1"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file flyteidl2/plugins/mpi.proto. 
+ */ +export const file_flyteidl2_plugins_mpi: GenFile = /*@__PURE__*/ + fileDesc("ChtmbHl0ZWlkbDIvcGx1Z2lucy9tcGkucHJvdG8SEWZseXRlaWRsMi5wbHVnaW5zIl8KGkRpc3RyaWJ1dGVkTVBJVHJhaW5pbmdUYXNrEhMKC251bV93b3JrZXJzGAEgASgFEh0KFW51bV9sYXVuY2hlcl9yZXBsaWNhcxgCIAEoBRINCgVzbG90cxgDIAEoBUK/AQoVY29tLmZseXRlaWRsMi5wbHVnaW5zQghNcGlQcm90b0gCUAFaNWdpdGh1Yi5jb20vZmx5dGVvcmcvZmx5dGUvdjIvZ2VuL2dvL2ZseXRlaWRsMi9wbHVnaW5zogIDRlBYqgIRRmx5dGVpZGwyLlBsdWdpbnPKAhFGbHl0ZWlkbDJcUGx1Z2luc+ICHUZseXRlaWRsMlxQbHVnaW5zXEdQQk1ldGFkYXRh6gISRmx5dGVpZGwyOjpQbHVnaW5zYgZwcm90bzM"); + +/** + * MPI operator proposal https://github.com/kubeflow/community/blob/master/proposals/mpi-operator-proposal.md + * Custom proto for plugin that enables distributed training using https://github.com/kubeflow/mpi-operator + * + * @generated from message flyteidl2.plugins.DistributedMPITrainingTask + */ +export type DistributedMPITrainingTask = Message<"flyteidl2.plugins.DistributedMPITrainingTask"> & { + /** + * number of worker spawned in the cluster for this job + * + * @generated from field: int32 num_workers = 1; + */ + numWorkers: number; + + /** + * number of launcher replicas spawned in the cluster for this job + * The launcher pod invokes mpirun and communicates with worker pods through MPI. + * + * @generated from field: int32 num_launcher_replicas = 2; + */ + numLauncherReplicas: number; + + /** + * number of slots per worker used in hostfile. + * The available slots (GPUs) in each pod. + * + * @generated from field: int32 slots = 3; + */ + slots: number; +}; + +/** + * Describes the message flyteidl2.plugins.DistributedMPITrainingTask. + * Use `create(DistributedMPITrainingTaskSchema)` to create a new message. 
+ */ +export const DistributedMPITrainingTaskSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_plugins_mpi, 0); + diff --git a/gen/ts/flyteidl2/plugins/presto_pb.ts b/gen/ts/flyteidl2/plugins/presto_pb.ts new file mode 100644 index 0000000000..c66b1d41df --- /dev/null +++ b/gen/ts/flyteidl2/plugins/presto_pb.ts @@ -0,0 +1,49 @@ +// @generated by protoc-gen-es v2.2.5 with parameter "target=ts,import_extension=.ts" +// @generated from file flyteidl2/plugins/presto.proto (package flyteidl2.plugins, syntax proto3) +/* eslint-disable */ + +import type { GenFile, GenMessage } from "@bufbuild/protobuf/codegenv1"; +import { fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv1"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file flyteidl2/plugins/presto.proto. + */ +export const file_flyteidl2_plugins_presto: GenFile = /*@__PURE__*/ + fileDesc("Ch5mbHl0ZWlkbDIvcGx1Z2lucy9wcmVzdG8ucHJvdG8SEWZseXRlaWRsMi5wbHVnaW5zIlgKC1ByZXN0b1F1ZXJ5EhUKDXJvdXRpbmdfZ3JvdXAYASABKAkSDwoHY2F0YWxvZxgCIAEoCRIOCgZzY2hlbWEYAyABKAkSEQoJc3RhdGVtZW50GAQgASgJQsIBChVjb20uZmx5dGVpZGwyLnBsdWdpbnNCC1ByZXN0b1Byb3RvSAJQAVo1Z2l0aHViLmNvbS9mbHl0ZW9yZy9mbHl0ZS92Mi9nZW4vZ28vZmx5dGVpZGwyL3BsdWdpbnOiAgNGUFiqAhFGbHl0ZWlkbDIuUGx1Z2luc8oCEUZseXRlaWRsMlxQbHVnaW5z4gIdRmx5dGVpZGwyXFBsdWdpbnNcR1BCTWV0YWRhdGHqAhJGbHl0ZWlkbDI6OlBsdWdpbnNiBnByb3RvMw"); + +/** + * This message works with the 'presto' task type in the SDK and is the object that will be in the 'custom' field + * of a Presto task's TaskTemplate + * + * @generated from message flyteidl2.plugins.PrestoQuery + */ +export type PrestoQuery = Message<"flyteidl2.plugins.PrestoQuery"> & { + /** + * @generated from field: string routing_group = 1; + */ + routingGroup: string; + + /** + * @generated from field: string catalog = 2; + */ + catalog: string; + + /** + * @generated from field: string schema = 3; + */ + schema: string; + + /** + * @generated from field: string statement = 4; + */ + statement: string; +}; + +/** 
+ * Describes the message flyteidl2.plugins.PrestoQuery. + * Use `create(PrestoQuerySchema)` to create a new message. + */ +export const PrestoQuerySchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_plugins_presto, 0); + diff --git a/gen/ts/flyteidl2/plugins/qubole_pb.ts b/gen/ts/flyteidl2/plugins/qubole_pb.ts new file mode 100644 index 0000000000..d6b26bf059 --- /dev/null +++ b/gen/ts/flyteidl2/plugins/qubole_pb.ts @@ -0,0 +1,98 @@ +// @generated by protoc-gen-es v2.2.5 with parameter "target=ts,import_extension=.ts" +// @generated from file flyteidl2/plugins/qubole.proto (package flyteidl2.plugins, syntax proto3) +/* eslint-disable */ + +import type { GenFile, GenMessage } from "@bufbuild/protobuf/codegenv1"; +import { fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv1"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file flyteidl2/plugins/qubole.proto. + */ +export const file_flyteidl2_plugins_qubole: GenFile = /*@__PURE__*/ + fileDesc("Ch5mbHl0ZWlkbDIvcGx1Z2lucy9xdWJvbGUucHJvdG8SEWZseXRlaWRsMi5wbHVnaW5zIkMKCUhpdmVRdWVyeRINCgVxdWVyeRgBIAEoCRITCgt0aW1lb3V0X3NlYxgCIAEoDRISCgpyZXRyeUNvdW50GAMgASgNIkQKE0hpdmVRdWVyeUNvbGxlY3Rpb24SLQoHcXVlcmllcxgCIAMoCzIcLmZseXRlaWRsMi5wbHVnaW5zLkhpdmVRdWVyeSKnAQoNUXVib2xlSGl2ZUpvYhIVCg1jbHVzdGVyX2xhYmVsGAEgASgJEkQKEHF1ZXJ5X2NvbGxlY3Rpb24YAiABKAsyJi5mbHl0ZWlkbDIucGx1Z2lucy5IaXZlUXVlcnlDb2xsZWN0aW9uQgIYARIMCgR0YWdzGAMgAygJEisKBXF1ZXJ5GAQgASgLMhwuZmx5dGVpZGwyLnBsdWdpbnMuSGl2ZVF1ZXJ5QsIBChVjb20uZmx5dGVpZGwyLnBsdWdpbnNCC1F1Ym9sZVByb3RvSAJQAVo1Z2l0aHViLmNvbS9mbHl0ZW9yZy9mbHl0ZS92Mi9nZW4vZ28vZmx5dGVpZGwyL3BsdWdpbnOiAgNGUFiqAhFGbHl0ZWlkbDIuUGx1Z2luc8oCEUZseXRlaWRsMlxQbHVnaW5z4gIdRmx5dGVpZGwyXFBsdWdpbnNcR1BCTWV0YWRhdGHqAhJGbHl0ZWlkbDI6OlBsdWdpbnNiBnByb3RvMw"); + +/** + * Defines a query to execute on a hive cluster. 
+ * + * @generated from message flyteidl2.plugins.HiveQuery + */ +export type HiveQuery = Message<"flyteidl2.plugins.HiveQuery"> & { + /** + * @generated from field: string query = 1; + */ + query: string; + + /** + * @generated from field: uint32 timeout_sec = 2; + */ + timeoutSec: number; + + /** + * @generated from field: uint32 retryCount = 3; + */ + retryCount: number; +}; + +/** + * Describes the message flyteidl2.plugins.HiveQuery. + * Use `create(HiveQuerySchema)` to create a new message. + */ +export const HiveQuerySchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_plugins_qubole, 0); + +/** + * Defines a collection of hive queries. + * + * @generated from message flyteidl2.plugins.HiveQueryCollection + */ +export type HiveQueryCollection = Message<"flyteidl2.plugins.HiveQueryCollection"> & { + /** + * @generated from field: repeated flyteidl2.plugins.HiveQuery queries = 2; + */ + queries: HiveQuery[]; +}; + +/** + * Describes the message flyteidl2.plugins.HiveQueryCollection. + * Use `create(HiveQueryCollectionSchema)` to create a new message. 
+ */ +export const HiveQueryCollectionSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_plugins_qubole, 1); + +/** + * This message works with the 'hive' task type in the SDK and is the object that will be in the 'custom' field + * of a hive task's TaskTemplate + * + * @generated from message flyteidl2.plugins.QuboleHiveJob + */ +export type QuboleHiveJob = Message<"flyteidl2.plugins.QuboleHiveJob"> & { + /** + * @generated from field: string cluster_label = 1; + */ + clusterLabel: string; + + /** + * @generated from field: flyteidl2.plugins.HiveQueryCollection query_collection = 2 [deprecated = true]; + * @deprecated + */ + queryCollection?: HiveQueryCollection; + + /** + * @generated from field: repeated string tags = 3; + */ + tags: string[]; + + /** + * @generated from field: flyteidl2.plugins.HiveQuery query = 4; + */ + query?: HiveQuery; +}; + +/** + * Describes the message flyteidl2.plugins.QuboleHiveJob. + * Use `create(QuboleHiveJobSchema)` to create a new message. + */ +export const QuboleHiveJobSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_plugins_qubole, 2); + diff --git a/gen/ts/flyteidl2/plugins/tensorflow_pb.ts b/gen/ts/flyteidl2/plugins/tensorflow_pb.ts new file mode 100644 index 0000000000..36ca9e8e8c --- /dev/null +++ b/gen/ts/flyteidl2/plugins/tensorflow_pb.ts @@ -0,0 +1,57 @@ +// @generated by protoc-gen-es v2.2.5 with parameter "target=ts,import_extension=.ts" +// @generated from file flyteidl2/plugins/tensorflow.proto (package flyteidl2.plugins, syntax proto3) +/* eslint-disable */ + +import type { GenFile, GenMessage } from "@bufbuild/protobuf/codegenv1"; +import { fileDesc, messageDesc } from "@bufbuild/protobuf/codegenv1"; +import type { Message } from "@bufbuild/protobuf"; + +/** + * Describes the file flyteidl2/plugins/tensorflow.proto. 
+ */ +export const file_flyteidl2_plugins_tensorflow: GenFile = /*@__PURE__*/ + fileDesc("CiJmbHl0ZWlkbDIvcGx1Z2lucy90ZW5zb3JmbG93LnByb3RvEhFmbHl0ZWlkbDIucGx1Z2lucyJ9CiFEaXN0cmlidXRlZFRlbnNvcmZsb3dUcmFpbmluZ1Rhc2sSDwoHd29ya2VycxgBIAEoBRITCgtwc19yZXBsaWNhcxgCIAEoBRIWCg5jaGllZl9yZXBsaWNhcxgDIAEoBRIaChJldmFsdWF0b3JfcmVwbGljYXMYBCABKAVCxgEKFWNvbS5mbHl0ZWlkbDIucGx1Z2luc0IPVGVuc29yZmxvd1Byb3RvSAJQAVo1Z2l0aHViLmNvbS9mbHl0ZW9yZy9mbHl0ZS92Mi9nZW4vZ28vZmx5dGVpZGwyL3BsdWdpbnOiAgNGUFiqAhFGbHl0ZWlkbDIuUGx1Z2luc8oCEUZseXRlaWRsMlxQbHVnaW5z4gIdRmx5dGVpZGwyXFBsdWdpbnNcR1BCTWV0YWRhdGHqAhJGbHl0ZWlkbDI6OlBsdWdpbnNiBnByb3RvMw"); + +/** + * Custom proto for plugin that enables distributed training using https://github.com/kubeflow/tf-operator + * + * @generated from message flyteidl2.plugins.DistributedTensorflowTrainingTask + */ +export type DistributedTensorflowTrainingTask = Message<"flyteidl2.plugins.DistributedTensorflowTrainingTask"> & { + /** + * number of worker replicas spawned in the cluster for this job + * + * @generated from field: int32 workers = 1; + */ + workers: number; + + /** + * PS -> Parameter server + * number of ps replicas spawned in the cluster for this job + * + * @generated from field: int32 ps_replicas = 2; + */ + psReplicas: number; + + /** + * number of chief replicas spawned in the cluster for this job + * + * @generated from field: int32 chief_replicas = 3; + */ + chiefReplicas: number; + + /** + * number of evaluator replicas spawned in the cluster for this job + * + * @generated from field: int32 evaluator_replicas = 4; + */ + evaluatorReplicas: number; +}; + +/** + * Describes the message flyteidl2.plugins.DistributedTensorflowTrainingTask. + * Use `create(DistributedTensorflowTrainingTaskSchema)` to create a new message. 
+ */ +export const DistributedTensorflowTrainingTaskSchema: GenMessage = /*@__PURE__*/ + messageDesc(file_flyteidl2_plugins_tensorflow, 0); + diff --git a/go.mod b/go.mod index 33c62e0caf..98b45f34f9 100644 --- a/go.mod +++ b/go.mod @@ -3,24 +3,29 @@ module github.com/flyteorg/flyte/v2 go 1.24.6 require ( - buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.9-20250912141014-52f32327d4b0.1 - connectrpc.com/connect v1.18.1 + buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.10-20250912141014-52f32327d4b0.1 + connectrpc.com/connect v1.19.1 + github.com/GoogleCloudPlatform/spark-on-k8s-operator v0.0.0-20200723154620-6f35a1152625 github.com/aws/aws-sdk-go v1.55.8 + github.com/aws/aws-sdk-go-v2 v1.39.6 + github.com/aws/aws-sdk-go-v2/config v1.26.1 + github.com/aws/aws-sdk-go-v2/service/athena v1.55.10 github.com/benlaurie/objecthash v0.0.0-20180202135721-d1e3d6079fc1 github.com/coocood/freecache v1.2.4 - github.com/ernesto-jimenez/gogen v0.0.0-20180125220232-d7d4131e6607 + github.com/dask/dask-kubernetes/v2023 v2023.0.0-20230626103304-abd02cd17b26 github.com/fatih/color v1.13.0 - github.com/fatih/structtag v1.2.0 github.com/flyteorg/stow v0.3.12 github.com/fsnotify/fsnotify v1.9.0 github.com/ghodss/yaml v1.0.0 github.com/go-gormigrate/gormigrate/v2 v2.1.5 github.com/go-test/deep v1.1.1 github.com/golang/protobuf v1.5.4 - github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 github.com/hashicorp/golang-lru v0.5.4 + github.com/imdario/mergo v0.3.16 github.com/jackc/pgconn v1.14.3 - github.com/jackc/pgx/v5 v5.6.0 + github.com/jackc/pgx/v5 v5.7.6 + github.com/kubeflow/training-operator v1.9.3 github.com/lib/pq v1.10.9 github.com/magiconair/properties v1.8.6 github.com/mitchellh/mapstructure v1.5.0 @@ -29,6 +34,9 @@ require ( github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.22.0 github.com/prometheus/common v0.62.0 + github.com/ray-project/kuberay/ray-operator v1.1.0-rc.1 + 
github.com/samber/lo v1.39.0 + github.com/shamaton/msgpack/v2 v2.4.0 github.com/sirupsen/logrus v1.9.3 github.com/spf13/cobra v1.9.1 github.com/spf13/pflag v1.0.6 @@ -41,13 +49,15 @@ require ( go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.38.0 go.opentelemetry.io/otel/sdk v1.38.0 go.opentelemetry.io/otel/trace v1.38.0 - golang.org/x/net v0.44.0 + golang.org/x/net v0.45.0 + golang.org/x/oauth2 v0.30.0 golang.org/x/time v0.12.0 - golang.org/x/tools v0.36.0 - google.golang.org/genproto/googleapis/api v0.0.0-20251029180050-ab9386a59fda - google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda - google.golang.org/grpc v1.75.1 + google.golang.org/api v0.247.0 + google.golang.org/genproto/googleapis/api v0.0.0-20251103181224-f26f9409b101 + google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 + google.golang.org/grpc v1.76.0 google.golang.org/protobuf v1.36.10 + gopkg.in/yaml.v2 v2.4.0 gorm.io/datatypes v1.2.7 gorm.io/driver/postgres v1.6.0 gorm.io/driver/sqlite v1.6.0 @@ -78,39 +88,57 @@ require ( github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.29.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.53.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0 // indirect + github.com/NYTimes/gziphandler v1.1.1 // indirect github.com/antlr4-go/antlr/v4 v4.13.0 // indirect + github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect + github.com/aws/aws-sdk-go-v2/credentials v1.16.12 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.10 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.13 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.13 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 // indirect + 
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.9 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.18.5 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.5 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.26.5 // indirect + github.com/aws/smithy-go v1.23.2 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/blang/semver/v4 v4.0.0 // indirect github.com/cenkalti/backoff/v5 v5.0.3 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 // indirect + github.com/coreos/go-semver v0.3.1 // indirect + github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/emicklei/go-restful/v3 v3.12.2 // indirect github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect + github.com/evanphx/json-patch v5.6.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.9.11 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/fxamacker/cbor/v2 v2.9.0 // indirect - github.com/go-jose/go-jose/v4 v4.1.1 // indirect + github.com/go-jose/go-jose/v4 v4.1.2 // indirect github.com/go-logr/logr v1.4.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect - github.com/go-openapi/jsonreference v0.20.2 // indirect + github.com/go-openapi/jsonreference v0.21.0 // indirect github.com/go-openapi/swag v0.23.0 // indirect github.com/go-sql-driver/mysql v1.8.1 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v5 v5.2.2 // indirect + github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/google/btree v1.1.3 // indirect github.com/google/cel-go v0.26.0 // indirect github.com/google/gnostic-models v0.7.0 
// indirect github.com/google/go-cmp v0.7.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect github.com/google/s2a-go v0.1.9 // indirect github.com/google/uuid v1.6.0 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect github.com/googleapis/gax-go/v2 v2.15.0 // indirect + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/jackc/chunkreader/v2 v2.0.1 // indirect @@ -127,7 +155,7 @@ require ( github.com/kylelemons/godebug v1.1.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-colorable v0.1.12 // indirect - github.com/mattn/go-isatty v0.0.14 // indirect + github.com/mattn/go-isatty v0.0.16 // indirect github.com/mattn/go-sqlite3 v1.14.22 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect @@ -146,10 +174,12 @@ require ( github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/spiffe/go-spiffe/v2 v2.5.0 // indirect github.com/stoewer/go-strcase v1.3.0 // indirect - github.com/stretchr/objx v0.5.2 // indirect + github.com/stretchr/objx v0.5.3 // indirect github.com/subosito/gotenv v1.2.0 // indirect - github.com/x448/float16 v0.8.4 // indirect github.com/zeebo/errs v1.4.0 // indirect + go.etcd.io/etcd/api/v3 v3.5.9 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.5.9 // indirect + go.etcd.io/etcd/client/v3 v3.5.9 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/contrib/detectors/gcp v1.36.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect @@ -161,32 +191,49 @@ require ( go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect go.yaml.in/yaml/v2 v2.4.2 // indirect - 
go.yaml.in/yaml/v3 v3.0.4 // indirect - golang.org/x/crypto v0.42.0 // indirect + golang.org/x/crypto v0.43.0 // indirect golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect - golang.org/x/mod v0.27.0 // indirect - golang.org/x/oauth2 v0.30.0 // indirect golang.org/x/sync v0.17.0 // indirect - golang.org/x/sys v0.36.0 // indirect - golang.org/x/term v0.35.0 // indirect - golang.org/x/text v0.29.0 // indirect + golang.org/x/sys v0.37.0 // indirect + golang.org/x/term v0.36.0 // indirect + golang.org/x/text v0.30.0 // indirect + golang.org/x/tools v0.37.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/api v0.247.0 // indirect google.golang.org/genproto v0.0.0-20251103181224-f26f9409b101 // indirect - gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.66.4 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect gorm.io/driver/mysql v1.5.6 // indirect - k8s.io/apiextensions-apiserver v0.34.1 // indirect - k8s.io/apiserver v0.34.1 // indirect - k8s.io/component-base v0.34.1 // indirect + k8s.io/apiextensions-apiserver v0.31.2 // indirect + k8s.io/apiserver v0.28.4 // indirect + k8s.io/component-base v0.28.4 // indirect k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b // indirect + k8s.io/kms v0.28.3 // indirect + k8s.io/kube-openapi v0.0.0-20240430033511-f0e62f92d13f // indirect sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 // indirect sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect - sigs.k8s.io/randfill v1.0.0 // indirect - sigs.k8s.io/structured-merge-diff/v6 v6.3.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect sigs.k8s.io/yaml v1.6.0 // indirect ) + +replace ( + github.com/flyteorg/flyte/flyteidl => ./flyteidl2 + github.com/flyteorg/flyte/flyteplugins => ./flyteplugins + 
github.com/flyteorg/flyte/flytestdlib => ./flytestdlib + github.com/flyteorg/flyte/v2 => ./ + github.com/flyteorg/flyte/v2/flyteplugins => ./flyteplugins + github.com/flyteorg/flyte/v2/flytestdlib => ./flytestdlib + github.com/google/gnostic-models => github.com/google/gnostic-models v0.6.8 + github.com/robfig/cron/v3 => github.com/unionai/cron/v3 v3.0.2-0.20220915080349-5790c370e63a + k8s.io/api => k8s.io/api v0.28.2 + k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.28.2 + k8s.io/apimachinery => k8s.io/apimachinery v0.28.2 + k8s.io/apiserver => k8s.io/apiserver v0.28.2 + k8s.io/client-go => k8s.io/client-go v0.28.2 + k8s.io/component-base => k8s.io/component-base v0.28.2 + k8s.io/klog/v2 => k8s.io/klog/v2 v2.100.1 + k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20230905202853-d090da108d2f + k8s.io/utils => k8s.io/utils v0.0.0-20230726121419-3b25d923346b + sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.16.3 +) diff --git a/go.sum b/go.sum index 43fc0975ed..c325351b94 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,5 @@ -buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.9-20250912141014-52f32327d4b0.1 h1:DQLS/rRxLHuugVzjJU5AvOwD57pdFl9he/0O7e5P294= -buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.9-20250912141014-52f32327d4b0.1/go.mod h1:aY3zbkNan5F+cGm9lITDP6oxJIwu0dn9KjJuJjWaHkg= +buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.10-20250912141014-52f32327d4b0.1 h1:31on4W/yPcV4nZHL4+UCiCvLPsMqe/vJcNg8Rci0scc= +buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.10-20250912141014-52f32327d4b0.1/go.mod h1:fUl8CEN/6ZAMk6bP8ahBJPUJw7rbp+j4x+wCcYi2IG4= cel.dev/expr v0.24.0 h1:56OvJKSH3hDGL0ml5uSxZmz3/3Pq4tJ+fb1unVLAFcY= cel.dev/expr v0.24.0/go.mod h1:hLPLo1W4QUmuYdA72RBX06QTs6MXw941piREPl3Yfiw= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= @@ -59,8 +59,8 @@ cloud.google.com/go/storage v1.56.0 
h1:iixmq2Fse2tqxMbWhLWC9HfBj1qdxqAmiK8/eqtsL cloud.google.com/go/storage v1.56.0/go.mod h1:Tpuj6t4NweCLzlNbw9Z9iwxEkrSem20AetIeH/shgVU= cloud.google.com/go/trace v1.11.7 h1:kDNDX8JkaAG3R2nq1lIdkb7FCSi1rCmsEtKVsty7p+U= cloud.google.com/go/trace v1.11.7/go.mod h1:TNn9d5V3fQVf6s4SCveVMIBS2LJUqo73GACmq/Tky0s= -connectrpc.com/connect v1.18.1 h1:PAg7CjSAGvscaf6YZKUefjoih5Z/qYkyaTrBW8xvYPw= -connectrpc.com/connect v1.18.1/go.mod h1:0292hj1rnx8oFrStN7cB4jjVBeqs+Yx5yDIC2prWDO8= +connectrpc.com/connect v1.19.1 h1:R5M57z05+90EfEvCY1b7hBxDVOUl45PrtXtAV2fOC14= +connectrpc.com/connect v1.19.1/go.mod h1:tN20fjdGlewnSFeZxLKb0xwIZ6ozc3OQs2hTXy4du9w= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= @@ -86,10 +86,44 @@ github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0 github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.53.0/go.mod h1:jUZ5LYlw40WMd07qxcQJD5M40aUxrfwqQX1g7zxYnrQ= github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0 h1:Ron4zCA/yk6U7WOBXhTJcDpsUBG9npumK6xw2auFltQ= github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.53.0/go.mod h1:cSgYe11MCNYunTnRXrKiR/tHc0eoKjICUuWpNZoVCOo= +github.com/GoogleCloudPlatform/spark-on-k8s-operator v0.0.0-20200723154620-6f35a1152625 h1:cQyO5JQ2iuHnEcF3v24kdDMsgh04RjyFPDtuvD6PCE0= +github.com/GoogleCloudPlatform/spark-on-k8s-operator v0.0.0-20200723154620-6f35a1152625/go.mod h1:6PnrZv6zUDkrNMw0mIoGRmGBR7i9LulhKPmxFq4rUiM= +github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= +github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/antlr4-go/antlr/v4 v4.13.0 
h1:lxCg3LAv+EUK6t1i0y1V6/SLeUi0eKEKdhQAlS8TVTI= github.com/antlr4-go/antlr/v4 v4.13.0/go.mod h1:pfChB/xh/Unjila75QW7+VU4TSnWnnk9UTnmpPaOR2g= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-sdk-go v1.55.8 h1:JRmEUbU52aJQZ2AjX4q4Wu7t4uZjOu71uyNmaWlUkJQ= github.com/aws/aws-sdk-go v1.55.8/go.mod h1:ZkViS9AqA6otK+JBBNH2++sx1sgxrPKcSzPPvQkUtXk= +github.com/aws/aws-sdk-go-v2 v1.39.6 h1:2JrPCVgWJm7bm83BDwY5z8ietmeJUbh3O2ACnn+Xsqk= +github.com/aws/aws-sdk-go-v2 v1.39.6/go.mod h1:c9pm7VwuW0UPxAEYGyTmyurVcNrbF6Rt/wixFqDhcjE= +github.com/aws/aws-sdk-go-v2/config v1.26.1 h1:z6DqMxclFGL3Zfo+4Q0rLnAZ6yVkzCRxhRMsiRQnD1o= +github.com/aws/aws-sdk-go-v2/config v1.26.1/go.mod h1:ZB+CuKHRbb5v5F0oJtGdhFTelmrxd4iWO1lf0rQwSAg= +github.com/aws/aws-sdk-go-v2/credentials v1.16.12 h1:v/WgB8NxprNvr5inKIiVVrXPuuTegM+K8nncFkr1usU= +github.com/aws/aws-sdk-go-v2/credentials v1.16.12/go.mod h1:X21k0FjEJe+/pauud82HYiQbEr9jRKY3kXEIQ4hXeTQ= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.10 h1:w98BT5w+ao1/r5sUuiH6JkVzjowOKeOJRHERyy1vh58= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.14.10/go.mod h1:K2WGI7vUvkIv1HoNbfBA1bvIZ+9kL3YVmWxeKuLQsiw= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.13 h1:a+8/MLcWlIxo1lF9xaGt3J/u3yOZx+CdSveSNwjhD40= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.13/go.mod h1:oGnKwIYZ4XttyU2JWxFrwvhF6YKiK/9/wmE3v3Iu9K8= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.13 h1:HBSI2kDkMdWz4ZM7FjwE7e/pWDEZ+nR95x8Ztet1ooY= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.13/go.mod h1:YE94ZoDArI7awZqJzBAZ3PDD2zSfuP7w6P2knOzIn8M= +github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2 h1:GrSw8s0Gs/5zZ0SX+gX4zQjRnRsMJDJ2sLur1gRBhEM= +github.com/aws/aws-sdk-go-v2/internal/ini v1.7.2/go.mod h1:6fQQgfuGmw8Al/3M2IgIllycxV7ZW7WCdVSqfBeUiCY= 
+github.com/aws/aws-sdk-go-v2/service/athena v1.55.10 h1:lhHg2H2XeRix8Zk2UKxsJXKk93066CAZCw0x5pMRvDw= +github.com/aws/aws-sdk-go-v2/service/athena v1.55.10/go.mod h1:1bY3ff3w7nTDnyGgOAOEZpO7e7bUiG2iDM2tXbCzxjg= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4 h1:/b31bi3YVNlkzkBrm9LfpaKoaYZUxIAj4sHfOTmLfqw= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.10.4/go.mod h1:2aGXHFmbInwgP9ZfpmdIfOELL79zhdNYNmReK8qDfdQ= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.9 h1:Nf2sHxjMJR8CSImIVCONRi4g0Su3J+TSTbS7G0pUeMU= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.10.9/go.mod h1:idky4TER38YIjr2cADF1/ugFMKvZV7p//pVeV5LZbF0= +github.com/aws/aws-sdk-go-v2/service/sso v1.18.5 h1:ldSFWz9tEHAwHNmjx2Cvy1MjP5/L9kNoR0skc6wyOOM= +github.com/aws/aws-sdk-go-v2/service/sso v1.18.5/go.mod h1:CaFfXLYL376jgbP7VKC96uFcU8Rlavak0UlAwk1Dlhc= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.5 h1:2k9KmFawS63euAkY4/ixVNsYYwrwnd5fIvgEKkfZFNM= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.21.5/go.mod h1:W+nd4wWDVkSUIox9bacmkBP5NMFQeTJ/xqNabpzSR38= +github.com/aws/aws-sdk-go-v2/service/sts v1.26.5 h1:5UYvv8JUvllZsRnfrcMQ+hJ9jNICmcgKPAO1CER25Wg= +github.com/aws/aws-sdk-go-v2/service/sts v1.26.5/go.mod h1:XX5gh4CB7wAs4KhcF46G6C8a2i7eupU19dcAAE+EydU= +github.com/aws/smithy-go v1.23.2 h1:Crv0eatJUQhaManss33hS5r40CG3ZFH+21XSkqMrIUM= +github.com/aws/smithy-go v1.23.2/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0= github.com/benlaurie/objecthash v0.0.0-20180202135721-d1e3d6079fc1 h1:VRtJdDi2lqc3MFwmouppm2jlm6icF+7H3WYKpLENMTo= github.com/benlaurie/objecthash v0.0.0-20180202135721-d1e3d6079fc1/go.mod h1:jvdWlw8vowVGnZqSDC7yhPd7AifQeQbRDkZcQXV2nRg= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ -115,12 +149,19 @@ github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443 h1:aQ3y1lwWyqYPiWZThqv github.com/cncf/xds/go v0.0.0-20250501225837-2ac532fd4443/go.mod 
h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= github.com/coocood/freecache v1.2.4 h1:UdR6Yz/X1HW4fZOuH0Z94KwG851GWOSknua5VUbb/5M= github.com/coocood/freecache v1.2.4/go.mod h1:RBUWa/Cy+OHdfTGFEhEuE1pMCMX51Ncizj7rthiQ3vk= +github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4= +github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec= +github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/dask/dask-kubernetes/v2023 v2023.0.0-20230626103304-abd02cd17b26 h1:6RByIva89lKEvwIzNQSUNcu8NG1p1wwwC4mJfVk/kqw= +github.com/dask/dask-kubernetes/v2023 v2023.0.0-20230626103304-abd02cd17b26/go.mod h1:OqIYr2QnxR3sQK2XahJIyWVcjz38LQ4GNcUzqezFpRg= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/emicklei/go-restful/v3 v3.12.2 h1:DhwDP0vY3k8ZzE0RunuJy8GhNpPL6zqLkDf9B/a0/xU= github.com/emicklei/go-restful/v3 v3.12.2/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -137,24 +178,18 @@ github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod 
h1:Wk+tMFAFbCXaJP github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8= github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU= -github.com/ernesto-jimenez/gogen v0.0.0-20180125220232-d7d4131e6607 h1:cTavhURetDkezJCvxFggiyLeP40Mrk/TtVg2+ycw1Es= -github.com/ernesto-jimenez/gogen v0.0.0-20180125220232-d7d4131e6607/go.mod h1:Cg4fM0vhYWOZdgM7RIOSTRNIc8/VT7CXClC3Ni86lu4= -github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k= -github.com/evanphx/json-patch v0.5.2/go.mod h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= +github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U= +github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= -github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/flyteorg/stow v0.3.12 h1:RRXI5RUdxaK6A46HrO0D2r14cRlW1lJRL6qyzqpVMPU= github.com/flyteorg/stow v0.3.12/go.mod h1:nyaBf8ZWkpHWkKIl4rqKI2uXfPx+VbL0PmEtvq4Pxkc= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= 
-github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM= -github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= @@ -162,8 +197,9 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gormigrate/gormigrate/v2 v2.1.5 h1:1OyorA5LtdQw12cyJDEHuTrEV3GiXiIhS4/QTTa/SM8= github.com/go-gormigrate/gormigrate/v2 v2.1.5/go.mod h1:mj9ekk/7CPF3VjopaFvWKN2v7fN3D9d3eEOAXRhi/+M= -github.com/go-jose/go-jose/v4 v4.1.1 h1:JYhSgy4mXXzAdF3nUx3ygx347LRXJRrpgyU3adRmkAI= -github.com/go-jose/go-jose/v4 v4.1.1/go.mod h1:BdsZGqgdO3b6tTc6LSE56wcDbMMLuPsw5d4ZD5f94kA= +github.com/go-jose/go-jose/v4 v4.1.2 h1:TK/7NqRQZfgAh+Td8AlsrvtPoUyiHh0LqVvokh+1vHI= +github.com/go-jose/go-jose/v4 v4.1.2/go.mod h1:22cg9HWM1pOlnRiY+9cQYJ9XHmya1bYW8OeDM6Ku6Oo= +github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= @@ -171,12 +207,10 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= -github.com/go-openapi/jsonpointer v0.19.6/go.mod 
h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= -github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= -github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= -github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= @@ -186,8 +220,11 @@ github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1v github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/go-test/deep v1.1.1 h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U= github.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8= github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang-sql/civil 
v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA= @@ -198,6 +235,8 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= @@ -227,8 +266,8 @@ github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/cel-go v0.26.0 h1:DPGjXackMpJWH680oGY4lZhYjIameYmR+/6RBdDGmaI= github.com/google/cel-go v0.26.0/go.mod h1:A9O8OU9rdvrK5MQyrqfIxo1a0u4g3sF8KB6PUIaryMM= -github.com/google/gnostic-models v0.7.0 h1:qwTtogB15McXDaNqTZdzPJRHvaVJlAl+HVQnLmJEJxo= -github.com/google/gnostic-models v0.7.0/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ= +github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= +github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod 
h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -238,6 +277,7 @@ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -274,8 +314,16 @@ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5m github.com/googleapis/gax-go/v2 v2.15.0 h1:SyjDc1mGgZU5LncH8gimWo9lW1DtIfPibOG81vgd/bo= github.com/googleapis/gax-go/v2 v2.15.0/go.mod h1:zVVkkxAQHa1RQpg9z2AUCMnKhi0Qld9rcmyfL1OZhoc= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs= +github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= 
+github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= @@ -284,6 +332,8 @@ github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= +github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= @@ -301,8 +351,8 @@ github.com/jackc/pgproto3/v2 v2.3.3 h1:1HLSx5H+tXR9pW3in3zaztoEwQYRC9SQaYUHjTSUO github.com/jackc/pgproto3/v2 v2.3.3/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= 
-github.com/jackc/pgx/v5 v5.6.0 h1:SWJzexBzPL5jb0GEsrPMLIsi/3jOo7RHlzTjcAeDrPY= -github.com/jackc/pgx/v5 v5.6.0/go.mod h1:DNZ/vlrUnhWCoFGxHAG8U2ljioxukquj7utPDgtQdTw= +github.com/jackc/pgx/v5 v5.7.6 h1:rWQc5FwZSPX58r1OQmkuaNicxdmExaEz5A2DO2hUuTk= +github.com/jackc/pgx/v5 v5.7.6/go.mod h1:aruU7o91Tc2q2cFp5h4uP3f6ztExVpyVv88Xl/8Vl8M= github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= @@ -313,6 +363,8 @@ github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9Y github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= +github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= +github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= @@ -325,13 +377,14 @@ github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zt github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod 
h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kubeflow/training-operator v1.9.3 h1:aaSHqskOtCY8Dn8sVd+l9p5XAczW6O0nYMPjVLAw/lc= +github.com/kubeflow/training-operator v1.9.3/go.mod h1:6zI0hgeCOheiW5Z12IcVkKBuSWm714fz8mUvYu4Fid4= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= @@ -344,8 +397,9 @@ github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/microsoft/go-mssqldb v1.7.2 h1:CHkFJiObW7ItKTJfHo1QX7QBBD1iV+mn1eOyRP3b/PA= @@ -389,12 +443,20 @@ github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= 
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/ray-project/kuberay/ray-operator v1.1.0-rc.1 h1:skD8MXnQMO3QGUeTKt09VOXvuch/gJh8+6q3OLm0kAQ= +github.com/ray-project/kuberay/ray-operator v1.1.0-rc.1/go.mod h1:ZqyKKvMP5nKDldQoKmur+Wcx7wVlV9Q98phFqHzr+KY= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/samber/lo v1.39.0 h1:4gTz1wUhNYLhFSKl6O+8peW0v2F4BCY034GRpU9WnuA= +github.com/samber/lo v1.39.0/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXnEA= +github.com/shamaton/msgpack/v2 v2.4.0 h1:O5Z08MRmbo0lA9o2xnQ4TXx6teJbPqEurqcCOQ8Oi/4= +github.com/shamaton/msgpack/v2 v2.4.0/go.mod h1:6khjYnkx73f7VQU7wjcFS9DFjs+59naVWJv1TB7qdOI= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= +github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/spf13/afero v1.10.0 h1:EaGW2JJh15aKOejeuJ+wpFSHnbd7GE6Wvp3TsNhb6LY= github.com/spf13/afero v1.10.0/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= @@ -414,8 +476,8 @@ github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8w github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx 
v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= -github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/objx v0.5.3 h1:jmXUvGomnU1o3W/V5h2VEradbpJDwGrzugQQvL0POH4= +github.com/stretchr/objx v0.5.3/go.mod h1:rDQraq+vQZU7Fde9LOZLr8Tax6zZvy4kuNKF+QYS+U0= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -428,14 +490,32 @@ github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= -github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= +github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75/go.mod h1:KO6IkyS8Y3j8OdNO85qEYBsRPuteD+YciPomcXdrMnk= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod 
h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/zeebo/errs v1.4.0 h1:XNdoD/RRMKP7HD0UhJnIzUy74ISdGGxURlYG8HSWSfM= github.com/zeebo/errs v1.4.0/go.mod h1:sgbWHsvVuTPHcqJJGQ1WhI5KbWlHYz+2+2C/LSEtCw4= +go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= +go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= +go.etcd.io/etcd/api/v3 v3.5.9 h1:4wSsluwyTbGGmyjJktOf3wFQoTBIURXHnq9n/G/JQHs= +go.etcd.io/etcd/api/v3 v3.5.9/go.mod h1:uyAal843mC8uUVSLWz6eHa/d971iDGnCRpmKd2Z+X8k= +go.etcd.io/etcd/client/pkg/v3 v3.5.9 h1:oidDC4+YEuSIQbsR94rY9gur91UPL6DnxDCIYd2IGsE= +go.etcd.io/etcd/client/pkg/v3 v3.5.9/go.mod h1:y+CzeSmkMpWN2Jyu1npecjB9BBnABxGM4pN8cGuJeL4= +go.etcd.io/etcd/client/v2 v2.305.9 h1:YZ2OLi0OvR0H75AcgSUajjd5uqKDKocQUqROTG11jIo= +go.etcd.io/etcd/client/v2 v2.305.9/go.mod h1:0NBdNx9wbxtEQLwAQtrDHwx58m02vXpDcgSYI2seohQ= +go.etcd.io/etcd/client/v3 v3.5.9 h1:r5xghnU7CwbUxD/fbUtRyJGaYNfDun8sp/gTr1hew6E= +go.etcd.io/etcd/client/v3 v3.5.9/go.mod h1:i/Eo5LrZ5IKqpbtpPDuaUnDOUv471oDg8cjQaUr2MbA= +go.etcd.io/etcd/pkg/v3 v3.5.9 h1:6R2jg/aWd/zB9+9JxmijDKStGJAPFsX3e6BeJkMi6eQ= +go.etcd.io/etcd/pkg/v3 v3.5.9/go.mod h1:BZl0SAShQFk0IpLWR78T/+pyt8AruMHhTNNX73hkNVY= +go.etcd.io/etcd/raft/v3 v3.5.9 h1:ZZ1GIHoUlHsn0QVqiRysAm3/81Xx7+i2d7nSdWxlOiI= +go.etcd.io/etcd/raft/v3 v3.5.9/go.mod h1:WnFkqzFdZua4LVlVXQEGhmooLeyS7mqzS4Pf4BCVqXg= +go.etcd.io/etcd/server/v3 v3.5.9 h1:vomEmmxeztLtS5OEH7d0hBAg4cjVIu9wXuNzUZx2ZA0= +go.etcd.io/etcd/server/v3 v3.5.9/go.mod h1:GgI1fQClQCFIzuVjlvdbMxNbnISt90gdfYyqiAIt65g= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -491,8 +571,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.42.0 h1:chiH31gIWm57EkTXpwnqf8qeuMUi0yekh6mT2AvFlqI= -golang.org/x/crypto v0.42.0/go.mod h1:4+rDnOTJhQCx2q7/j6rAN5XDw8kPjeaXEUR2eL94ix8= +golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04= +golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -528,8 +608,6 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ= -golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -562,8 +640,8 @@ golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod 
h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.44.0 h1:evd8IRDyfNBMBTTY5XRF1vaZlD+EmWx6x8PkhR04H/I= -golang.org/x/net v0.44.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY= +golang.org/x/net v0.45.0 h1:RLBg5JKixCy82FtLJpeNlVM0nrSqpCRYzVU1n8kj0tM= +golang.org/x/net v0.45.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -626,12 +704,13 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= -golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= +golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ= -golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA= +golang.org/x/term 
v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q= +golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -640,8 +719,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk= -golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4= +golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k= +golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -696,8 +775,8 @@ golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg= -golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= +golang.org/x/tools 
v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE= +golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -772,10 +851,10 @@ google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20251103181224-f26f9409b101 h1:MgBTzgUJFAmp2PlyqKJecSpZpjFxkYL3nDUIeH/6Q30= google.golang.org/genproto v0.0.0-20251103181224-f26f9409b101/go.mod h1:bbWg36d7wp3knc0hIlmJAnW5R/CQ2rzpEVb72eH4ex4= -google.golang.org/genproto/googleapis/api v0.0.0-20251029180050-ab9386a59fda h1:+2XxjfsAu6vqFxwGBRcHiMaDCuZiqXGDUDVWVtrFAnE= -google.golang.org/genproto/googleapis/api v0.0.0-20251029180050-ab9386a59fda/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda h1:i/Q+bfisr7gq6feoJnS/DlpdwEL4ihp41fvRiM3Ork0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20251029180050-ab9386a59fda/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= +google.golang.org/genproto/googleapis/api v0.0.0-20251103181224-f26f9409b101 h1:vk5TfqZHNn0obhPIYeS+cxIFKFQgser/M2jnI+9c6MM= +google.golang.org/genproto/googleapis/api v0.0.0-20251103181224-f26f9409b101/go.mod h1:E17fc4PDhkr22dE3RgnH2hEubUaky6ZwW4VhANxyspg= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101 h1:tRPGkdGHuewF4UisLzzHHr1spKw92qLM98nIzxbC0wY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251103181224-f26f9409b101/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= google.golang.org/grpc v1.19.0/go.mod 
h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -792,8 +871,8 @@ google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= -google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= +google.golang.org/grpc v1.76.0 h1:UnVkv1+uMLYXoIz6o7chp59WfQUYA2ex/BXQ9rHZu7A= +google.golang.org/grpc v1.76.0/go.mod h1:Ju12QI8M6iQJtbcsV+awF5a4hfJMLi4X0JLo94ULZ6c= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -811,12 +890,12 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= -gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= 
gopkg.in/ini.v1 v1.66.4 h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4= gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= +gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= @@ -846,36 +925,36 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.34.1 h1:jC+153630BMdlFukegoEL8E/yT7aLyQkIVuwhmwDgJM= -k8s.io/api v0.34.1/go.mod h1:SB80FxFtXn5/gwzCoN6QCtPD7Vbu5w2n1S0J5gFfTYk= -k8s.io/apiextensions-apiserver v0.34.1 h1:NNPBva8FNAPt1iSVwIE0FsdrVriRXMsaWFMqJbII2CI= -k8s.io/apiextensions-apiserver v0.34.1/go.mod h1:hP9Rld3zF5Ay2Of3BeEpLAToP+l4s5UlxiHfqRaRcMc= -k8s.io/apimachinery v0.34.1 h1:dTlxFls/eikpJxmAC7MVE8oOeP1zryV7iRyIjB0gky4= -k8s.io/apimachinery v0.34.1/go.mod h1:/GwIlEcWuTX9zKIg2mbw0LRFIsXwrfoVxn+ef0X13lw= -k8s.io/apiserver v0.34.1 h1:U3JBGdgANK3dfFcyknWde1G6X1F4bg7PXuvlqt8lITA= -k8s.io/apiserver v0.34.1/go.mod h1:eOOc9nrVqlBI1AFCvVzsob0OxtPZUCPiUJL45JOTBG0= -k8s.io/client-go v0.34.1 h1:ZUPJKgXsnKwVwmKKdPfw4tB58+7/Ik3CrjOEhsiZ7mY= -k8s.io/client-go v0.34.1/go.mod h1:kA8v0FP+tk6sZA0yKLRG67LWjqufAoSHA2xVGKw9Of8= -k8s.io/component-base v0.34.1 h1:v7xFgG+ONhytZNFpIz5/kecwD+sUhVE6HU7qQUiRM4A= -k8s.io/component-base v0.34.1/go.mod h1:mknCpLlTSKHzAQJJnnHVKqjxR7gBeHRv0rPXA7gdtQ0= -k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= -k8s.io/klog/v2 
v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b h1:MloQ9/bdJyIu9lb1PzujOPolHyvO06MXG5TUIj2mNAA= -k8s.io/kube-openapi v0.0.0-20250710124328-f3f2b991d03b/go.mod h1:UZ2yyWbFTpuhSbFhv24aGNOdoRdJZgsIObGBUaYVsts= -k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzkbzn+gDM4X9T4Ck= -k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/api v0.28.2 h1:9mpl5mOb6vXZvqbQmankOfPIGiudghwCoLl1EYfUZbw= +k8s.io/api v0.28.2/go.mod h1:RVnJBsjU8tcMq7C3iaRSGMeaKt2TWEUXcpIt/90fjEg= +k8s.io/apiextensions-apiserver v0.28.2 h1:J6/QRWIKV2/HwBhHRVITMLYoypCoPY1ftigDM0Kn+QU= +k8s.io/apiextensions-apiserver v0.28.2/go.mod h1:5tnkxLGa9nefefYzWuAlWZ7RZYuN/765Au8cWLA6SRg= +k8s.io/apimachinery v0.28.2 h1:KCOJLrc6gu+wV1BYgwik4AF4vXOlVJPdiqn0yAWWwXQ= +k8s.io/apimachinery v0.28.2/go.mod h1:RdzF87y/ngqk9H4z3EL2Rppv5jj95vGS/HaFXrLDApU= +k8s.io/apiserver v0.28.2 h1:rBeYkLvF94Nku9XfXyUIirsVzCzJBs6jMn3NWeHieyI= +k8s.io/apiserver v0.28.2/go.mod h1:f7D5e8wH8MWcKD7azq6Csw9UN+CjdtXIVQUyUhrtb+E= +k8s.io/client-go v0.28.2 h1:DNoYI1vGq0slMBN/SWKMZMw0Rq+0EQW6/AK4v9+3VeY= +k8s.io/client-go v0.28.2/go.mod h1:sMkApowspLuc7omj1FOSUxSoqjr+d5Q0Yc0LOFnYFJY= +k8s.io/component-base v0.28.2 h1:Yc1yU+6AQSlpJZyvehm/NkJBII72rzlEsd6MkBQ+G0E= +k8s.io/component-base v0.28.2/go.mod h1:4IuQPQviQCg3du4si8GpMrhAIegxpsgPngPRR/zWpzc= +k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= +k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kms v0.28.3 h1:jYwwAe96XELNjYWv1G4kNzizcFoZ50OOElvPansbw70= +k8s.io/kms v0.28.3/go.mod h1:kSMjU2tg7vjqqoWVVCcmPmNZ/CofPsoTbSxAipCvZuE= +k8s.io/kube-openapi v0.0.0-20230905202853-d090da108d2f h1:eeEUOoGYWhOz7EyXqhlR2zHKNw2mNJ9vzJmub6YN6kk= +k8s.io/kube-openapi v0.0.0-20230905202853-d090da108d2f/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA= +k8s.io/utils 
v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2 h1:jpcvIRr3GLoUoEKRkHKSmGjxb6lWwrBlJsXc+eUYQHM= sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.31.2/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw= -sigs.k8s.io/controller-runtime v0.22.4 h1:GEjV7KV3TY8e+tJ2LCTxUTanW4z/FmNB7l327UfMq9A= -sigs.k8s.io/controller-runtime v0.22.4/go.mod h1:+QX1XUpTXN4mLoblf4tqr5CQcyHPAki2HLXqQMY6vh8= +sigs.k8s.io/controller-runtime v0.16.3 h1:2TuvuokmfXvDUamSx1SuAOO3eTyye+47mJCigwG62c4= +sigs.k8s.io/controller-runtime v0.16.3/go.mod h1:j7bialYoSn142nv9sCOJmQgDXQXxnroFU4VnX/brVJ0= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= -sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= -sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= -sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco= -sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs= sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=