From 8efce0a14d048e4924c61783ac7bf10e00d67a9e Mon Sep 17 00:00:00 2001 From: Koki Ibukuro Date: Sun, 14 May 2023 21:30:30 +0200 Subject: [PATCH 1/6] Build iOS libs for v2.12.0 --- .../Headers/TensorFlowLiteC.h | 4 +- .../TensorFlowLiteC.framework/Headers/c_api.h | 594 +++++++++++------- .../Headers/c_api_experimental.h | 36 +- .../Headers/c_api_types.h | 33 +- .../Headers/common.h | 112 +++- .../Headers/profiler.h | 85 +++ .../Headers/telemetry_setting.h | 103 +++ .../Headers/xnnpack_delegate.h | 9 + .../TensorFlowLiteC.framework/TensorFlowLiteC | 4 +- .../TensorFlowLiteCMetal | 4 +- .../Runtime/Delegates/XNNPackDelegate.cs | 7 + 11 files changed, 710 insertions(+), 281 deletions(-) mode change 100644 => 100755 Packages/com.github.asus4.tflite/Plugins/iOS/TensorFlowLiteC.framework/Headers/c_api_types.h create mode 100755 Packages/com.github.asus4.tflite/Plugins/iOS/TensorFlowLiteC.framework/Headers/profiler.h create mode 100755 Packages/com.github.asus4.tflite/Plugins/iOS/TensorFlowLiteC.framework/Headers/telemetry_setting.h diff --git a/Packages/com.github.asus4.tflite/Plugins/iOS/TensorFlowLiteC.framework/Headers/TensorFlowLiteC.h b/Packages/com.github.asus4.tflite/Plugins/iOS/TensorFlowLiteC.framework/Headers/TensorFlowLiteC.h index 425ec6d6c..e449ae1bc 100755 --- a/Packages/com.github.asus4.tflite/Plugins/iOS/TensorFlowLiteC.framework/Headers/TensorFlowLiteC.h +++ b/Packages/com.github.asus4.tflite/Plugins/iOS/TensorFlowLiteC.framework/Headers/TensorFlowLiteC.h @@ -1,6 +1,8 @@ #import #import #import +#import #import +#import +#import #import -#import diff --git a/Packages/com.github.asus4.tflite/Plugins/iOS/TensorFlowLiteC.framework/Headers/c_api.h b/Packages/com.github.asus4.tflite/Plugins/iOS/TensorFlowLiteC.framework/Headers/c_api.h index dc346ce67..5470feb43 100755 --- a/Packages/com.github.asus4.tflite/Plugins/iOS/TensorFlowLiteC.framework/Headers/c_api.h +++ b/Packages/com.github.asus4.tflite/Plugins/iOS/TensorFlowLiteC.framework/Headers/c_api.h @@ -12,7 +12,7 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ -/// WARNING: Users of TensorFlow Lite should not include this file directly, +/// \warning Users of TensorFlow Lite should not include this file directly, /// but should instead include "third_party/tensorflow/lite/c/c_api.h". /// Only the TensorFlow Lite implementation itself should include this /// file directly. @@ -20,6 +20,7 @@ limitations under the License. #define TENSORFLOW_LITE_CORE_C_C_API_H_ #include +#include #include #include @@ -27,6 +28,7 @@ limitations under the License. #include "c_api_types.h" // IWYU pragma: export // -------------------------------------------------------------------------- +/// \file /// C API for TensorFlow Lite. /// /// The API leans towards simplicity and uniformity instead of convenience, as @@ -63,7 +65,7 @@ limitations under the License. /// /// // Extract the output tensor data. /// const TfLiteTensor* output_tensor = -// TfLiteInterpreterGetOutputTensor(interpreter, 0); +/// TfLiteInterpreterGetOutputTensor(interpreter, 0); /// TfLiteTensorCopyToBuffer(output_tensor, output.data(), /// output.size() * sizeof(float)); /// @@ -78,289 +80,340 @@ limitations under the License. extern "C" { #endif // __cplusplus +// This header should be valid in both C (e.g. C99) and C++, +// so 'void' in parameters is not redundant. 
+// NOLINTBEGIN(modernize-redundant-void-arg) + // -------------------------------------------------------------------------- -// Opaque types used by the C API. +// Opaque types used by the C API. (See also c_api_types.h.) -// TfLiteModel wraps a loaded TensorFlow Lite model. +/// TfLiteModel wraps a loaded TensorFlow Lite model. typedef struct TfLiteModel TfLiteModel; -// TfLiteInterpreterOptions allows customized interpreter configuration. +/// TfLiteInterpreterOptions allows customized interpreter configuration. typedef struct TfLiteInterpreterOptions TfLiteInterpreterOptions; -// Allows delegation of nodes to alternative backends. -typedef struct TfLiteDelegate TfLiteDelegate; - -// TfLiteInterpreter provides inference from a provided model. +/// TfLiteInterpreter provides inference from a provided model. typedef struct TfLiteInterpreter TfLiteInterpreter; -// A tensor in the interpreter system which is a wrapper around a buffer of -// data including a dimensionality (or NULL if not currently defined). +/// A tensor in the interpreter system which is a wrapper around a buffer of +/// data including a dimensionality (or NULL if not currently defined). typedef struct TfLiteTensor TfLiteTensor; -// TfLiteOpaqueContext is an opaque version of TfLiteContext; -// WARNING: This is an experimental type and subject to change. -typedef struct TfLiteOpaqueContext TfLiteOpaqueContext; - -// TfLiteOpaqueNode is an opaque version of TfLiteNode; -// WARNING: This is an experimental type and subject to change. -typedef struct TfLiteOpaqueNode TfLiteOpaqueNode; - -// TfLiteRegistrationExternal is an external version of TfLiteRegistration to -// use custom op registration API. -// WARNING: This is an experimental type and subject to change. +/// TfLiteRegistrationExternal is an external version of TfLiteRegistration to +/// use custom op registration API. +/// \warning This is an experimental type and subject to change. typedef struct TfLiteRegistrationExternal TfLiteRegistrationExternal; // -------------------------------------------------------------------------- -// TfLiteVersion returns a string describing version information of the -// TensorFlow Lite library. TensorFlow Lite uses semantic versioning. +/// The TensorFlow Lite Runtime version. +/// +/// Returns a pointer to a statically allocated string that is the version +/// number of the (potentially dynamically loaded) TF Lite Runtime library. +/// TensorFlow Lite uses semantic versioning, and the return value should be +/// in semver 2 format , starting with MAJOR.MINOR.PATCH, +/// e.g. "2.12.0" or "2.13.0-rc2". TFL_CAPI_EXPORT extern const char* TfLiteVersion(void); -// Returns a model from the provided buffer, or null on failure. -// -// NOTE: The caller retains ownership of the `model_data` buffer and should -// ensure that the lifetime of the `model_data` buffer must be at least as long -// as the lifetime of the `TfLiteModel` and of any `TfLiteInterpreter` objects -// created from that `TfLiteModel`, and furthermore the contents of the -// `model_data` buffer must not be modified during that time." +/// The supported TensorFlow Lite model file Schema version. +/// +/// Returns the (major) version number of the Schema used for model +/// files that is supported by the (potentially dynamically loaded) +/// TensorFlow Lite Runtime. +/// +/// Model files using schema versions different to this may not be supported by +/// the current version of the TF Lite Runtime. 
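+///
+/// As an illustrative sketch only (not normative documentation), both values
+/// can be logged together at startup, e.g. with printf from <stdio.h>:
+///
+///     printf("TF Lite runtime %s, schema v%d\n",
+///            TfLiteVersion(), TfLiteSchemaVersion());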
+TFL_CAPI_EXPORT int TfLiteSchemaVersion(void); + +/// Returns a model from the provided buffer, or null on failure. +/// +/// \note The caller retains ownership of the `model_data` buffer and should +/// ensure that the lifetime of the `model_data` buffer must be at least as long +/// as the lifetime of the `TfLiteModel` and of any `TfLiteInterpreter` objects +/// created from that `TfLiteModel`, and furthermore the contents of the +/// `model_data` buffer must not be modified during that time." TFL_CAPI_EXPORT extern TfLiteModel* TfLiteModelCreate(const void* model_data, size_t model_size); -// Returns a model from the provided file, or null on failure. -// -// NOTE: The file's contents must not be modified during the lifetime of the -// `TfLiteModel` or of any `TfLiteInterpreter` objects created from that -// `TfLiteModel`. +/// Same as `TfLiteModelCreate` with customizble error reporter. +/// * `reporter` takes the provided `user_data` object, as well as a C-style +/// format string and arg list (see also vprintf). +/// * `user_data` is optional. If non-null, it is owned by the client and must +/// remain valid for the duration of the interpreter lifetime. +TFL_CAPI_EXPORT extern TfLiteModel* TfLiteModelCreateWithErrorReporter( + const void* model_data, size_t model_size, + void (*reporter)(void* user_data, const char* format, va_list args), + void* user_data); + +/// Returns a model from the provided file, or null on failure. +/// +/// \note The file's contents must not be modified during the lifetime of the +/// `TfLiteModel` or of any `TfLiteInterpreter` objects created from that +/// `TfLiteModel`. TFL_CAPI_EXPORT extern TfLiteModel* TfLiteModelCreateFromFile( const char* model_path); -// Destroys the model instance. -TFL_CAPI_EXPORT extern void TfLiteModelDelete(TfLiteModel* model); - -// Returns a new TfLiteRegistrationExternal instance. -// -// NOTE: The caller retains ownership and should ensure that -// the lifetime of the `TfLiteRegistrationExternal` must be at least as long as -// the lifetime of the `TfLiteInterpreter`. -// WARNING: This is an experimental API and subject to change. -TFL_CAPI_EXPORT extern TfLiteRegistrationExternal* -TfLiteRegistrationExternalCreate(TfLiteBuiltinOperator builtin_code, - const char* custom_name, int version); - -// Return the builtin op code of the provided external 'registration'. -// -// WARNING: This is an experimental API and subject to change. -TFL_CAPI_EXPORT extern TfLiteBuiltinOperator -TfLiteRegistrationExternalGetBuiltInCode( - const TfLiteRegistrationExternal* registration); - -// Destroys the TfLiteRegistrationExternal instance. -// WARNING: This is an experimental API and subject to change. -TFL_CAPI_EXPORT extern void TfLiteRegistrationExternalDelete( - TfLiteRegistrationExternal* registration); - -// Sets the initialization callback for the registration. -// -// The callback is called to initialize the op from serialized data. -// Please refer `init` of `TfLiteRegistration` for the detail. -// WARNING: This is an experimental API and subject to change. -TFL_CAPI_EXPORT extern void TfLiteRegistrationExternalSetInit( - TfLiteRegistrationExternal* registration, - void* (*init)(TfLiteOpaqueContext* context, const char* buffer, - size_t length)); - -// Sets the deallocation callback for the registration. -// -// This callback is called to deallocate the data returned by the init callback. -// The value passed in the `data` parameter is the value that was returned by -// the `init` callback. 
-// Please refer `free` of `TfLiteRegistration` for the detail. -// WARNING: This is an experimental API and subject to change. -TFL_CAPI_EXPORT extern void TfLiteRegistrationExternalSetFree( - TfLiteRegistrationExternal* registration, - void (*free)(TfLiteOpaqueContext* context, void* data)); - -// Sets the preparation callback for the registration. -// -// The callback is called when the inputs of operator have been resized. -// Please refer `prepare` of `TfLiteRegistration` for the detail. -// WARNING: This is an experimental API and subject to change. -TFL_CAPI_EXPORT extern void TfLiteRegistrationExternalSetPrepare( - TfLiteRegistrationExternal* registration, - TfLiteStatus (*prepare)(TfLiteOpaqueContext* context, - TfLiteOpaqueNode* node)); +/// Same as `TfLiteModelCreateFromFile` with customizble error reporter. +/// * `reporter` takes the provided `user_data` object, as well as a C-style +/// format string and arg list (see also vprintf). +/// * `user_data` is optional. If non-null, it is owned by the client and must +/// remain valid for the duration of the interpreter lifetime. +TFL_CAPI_EXPORT extern TfLiteModel* TfLiteModelCreateFromFileWithErrorReporter( + const char* model_path, + void (*reporter)(void* user_data, const char* format, va_list args), + void* user_data); -// Sets the invocation callback for the registration. -// -// The callback is called when the operator is executed. -// Please refer `invoke` of `TfLiteRegistration` for the detail. -// WARNING: This is an experimental API and subject to change. -TFL_CAPI_EXPORT extern void TfLiteRegistrationExternalSetInvoke( - TfLiteRegistrationExternal* registration, - TfLiteStatus (*invoke)(TfLiteOpaqueContext* context, - TfLiteOpaqueNode* node)); +/// Destroys the model instance. +TFL_CAPI_EXPORT extern void TfLiteModelDelete(TfLiteModel* model); -// Returns a new interpreter options instances. +/// Returns a new interpreter options instances. TFL_CAPI_EXPORT extern TfLiteInterpreterOptions* TfLiteInterpreterOptionsCreate(); -// Destroys the interpreter options instance. +/// Creates and returns a shallow copy of an options object. +/// +/// The caller is responsible for calling `TfLiteInterpreterOptionsDelete` to +/// deallocate the object pointed to by the returned pointer. +TFL_CAPI_EXPORT extern TfLiteInterpreterOptions* TfLiteInterpreterOptionsCopy( + const TfLiteInterpreterOptions* from); + +/// Destroys the interpreter options instance. TFL_CAPI_EXPORT extern void TfLiteInterpreterOptionsDelete( TfLiteInterpreterOptions* options); -// Sets the number of CPU threads to use for the interpreter. +/// Sets the number of CPU threads to use for the interpreter. TFL_CAPI_EXPORT extern void TfLiteInterpreterOptionsSetNumThreads( TfLiteInterpreterOptions* options, int32_t num_threads); -// Adds a delegate to be applied during `TfLiteInterpreter` creation. -// -// If delegate application fails, interpreter creation will also fail with an -// associated error logged. -// -// NOTE: The caller retains ownership of the delegate and should ensure that it -// remains valid for the duration of any created interpreter's lifetime. +/// Adds a delegate to be applied during `TfLiteInterpreter` creation. +/// +/// If delegate application fails, interpreter creation will also fail with an +/// associated error logged. +/// +/// \note The caller retains ownership of the delegate and should ensure that it +/// remains valid for the duration of any created interpreter's lifetime. 
+/// +/// If you are NOT using "TensorFlow Lite in Play Services", and NOT building +/// with `TFLITE_WITH_STABLE_ABI` or `TFLITE_USE_OPAQUE_DELEGATE` macros +/// enabled, it is possible to pass a `TfLiteDelegate*` rather than a +/// `TfLiteOpaqueDelegate*` to this function, since in those cases, +/// `TfLiteOpaqueDelegate` is just a typedef alias for `TfLiteDelegate`. +/// This is for compatibility with existing source code +/// and existing delegates. For new delegates, it is recommended to +/// use `TfLiteOpaqueDelegate` rather than `TfLiteDelegate`. (See +/// `TfLiteOpaqueDelegate` in tensorflow/lite/core/c/c_api_types.h.) TFL_CAPI_EXPORT extern void TfLiteInterpreterOptionsAddDelegate( - TfLiteInterpreterOptions* options, TfLiteDelegate* delegate); - -// Adds an opaque delegate to be applied during `TfLiteInterpreter` creation. -// -// If delegate application fails, interpreter creation will also fail with an -// associated error logged. -// -// NOTE: The caller retains ownership of the delegate and should ensure that it -// remains valid for the duration of any created interpreter's lifetime. -TFL_CAPI_EXPORT extern void TfLiteInterpreterOptionsAddOpaqueDelegate( - TfLiteInterpreterOptions* options, - TfLiteOpaqueDelegateStruct* opaque_delegate); + TfLiteInterpreterOptions* options, TfLiteOpaqueDelegate* delegate); -// Sets a custom error reporter for interpreter execution. -// -// * `reporter` takes the provided `user_data` object, as well as a C-style -// format string and arg list (see also vprintf). -// * `user_data` is optional. If non-null, it is owned by the client and must -// remain valid for the duration of the interpreter lifetime. +/// Sets a custom error reporter for interpreter execution. +/// +/// * `reporter` takes the provided `user_data` object, as well as a C-style +/// format string and arg list (see also vprintf). +/// * `user_data` is optional. If non-null, it is owned by the client and must +/// remain valid for the duration of the interpreter lifetime. TFL_CAPI_EXPORT extern void TfLiteInterpreterOptionsSetErrorReporter( TfLiteInterpreterOptions* options, void (*reporter)(void* user_data, const char* format, va_list args), void* user_data); -// Adds an op registration to be applied during `TfLiteInterpreter` creation. -// -// The `TfLiteRegistrationExternal` object is needed to implement custom op of -// TFLite Interpreter via C API. Calling this function ensures that any -// `TfLiteInterpreter` created with the specified `options` can execute models -// that use the custom operator specified in `registration`. -// Please refer https://www.tensorflow.org/lite/guide/ops_custom for custom op -// support. -// NOTE: The caller retains ownership of the TfLiteRegistrationExternal object -// and should ensure that it remains valid for the duration of any created -// interpreter's lifetime. -// WARNING: This is an experimental API and subject to change. +/// Adds an op registration to be applied during `TfLiteInterpreter` creation. +/// +/// The `TfLiteRegistrationExternal` object is needed to implement custom op of +/// TFLite Interpreter via C API. Calling this function ensures that any +/// `TfLiteInterpreter` created with the specified `options` can execute models +/// that use the custom operator specified in `registration`. +/// Please refer https://www.tensorflow.org/lite/guide/ops_custom for custom op +/// support. 
+/// \note The caller retains ownership of the TfLiteRegistrationExternal object +/// and should ensure that it remains valid for the duration of any created +/// interpreter's lifetime. +/// \warning This is an experimental API and subject to change. TFL_CAPI_EXPORT extern void TfLiteInterpreterOptionsAddRegistrationExternal( TfLiteInterpreterOptions* options, TfLiteRegistrationExternal* registration); -// Returns a new interpreter using the provided model and options, or null on -// failure. -// -// * `model` must be a valid model instance. The caller retains ownership of the -// object, and may destroy it (via TfLiteModelDelete) immediately after -// creating the interpreter. However, if the TfLiteModel was allocated with -// TfLiteModelCreate, then the `model_data` buffer that was passed to -// TfLiteModelCreate must outlive the lifetime of the TfLiteInterpreter object -// that this function returns, and must not be modified during that time; -// and if the TfLiteModel was allocated with TfLiteModelCreateFromFile, then -// the contents of the model file must not be modified during the lifetime of -// the TfLiteInterpreter object that this function returns. -// * `optional_options` may be null. The caller retains ownership of the object, -// and can safely destroy it (via TfLiteInterpreterOptionsDelete) immediately -// after creating the interpreter. -// -// NOTE: The client *must* explicitly allocate tensors before attempting to -// access input tensor data or invoke the interpreter. +/// Enables users to cancel in-flight invocations with +/// `TfLiteInterpreterCancel`. +/// +/// By default it is disabled and calling to `TfLiteInterpreterCancel` will +/// return kTfLiteError. See `TfLiteInterpreterCancel`. +/// +/// \warning This is an experimental API and subject to change. +TFL_CAPI_EXPORT extern TfLiteStatus TfLiteInterpreterOptionsEnableCancellation( + TfLiteInterpreterOptions* options, bool enable); + +/// Returns a new interpreter using the provided model and options, or null on +/// failure. +/// +/// * `model` must be a valid model instance. The caller retains ownership of +/// the object, and may destroy it (via TfLiteModelDelete) immediately after +/// creating the interpreter. However, if the TfLiteModel was allocated with +/// TfLiteModelCreate, then the `model_data` buffer that was passed to +/// TfLiteModelCreate must outlive the lifetime of the TfLiteInterpreter +/// object that this function returns, and must not be modified during that +/// time; and if the TfLiteModel was allocated with TfLiteModelCreateFromFile, +/// then the contents of the model file must not be modified during the +/// lifetime of the TfLiteInterpreter object that this function returns. +/// * `optional_options` may be null. The caller retains ownership of the +/// object, and can safely destroy it (via TfLiteInterpreterOptionsDelete) +/// immediately after creating the interpreter. +/// +/// \note The client *must* explicitly allocate tensors before attempting to +/// access input tensor data or invoke the interpreter. TFL_CAPI_EXPORT extern TfLiteInterpreter* TfLiteInterpreterCreate( const TfLiteModel* model, const TfLiteInterpreterOptions* optional_options); -// Destroys the interpreter. +/// Destroys the interpreter. TFL_CAPI_EXPORT extern void TfLiteInterpreterDelete( TfLiteInterpreter* interpreter); -// Returns the number of input tensors associated with the model. +/// Returns the number of input tensors associated with the model. 
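+///
+/// Illustrative sketch (error handling omitted): the count bounds the valid
+/// indices for `TfLiteInterpreterGetInputTensor`, so all inputs can be
+/// walked as follows.
+///
+///     int32_t n = TfLiteInterpreterGetInputTensorCount(interpreter);
+///     for (int32_t i = 0; i < n; ++i) {
+///       TfLiteTensor* t = TfLiteInterpreterGetInputTensor(interpreter, i);
+///       printf("input %d: %s\n", i, TfLiteTensorName(t));
+///     }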
TFL_CAPI_EXPORT extern int32_t TfLiteInterpreterGetInputTensorCount( const TfLiteInterpreter* interpreter); -// Returns the tensor associated with the input index. -// REQUIRES: 0 <= input_index < TfLiteInterpreterGetInputTensorCount(tensor) +/// Returns a pointer to an array of input tensor indices. The length of the +/// array can be obtained via a call to `TfLiteInterpreterGetInputTensorCount`. +/// +/// Typically the input tensors associated with an `interpreter` would be set +/// during the initialization of the `interpreter`, through a mechanism like the +/// `InterpreterBuilder`, and remain unchanged throughout the lifetime of the +/// interpreter. However, there are some circumstances in which the pointer may +/// not remain valid throughout the lifetime of the interpreter, because calls +/// to `SetInputs` on the interpreter invalidate the returned pointer. +/// +/// The ownership of the array remains with the TFLite runtime. +TFL_CAPI_EXPORT const int* TfLiteInterpreterInputTensorIndices( + const TfLiteInterpreter* interpreter); + +/// Returns the tensor associated with the input index. +/// REQUIRES: 0 <= input_index < TfLiteInterpreterGetInputTensorCount(tensor) TFL_CAPI_EXPORT extern TfLiteTensor* TfLiteInterpreterGetInputTensor( const TfLiteInterpreter* interpreter, int32_t input_index); -// Resizes the specified input tensor. -// -// NOTE: After a resize, the client *must* explicitly allocate tensors before -// attempting to access the resized tensor data or invoke the interpreter. -// -// REQUIRES: 0 <= input_index < TfLiteInterpreterGetInputTensorCount(tensor) -// -// This function makes a copy of the input dimensions, so the client can safely -// deallocate `input_dims` immediately after this function returns. +/// Resizes the specified input tensor. +/// +/// \note After a resize, the client *must* explicitly allocate tensors before +/// attempting to access the resized tensor data or invoke the interpreter. +/// +/// REQUIRES: 0 <= input_index < TfLiteInterpreterGetInputTensorCount(tensor) +/// +/// This function makes a copy of the input dimensions, so the client can safely +/// deallocate `input_dims` immediately after this function returns. TFL_CAPI_EXPORT extern TfLiteStatus TfLiteInterpreterResizeInputTensor( TfLiteInterpreter* interpreter, int32_t input_index, const int* input_dims, int32_t input_dims_size); -// Updates allocations for all tensors, resizing dependent tensors using the -// specified input tensor dimensionality. -// -// This is a relatively expensive operation, and need only be called after -// creating the graph and/or resizing any inputs. +/// Updates allocations for all tensors, resizing dependent tensors using the +/// specified input tensor dimensionality. +/// +/// This is a relatively expensive operation, and need only be called after +/// creating the graph and/or resizing any inputs. TFL_CAPI_EXPORT extern TfLiteStatus TfLiteInterpreterAllocateTensors( TfLiteInterpreter* interpreter); -// Runs inference for the loaded graph. -// -// Before calling this function, the caller should first invoke -// TfLiteInterpreterAllocateTensors() and should also set the values for the -// input tensors. After successfully calling this function, the values for the -// output tensors will be set. -// -// NOTE: It is possible that the interpreter is not in a ready state to -// evaluate (e.g., if AllocateTensors() hasn't been called, or if a -// ResizeInputTensor() has been performed without a subsequent call to -// AllocateTensors()). 
-// -// If the (experimental!) delegate fallback option was enabled in the -// interpreter options, then the interpreter will automatically fall back to -// not using any delegates if execution with delegates fails. For details, see -// TfLiteInterpreterOptionsSetEnableDelegateFallback in c_api_experimental.h. -// -// Returns one of the following status codes: -// - kTfLiteOk: Success. Output is valid. -// - kTfLiteDelegateError: Execution with delegates failed, due to a problem -// with the delegate(s). If fallback was not enabled, output is invalid. -// If fallback was enabled, this return value indicates that fallback -// succeeded, the output is valid, and all delegates previously applied to -// the interpreter have been undone. -// - kTfLiteApplicationError: Same as for kTfLiteDelegateError, except that -// the problem was not with the delegate itself, but rather was -// due to an incompatibility between the delegate(s) and the -// interpreter or model. -// - kTfLiteError: Unexpected/runtime failure. Output is invalid. - +/// Runs inference for the loaded graph. +/// +/// Before calling this function, the caller should first invoke +/// TfLiteInterpreterAllocateTensors() and should also set the values for the +/// input tensors. After successfully calling this function, the values for the +/// output tensors will be set. +/// +/// \note It is possible that the interpreter is not in a ready state to +/// evaluate (e.g., if AllocateTensors() hasn't been called, or if a +/// ResizeInputTensor() has been performed without a subsequent call to +/// AllocateTensors()). +/// +/// If the (experimental!) delegate fallback option was enabled in the +/// interpreter options, then the interpreter will automatically fall back to +/// not using any delegates if execution with delegates fails. For details, +/// see TfLiteInterpreterOptionsSetEnableDelegateFallback in +/// c_api_experimental.h. +/// +/// Returns one of the following status codes: +/// - kTfLiteOk: Success. Output is valid. +/// - kTfLiteDelegateError: Execution with delegates failed, due to a problem +/// with the delegate(s). If fallback was not enabled, output is invalid. +/// If fallback was enabled, this return value indicates that fallback +/// succeeded, the output is valid, and all delegates previously applied to +/// the interpreter have been undone. +/// - kTfLiteApplicationError: Same as for kTfLiteDelegateError, except that +/// the problem was not with the delegate itself, but rather was +/// due to an incompatibility between the delegate(s) and the +/// interpreter or model. +/// - kTfLiteError: Unexpected/runtime failure. Output is invalid. TFL_CAPI_EXPORT extern TfLiteStatus TfLiteInterpreterInvoke( TfLiteInterpreter* interpreter); -// Returns the number of output tensors associated with the model. +/// Returns the number of output tensors associated with the model. TFL_CAPI_EXPORT extern int32_t TfLiteInterpreterGetOutputTensorCount( const TfLiteInterpreter* interpreter); -// Returns the tensor associated with the output index. -// REQUIRES: 0 <= output_index < TfLiteInterpreterGetOutputTensorCount(tensor) -// -// NOTE: The shape and underlying data buffer for output tensors may be not -// be available until after the output tensor has been both sized and allocated. -// In general, best practice is to interact with the output tensor *after* -// calling TfLiteInterpreterInvoke(). +/// Returns a pointer to an array of output tensor indices. 
The length of the +/// array can be obtained via a call to `TfLiteInterpreterGetOutputTensorCount`. +/// +/// Typically the output tensors associated with an `interpreter` would be set +/// during the initialization of the `interpreter`, through a mechanism like the +/// `InterpreterBuilder`, and remain unchanged throughout the lifetime of the +/// interpreter. However, there are some circumstances in which the pointer may +/// not remain valid throughout the lifetime of the interpreter, because calls +/// to `SetOutputs` on the interpreter invalidate the returned pointer. +/// +/// The ownership of the array remains with the TFLite runtime. +TFL_CAPI_EXPORT const int* TfLiteInterpreterOutputTensorIndices( + const TfLiteInterpreter* interpreter); + +/// Returns the tensor associated with the output index. +/// REQUIRES: 0 <= output_index < TfLiteInterpreterGetOutputTensorCount(tensor) +/// +/// \note The shape and underlying data buffer for output tensors may be not +/// be available until after the output tensor has been both sized and +/// allocated. +/// In general, best practice is to interact with the output tensor *after* +/// calling TfLiteInterpreterInvoke(). TFL_CAPI_EXPORT extern const TfLiteTensor* TfLiteInterpreterGetOutputTensor( const TfLiteInterpreter* interpreter, int32_t output_index); +/// Returns modifiable access to the tensor that corresponds to the +/// specified `index` and is associated with the provided `interpreter`. +/// +/// This requires the `index` to be between 0 and N - 1, where N is the +/// number of tensors in the model. +/// +/// Typically the tensors associated with the `interpreter` would be set during +/// the `interpreter` initialization, through a mechanism like the +/// `InterpreterBuilder`, and remain unchanged throughout the lifetime of the +/// interpreter. However, there are some circumstances in which the pointer may +/// not remain valid throughout the lifetime of the interpreter, because calls +/// to `AddTensors` on the interpreter invalidate the returned pointer. +/// +/// Note the difference between this function and +/// `TfLiteInterpreterGetInputTensor` (or `TfLiteInterpreterGetOutputTensor` for +/// that matter): `TfLiteInterpreterGetTensor` takes an index into the array of +/// all tensors associated with the `interpreter`'s model, whereas +/// `TfLiteInterpreterGetInputTensor` takes an index into the array of input +/// tensors. +/// +/// The ownership of the tensor remains with the TFLite runtime, meaning the +/// caller should not deallocate the pointer. +TFL_CAPI_EXPORT +TfLiteTensor* TfLiteInterpreterGetTensor(const TfLiteInterpreter* interpreter, + int index); + +/// Tries to cancel any in-flight invocation. +/// +/// \note This only cancels `TfLiteInterpreterInvoke` calls that happen before +/// calling this and it does not cancel subsequent invocations. +/// \note Calling this function will also cancel any in-flight invocations of +/// SignatureRunners constructed from this interpreter. +/// Non-blocking and thread safe. +/// +/// Returns kTfLiteError if cancellation is not enabled via +/// `TfLiteInterpreterOptionsEnableCancellation`. +/// +/// \warning This is an experimental API and subject to change. +TFL_CAPI_EXPORT extern TfLiteStatus TfLiteInterpreterCancel( + const TfLiteInterpreter* interpreter); + // -------------------------------------------------------------------------- // TfLiteTensor wraps data associated with a graph tensor. 
// @@ -369,49 +422,122 @@ TFL_CAPI_EXPORT extern const TfLiteTensor* TfLiteInterpreterGetOutputTensor( // language bindings. In the future the tensor struct will likely be made opaque // in the public API. -// Returns the type of a tensor element. +/// Returns the type of a tensor element. TFL_CAPI_EXPORT extern TfLiteType TfLiteTensorType(const TfLiteTensor* tensor); -// Returns the number of dimensions that the tensor has. +/// Returns the number of dimensions that the tensor has. Returns -1 in case +/// the 'opaque_tensor' does not have its dimensions property set. TFL_CAPI_EXPORT extern int32_t TfLiteTensorNumDims(const TfLiteTensor* tensor); -// Returns the length of the tensor in the "dim_index" dimension. -// REQUIRES: 0 <= dim_index < TFLiteTensorNumDims(tensor) +/// Returns the length of the tensor in the "dim_index" dimension. +/// REQUIRES: 0 <= dim_index < TFLiteTensorNumDims(tensor) TFL_CAPI_EXPORT extern int32_t TfLiteTensorDim(const TfLiteTensor* tensor, int32_t dim_index); -// Returns the size of the underlying data in bytes. +/// Returns the size of the underlying data in bytes. TFL_CAPI_EXPORT extern size_t TfLiteTensorByteSize(const TfLiteTensor* tensor); -// Returns a pointer to the underlying data buffer. -// -// NOTE: The result may be null if tensors have not yet been allocated, e.g., -// if the Tensor has just been created or resized and `TfLiteAllocateTensors()` -// has yet to be called, or if the output tensor is dynamically sized and the -// interpreter hasn't been invoked. +/// Returns a pointer to the underlying data buffer. +/// +/// \note The result may be null if tensors have not yet been allocated, e.g., +/// if the Tensor has just been created or resized and `TfLiteAllocateTensors()` +/// has yet to be called, or if the output tensor is dynamically sized and the +/// interpreter hasn't been invoked. TFL_CAPI_EXPORT extern void* TfLiteTensorData(const TfLiteTensor* tensor); -// Returns the (null-terminated) name of the tensor. +/// Returns the (null-terminated) name of the tensor. TFL_CAPI_EXPORT extern const char* TfLiteTensorName(const TfLiteTensor* tensor); -// Returns the parameters for asymmetric quantization. The quantization -// parameters are only valid when the tensor type is `kTfLiteUInt8` and the -// `scale != 0`. Quantized values can be converted back to float using: -// real_value = scale * (quantized_value - zero_point); +/// Returns the parameters for asymmetric quantization. The quantization +/// parameters are only valid when the tensor type is `kTfLiteUInt8` and the +/// `scale != 0`. Quantized values can be converted back to float using: +/// real_value = scale * (quantized_value - zero_point); TFL_CAPI_EXPORT extern TfLiteQuantizationParams TfLiteTensorQuantizationParams( const TfLiteTensor* tensor); -// Copies from the provided input buffer into the tensor's buffer. -// REQUIRES: input_data_size == TfLiteTensorByteSize(tensor) +/// Copies from the provided input buffer into the tensor's buffer. +/// REQUIRES: input_data_size == TfLiteTensorByteSize(tensor) TFL_CAPI_EXPORT extern TfLiteStatus TfLiteTensorCopyFromBuffer( TfLiteTensor* tensor, const void* input_data, size_t input_data_size); -// Copies to the provided output buffer from the tensor's buffer. -// REQUIRES: output_data_size == TfLiteTensorByteSize(tensor) +/// Copies to the provided output buffer from the tensor's buffer. 
+/// REQUIRES: output_data_size == TfLiteTensorByteSize(tensor) TFL_CAPI_EXPORT extern TfLiteStatus TfLiteTensorCopyToBuffer( const TfLiteTensor* output_tensor, void* output_data, size_t output_data_size); +/// Returns a new TfLiteRegistrationExternal instance. +/// +/// \note The caller retains ownership and should ensure that +/// the lifetime of the `TfLiteRegistrationExternal` must be at least as long as +/// the lifetime of the `TfLiteInterpreter`. +/// \warning This is an experimental API and subject to change. +TFL_CAPI_EXPORT extern TfLiteRegistrationExternal* +TfLiteRegistrationExternalCreate(TfLiteBuiltinOperator builtin_code, + const char* custom_name, int version); + +/// Return the builtin op code of the provided external 'registration'. +/// +/// \warning This is an experimental API and subject to change. +TFL_CAPI_EXPORT extern TfLiteBuiltinOperator +TfLiteRegistrationExternalGetBuiltInCode( + const TfLiteRegistrationExternal* registration); + +/// Returns the custom name of the provided 'registration'. The returned pointer +/// will be non-null iff the op is a custom op. +/// +/// \warning This is an experimental API and subject to change. +TFL_CAPI_EXPORT extern const char* TfLiteRegistrationExternalGetCustomName( + const TfLiteRegistrationExternal* registration); + +/// Destroys the TfLiteRegistrationExternal instance. +/// \warning This is an experimental API and subject to change. +TFL_CAPI_EXPORT extern void TfLiteRegistrationExternalDelete( + TfLiteRegistrationExternal* registration); + +/// Sets the initialization callback for the registration. +/// +/// The callback is called to initialize the op from serialized data. +/// Please refer `init` of `TfLiteRegistration` for the detail. +/// \warning This is an experimental API and subject to change. +TFL_CAPI_EXPORT extern void TfLiteRegistrationExternalSetInit( + TfLiteRegistrationExternal* registration, + void* (*init)(TfLiteOpaqueContext* context, const char* buffer, + size_t length)); + +/// Sets the deallocation callback for the registration. +/// +/// This callback is called to deallocate the data returned by the init +/// callback. The value passed in the `data` parameter is the value that was +/// returned by the `init` callback. +/// Please refer `free` of `TfLiteRegistration` for the detail. +/// \warning This is an experimental API and subject to change. +TFL_CAPI_EXPORT extern void TfLiteRegistrationExternalSetFree( + TfLiteRegistrationExternal* registration, + void (*free)(TfLiteOpaqueContext* context, void* data)); + +/// Sets the preparation callback for the registration. +/// +/// The callback is called when the inputs of operator have been resized. +/// Please refer `prepare` of `TfLiteRegistration` for the detail. +/// \warning This is an experimental API and subject to change. +TFL_CAPI_EXPORT extern void TfLiteRegistrationExternalSetPrepare( + TfLiteRegistrationExternal* registration, + TfLiteStatus (*prepare)(TfLiteOpaqueContext* context, + TfLiteOpaqueNode* node)); + +/// Sets the invocation callback for the registration. +/// +/// The callback is called when the operator is executed. +/// Please refer `invoke` of `TfLiteRegistration` for the detail. +/// \warning This is an experimental API and subject to change. 
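+///
+/// A hypothetical end-to-end sketch of the registration API (the `MyOp*`
+/// callbacks are placeholders supplied by the client, not part of this
+/// header):
+///
+///     TfLiteRegistrationExternal* reg =
+///         TfLiteRegistrationExternalCreate(kTfLiteBuiltinCustom, "MyOp", 1);
+///     TfLiteRegistrationExternalSetInit(reg, MyOpInit);
+///     TfLiteRegistrationExternalSetFree(reg, MyOpFree);
+///     TfLiteRegistrationExternalSetPrepare(reg, MyOpPrepare);
+///     TfLiteRegistrationExternalSetInvoke(reg, MyOpInvoke);
+///     TfLiteInterpreterOptionsAddRegistrationExternal(options, reg);
+///     // `reg` must outlive any interpreter created from `options`.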
+TFL_CAPI_EXPORT extern void TfLiteRegistrationExternalSetInvoke( + TfLiteRegistrationExternal* registration, + TfLiteStatus (*invoke)(TfLiteOpaqueContext* context, + TfLiteOpaqueNode* node)); + +// NOLINTEND(modernize-redundant-void-arg) + #ifdef __cplusplus } // extern "C" #endif // __cplusplus diff --git a/Packages/com.github.asus4.tflite/Plugins/iOS/TensorFlowLiteC.framework/Headers/c_api_experimental.h b/Packages/com.github.asus4.tflite/Plugins/iOS/TensorFlowLiteC.framework/Headers/c_api_experimental.h index 3c591ce17..a67b22c8d 100755 --- a/Packages/com.github.asus4.tflite/Plugins/iOS/TensorFlowLiteC.framework/Headers/c_api_experimental.h +++ b/Packages/com.github.asus4.tflite/Plugins/iOS/TensorFlowLiteC.framework/Headers/c_api_experimental.h @@ -12,8 +12,13 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ -#ifndef TENSORFLOW_LITE_C_C_API_EXPERIMENTAL_H_ -#define TENSORFLOW_LITE_C_C_API_EXPERIMENTAL_H_ +/// WARNING: Users of TensorFlow Lite should not include this file directly, +/// but should instead include +/// "third_party/tensorflow/lite/c/c_api_experimental.h". +/// Only the TensorFlow Lite implementation itself should include this +/// file directly. +#ifndef TENSORFLOW_LITE_CORE_C_C_API_EXPERIMENTAL_H_ +#define TENSORFLOW_LITE_CORE_C_C_API_EXPERIMENTAL_H_ #include "builtin_ops.h" #include "c_api.h" @@ -380,14 +385,39 @@ TFL_CAPI_EXPORT extern const char* TfLiteSignatureRunnerGetOutputName( TFL_CAPI_EXPORT extern const TfLiteTensor* TfLiteSignatureRunnerGetOutputTensor( const TfLiteSignatureRunner* signature_runner, const char* output_name); +/// Attempts to cancel in flight invocation if any. +/// This will not affect calls to `Invoke` that happend after this. +/// Non blocking and thread safe. +/// Returns kTfLiteError if cancellation is not enabled, otherwise returns +/// kTfLiteOk. +/// NOTE: Calling this function will cancel in-flight invocations +/// in all SignatureRunners built from the same interpreter. +/// +/// WARNING: This is an experimental API and subject to change. +TFL_CAPI_EXPORT extern TfLiteStatus TfLiteSignatureRunnerCancel( + TfLiteSignatureRunner* signature_runner); + /// Destroys the signature runner. /// /// WARNING: This is an experimental API and subject to change. TFL_CAPI_EXPORT extern void TfLiteSignatureRunnerDelete( TfLiteSignatureRunner* signature_runner); +// Forward declaration, to avoid need for dependency on +// tensorflow/lite/profiling/telemetry/profiler.h. +struct TfLiteTelemetryProfilerStruct; + +/// Registers the telemetry profiler to the interpreter. +/// Note: The interpreter does not take the ownership of profiler, but callers +/// must ensure profiler->data outlives the lifespan of the interpreter. +/// +/// WARNING: This is an experimental API and subject to change. 
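+///
+/// Illustrative sketch: a client populates the callbacks of a
+/// `TfLiteTelemetryProfilerStruct` (declared in profiler.h) in an object
+/// that it owns, then registers it, e.g.
+///
+///     TfLiteInterpreterOptionsSetTelemetryProfiler(options, &my_profiler);
+///
+/// where `my_profiler` (and its `data` field) must outlive the interpreter.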
+TFL_CAPI_EXPORT extern void TfLiteInterpreterOptionsSetTelemetryProfiler( + TfLiteInterpreterOptions* options, + struct TfLiteTelemetryProfilerStruct* profiler); + #ifdef __cplusplus } // extern "C" #endif // __cplusplus -#endif // TENSORFLOW_LITE_C_C_API_EXPERIMENTAL_H_ +#endif // TENSORFLOW_LITE_CORE_C_C_API_EXPERIMENTAL_H_ diff --git a/Packages/com.github.asus4.tflite/Plugins/iOS/TensorFlowLiteC.framework/Headers/c_api_types.h b/Packages/com.github.asus4.tflite/Plugins/iOS/TensorFlowLiteC.framework/Headers/c_api_types.h old mode 100644 new mode 100755 index 9d7668e13..3aab43f44 --- a/Packages/com.github.asus4.tflite/Plugins/iOS/TensorFlowLiteC.framework/Headers/c_api_types.h +++ b/Packages/com.github.asus4.tflite/Plugins/iOS/TensorFlowLiteC.framework/Headers/c_api_types.h @@ -16,8 +16,14 @@ limitations under the License. // This file declares types used by the pure C inference API defined in c_api.h, // some of which are also used in the C++ and C kernel and interpreter APIs. -#ifndef TENSORFLOW_LITE_C_C_API_TYPES_H_ -#define TENSORFLOW_LITE_C_C_API_TYPES_H_ +/// WARNING: Users of TensorFlow Lite should not include this file directly, +/// but should instead include +/// "third_party/tensorflow/lite/c/c_api_types.h". +/// Only the TensorFlow Lite implementation itself should include this +/// file directly. + +#ifndef TENSORFLOW_LITE_CORE_C_C_API_TYPES_H_ +#define TENSORFLOW_LITE_CORE_C_C_API_TYPES_H_ #include @@ -132,16 +138,31 @@ typedef struct TfLiteOpaqueNode TfLiteOpaqueNode; // TfLiteOpaqueTensor is an opaque version of TfLiteTensor; typedef struct TfLiteOpaqueTensor TfLiteOpaqueTensor; -// TfLiteOpaqueDelegateStruct: opaque version of TfLiteDelegate; allows -// delegation of nodes to alternative backends. +// TfLiteDelegate: allows delegation of nodes to alternative backends. +// Forward declaration of concrete type declared in common.h. +typedef struct TfLiteDelegate TfLiteDelegate; + +// TfLiteOpaqueDelegateStruct: unconditionally opaque version of +// TfLiteDelegate; allows delegation of nodes to alternative backends. // // This is an abstract type that is intended to have the same -// role as TfLiteDelegate from common.h, but without exposing the implementation +// role as TfLiteDelegate, but without exposing the implementation // details of how delegates are implemented. // WARNING: This is an experimental type and subject to change. typedef struct TfLiteOpaqueDelegateStruct TfLiteOpaqueDelegateStruct; +// TfLiteOpaqueDelegate: conditionally opaque version of +// TfLiteDelegate; allows delegation of nodes to alternative backends. +// For TF Lite in Play Services, this is an opaque type, +// but for regular TF Lite, this is just a typedef for TfLiteDelegate. +// WARNING: This is an experimental type and subject to change. 
+#if TFLITE_WITH_STABLE_ABI || TFLITE_USE_OPAQUE_DELEGATE +typedef TfLiteOpaqueDelegateStruct TfLiteOpaqueDelegate; +#else +typedef TfLiteDelegate TfLiteOpaqueDelegate; +#endif + #ifdef __cplusplus } // extern C #endif -#endif // TENSORFLOW_LITE_C_C_API_TYPES_H_ +#endif // TENSORFLOW_LITE_CORE_C_C_API_TYPES_H_ diff --git a/Packages/com.github.asus4.tflite/Plugins/iOS/TensorFlowLiteC.framework/Headers/common.h b/Packages/com.github.asus4.tflite/Plugins/iOS/TensorFlowLiteC.framework/Headers/common.h index 5bff3e114..db4e85a71 100755 --- a/Packages/com.github.asus4.tflite/Plugins/iOS/TensorFlowLiteC.framework/Headers/common.h +++ b/Packages/com.github.asus4.tflite/Plugins/iOS/TensorFlowLiteC.framework/Headers/common.h @@ -33,9 +33,16 @@ limitations under the License. // NOTE: The order of values in these structs are "semi-ABI stable". New values // should be added only to the end of structs and never reordered. -#ifndef TENSORFLOW_LITE_C_COMMON_H_ -#define TENSORFLOW_LITE_C_COMMON_H_ +/// WARNING: Users of TensorFlow Lite should not include this file directly, +/// but should instead include +/// "third_party/tensorflow/lite/c/common.h". +/// Only the TensorFlow Lite implementation itself should include this +/// file directly. +#ifndef TENSORFLOW_LITE_CORE_C_COMMON_H_ +#define TENSORFLOW_LITE_CORE_C_COMMON_H_ + +#include #include #include #include @@ -63,7 +70,6 @@ typedef enum TfLiteExternalContextType { struct TfLiteContext; struct TfLiteDelegate; struct TfLiteRegistration; -struct TfLiteOpaqueDelegateStruct; struct TfLiteOpaqueDelegateBuilder; // An external context is a collection of information unrelated to the TF Lite @@ -463,8 +469,9 @@ typedef struct TfLiteTensor { // Optional. Encodes shapes with unknown dimensions with -1. This field is // only populated when unknown dimensions exist in a read-write tensor (i.e. // an input or output tensor). (e.g. `dims` contains [1, 1, 1, 3] and - // `dims_signature` contains [1, -1, -1, 3]). Note that this field only - // exists when TF_LITE_STATIC_MEMORY is not defined. + // `dims_signature` contains [1, -1, -1, 3]). If no unknown dimensions exist + // then `dims_signature` is either null, or set to an empty array. Note that + // this field only exists when TF_LITE_STATIC_MEMORY is not defined. const TfLiteIntArray* dims_signature; } TfLiteTensor; @@ -642,23 +649,26 @@ void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims, TfLiteStatus TfLiteTensorCopy(const TfLiteTensor* src, TfLiteTensor* dst); // Change the size of the memory block owned by `tensor` to `num_bytes`. -// Tensors with allocation types other than kTfLiteDynamic will be ignored. +// Tensors with allocation types other than `kTfLiteDynamic` will be ignored and +// a kTfLiteOk will be returned. // `tensor`'s internal data buffer will be assigned a pointer // which can safely be passed to free or realloc if `num_bytes` is zero. -// Behaviour is undefined if `tensor` is NULL. // If `preserve_data` is true, tensor data will be unchanged in the range from -// the start of the region up to the minimum of the old and new sizes. -void TfLiteTensorResizeMaybeCopy(size_t num_bytes, TfLiteTensor* tensor, - bool preserve_data); +// the start of the region up to the minimum of the old and new sizes. In the +// case of NULL tensor, or an error allocating new memory, returns +// `kTfLiteError`. +TfLiteStatus TfLiteTensorResizeMaybeCopy(size_t num_bytes, TfLiteTensor* tensor, + bool preserve_data); // Change the size of the memory block owned by `tensor` to `num_bytes`. 
-// Tensors with allocation types other than kTfLiteDynamic will be ignored. +// Tensors with allocation types other than kTfLiteDynamic will be ignored and +// a kTfLiteOk will be returned. // `tensor`'s internal data buffer will be assigned a pointer // which can safely be passed to free or realloc if `num_bytes` is zero. -// Behaviour is undefined if `tensor` is NULL. // Tensor data will be unchanged in the range from the start of the region up to -// the minimum of the old and new sizes. -void TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor); +// the minimum of the old and new sizes. In the case +// of NULL tensor, or an error allocating new memory, returns `kTfLiteError`. +TfLiteStatus TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor); #endif // TF_LITE_STATIC_MEMORY // WARNING: This is an experimental interface that is subject to change. @@ -675,6 +685,22 @@ typedef struct TfLiteDelegateParams { TfLiteIntArray* output_tensors; } TfLiteDelegateParams; +// WARNING: This is an experimental interface that is subject to change. +// +// Currently, TfLiteOpaqueDelegateParams has to be allocated in a way that it's +// trivially destructable. It will be stored as `builtin_data` field in +// `TfLiteNode` of the delegate node. +// +// See also the `CreateOpaqueDelegateParams` function in `subgraph.cc` +// details. +typedef struct TfLiteOpaqueDelegateParams { + TfLiteOpaqueDelegate* delegate; + void* delegate_data; + TfLiteIntArray* nodes_to_replace; + TfLiteIntArray* input_tensors; + TfLiteIntArray* output_tensors; +} TfLiteOpaqueDelegateParams; + typedef struct TfLiteContext { // Number of tensors in the context. size_t tensors_size; @@ -983,7 +1009,15 @@ typedef enum TfLiteDelegateFlags { // 3. This flag requires that the original execution plan only have ops with // valid registrations (and not 'dummy' custom ops like with Flex). // WARNING: This feature is experimental and subject to change. - kTfLiteDelegateFlagsRequirePropagatedShapes = 2 + kTfLiteDelegateFlagsRequirePropagatedShapes = 2, + + // This flag can be used by delegates to request per-operator profiling. If a + // node is a delegate node, this flag will be checked before profiling. If + // set, then the node will not be profiled. The delegate will then add per + // operator information using Profiler::EventType::OPERATOR_INVOKE_EVENT and + // the results will appear in the operator-wise Profiling section and not in + // the Delegate internal section. + kTfLiteDelegateFlagsPerOperatorProfiling = 4 } TfLiteDelegateFlags; // WARNING: This is an experimental interface that is subject to change. @@ -1044,7 +1078,7 @@ typedef struct TfLiteDelegate { TfLiteDelegate TfLiteDelegateCreate(void); // `TfLiteOpaqueDelegateBuilder` is used for constructing -// `TfLiteOpaqueDelegateStruct`, see `TfLiteOpaqueDelegateCreate` below. Note: +// `TfLiteOpaqueDelegate`, see `TfLiteOpaqueDelegateCreate` below. Note: // This struct is not ABI stable. // // For forward source compatibility `TfLiteOpaqueDelegateBuilder` objects should @@ -1064,47 +1098,59 @@ typedef struct TfLiteOpaqueDelegateBuilder { // to ask the TensorFlow lite runtime to create macro-nodes to represent // delegated subgraphs of the original graph. TfLiteStatus (*Prepare)(TfLiteOpaqueContext* context, // NOLINT - struct TfLiteOpaqueDelegateStruct* delegate, - void* data); + TfLiteOpaqueDelegate* delegate, void* data); // Copies the data from delegate buffer handle into raw memory of the given // 'tensor'. 
Note that the delegate is allowed to allocate the raw bytes as // long as it follows the rules for kTfLiteDynamic tensors, in which case this // cannot be null. TfLiteStatus (*CopyFromBufferHandle)( // NOLINT - TfLiteOpaqueContext* context, struct TfLiteOpaqueDelegateStruct* delegate, - void* data, TfLiteBufferHandle buffer_handle, TfLiteOpaqueTensor* tensor); + TfLiteOpaqueContext* context, TfLiteOpaqueDelegate* delegate, void* data, + TfLiteBufferHandle buffer_handle, TfLiteOpaqueTensor* tensor); // Copies the data from raw memory of the given 'tensor' to delegate buffer // handle. This can be null if the delegate doesn't use its own buffer. TfLiteStatus (*CopyToBufferHandle)( // NOLINT - TfLiteOpaqueContext* context, struct TfLiteOpaqueDelegateStruct* delegate, - void* data, TfLiteBufferHandle buffer_handle, TfLiteOpaqueTensor* tensor); + TfLiteOpaqueContext* context, TfLiteOpaqueDelegate* delegate, void* data, + TfLiteBufferHandle buffer_handle, TfLiteOpaqueTensor* tensor); // Frees the Delegate Buffer Handle. Note: This only frees the handle, but // this doesn't release the underlying resource (e.g. textures). The // resources are either owned by application layer or the delegate. // This can be null if the delegate doesn't use its own buffer. void (*FreeBufferHandle)(TfLiteOpaqueContext* context, // NOLINT - struct TfLiteOpaqueDelegateStruct* delegate, - void* data, TfLiteBufferHandle* handle); + TfLiteOpaqueDelegate* delegate, void* data, + TfLiteBufferHandle* handle); // Bitmask flags. See the comments in `TfLiteDelegateFlags`. int64_t flags; } TfLiteOpaqueDelegateBuilder; // Creates an opaque delegate and returns its address. The opaque delegate will // behave according to the provided 'opaque_delegate_builder'. The lifetime of -// the fields within the 'opaque_delegate_builder' must outlive any interaction -// between the runtime and the returned 'TfLiteOpaqueDelegateStruct'. The -// returned address should be passed to 'TfLiteOpaqueDelegateDelete' for -// deletion. If 'opaque_delegate_builder' is a null pointer, then a null -// pointer will be returned. -struct TfLiteOpaqueDelegateStruct* TfLiteOpaqueDelegateCreate( +// the objects pointed to by any of the fields within the +// 'opaque_delegate_builder' must outlive the returned +// 'TfLiteOpaqueDelegate' and any 'TfLiteInterpreter', +// 'TfLiteInterpreterOptions', 'tflite::Interpreter', or +// 'tflite::InterpreterBuilder' that the delegate is added to. The returned +// address should be passed to 'TfLiteOpaqueDelegateDelete' for deletion. If +// 'opaque_delegate_builder' is a null pointer, then a null pointer will be +// returned. +TfLiteOpaqueDelegate* TfLiteOpaqueDelegateCreate( const TfLiteOpaqueDelegateBuilder* opaque_delegate_builder); // Deletes the provided opaque 'delegate'. This function has no effect if the // 'delegate' is a null pointer. -void TfLiteOpaqueDelegateDelete( - const struct TfLiteOpaqueDelegateStruct* delegate); +void TfLiteOpaqueDelegateDelete(TfLiteOpaqueDelegate* delegate); + +// Returns a pointer to the data associated with the provided opaque 'delegate'. +// +// A null pointer will be returned when: +// - The 'delegate' is null. +// - The 'data' field of the 'TfLiteOpaqueDelegateBuilder' used to construct the +// 'delegate' was null. +// - Or in case of any other error. +// - The 'delegate' has been constructed via a 'TfLiteOpaqueDelegateBuilder', +// but the 'data' field of the 'TfLiteOpaqueDelegateBuilder' is null. 
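+//
+// Illustrative sketch (MyDelegatePrepare and my_state are hypothetical client
+// code, not declared in this header):
+//
+//   TfLiteOpaqueDelegateBuilder builder = {0};
+//   builder.data = &my_state;
+//   builder.Prepare = MyDelegatePrepare;
+//   TfLiteOpaqueDelegate* delegate = TfLiteOpaqueDelegateCreate(&builder);
+//   // Inside the callbacks, the same state is recovered via:
+//   void* state = TfLiteOpaqueDelegateGetData(delegate);
+//   // ...use the delegate with an interpreter, then:
+//   TfLiteOpaqueDelegateDelete(delegate);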
+void* TfLiteOpaqueDelegateGetData(const TfLiteOpaqueDelegate* delegate); #ifdef __cplusplus } // extern "C" #endif // __cplusplus -#endif // TENSORFLOW_LITE_C_COMMON_H_ +#endif // TENSORFLOW_LITE_CORE_C_COMMON_H_ diff --git a/Packages/com.github.asus4.tflite/Plugins/iOS/TensorFlowLiteC.framework/Headers/profiler.h b/Packages/com.github.asus4.tflite/Plugins/iOS/TensorFlowLiteC.framework/Headers/profiler.h new file mode 100755 index 000000000..5c1f9f4ba --- /dev/null +++ b/Packages/com.github.asus4.tflite/Plugins/iOS/TensorFlowLiteC.framework/Headers/profiler.h @@ -0,0 +1,85 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_PROFILING_TELEMETRY_C_PROFILER_H_ +#define TENSORFLOW_LITE_PROFILING_TELEMETRY_C_PROFILER_H_ + +#include + +#include "telemetry_setting.h" + +#ifdef __cplusplus +extern "C" { +#endif // __cplusplus + +// C API for TFLite telemetry profiler. +// See C++ interface in tflite::telemetry::TelemetryProfiler. +// Note: This struct does not comply with ABI stability. +typedef struct TfLiteTelemetryProfilerStruct { + // Data that profiler needs to identify itself. This data is owned by the + // profiler. The profiler is owned in the user code, so the profiler is + // responsible for deallocating this when it is destroyed. + void* data; + + // Reports a telemetry event with status. + // `event_name` indicates the name of the event (e.g. "Invoke") and should not + // be nullptr. + // `status`: uint64_t representation of TelemetryStatusCode. + void (*ReportTelemetryEvent)( // NOLINT + struct TfLiteTelemetryProfilerStruct* profiler, const char* event_name, + uint64_t status); + + // Reports an op telemetry event with status. + // Same as `ReportTelemetryEvent`, with additional args `op_idx` and + // `subgraph_idx`. + // `status`: uint64_t representation of TelemetryStatusCode. + void (*ReportTelemetryOpEvent)( // NOLINT + struct TfLiteTelemetryProfilerStruct* profiler, const char* event_name, + int64_t op_idx, int64_t subgraph_idx, uint64_t status); + + // Reports the model and interpreter settings. + // `setting_name` indicates the name of the setting and should not be nullptr. + // `settings`'s lifespan is not guaranteed outside the scope of + // `ReportSettings` call. + void (*ReportSettings)( // NOLINT + struct TfLiteTelemetryProfilerStruct* profiler, const char* setting_name, + const TfLiteTelemetrySettings* settings); + + // Signals the beginning of an operator invocation. + // `op_name` is the name of the operator and should not be nullptr. + // Op invoke event are triggered with OPERATOR_INVOKE_EVENT type for TfLite + // ops and delegate kernels, and DELEGATE_OPERATOR_INVOKE_EVENT for delegate + // ops within a delegate kernels, if the instrumentation is in place. + // Returns event handle which can be passed to `EndOpInvokeEvent` later. 
+ uint32_t (*ReportBeginOpInvokeEvent)( // NOLINT + struct TfLiteTelemetryProfilerStruct* profiler, const char* op_name, + int64_t op_idx, int64_t subgraph_idx); + + // Signals the end to the event specified by `event_handle`. + void (*ReportEndOpInvokeEvent)( // NOLINT + struct TfLiteTelemetryProfilerStruct* profiler, uint32_t event_handle); + + // For op / delegate op with built-in performance measurements, they + // are able to report the elapsed time directly. + // `elapsed_time` is in microsecond. + void (*ReportOpInvokeEvent)( // NOLINT + struct TfLiteTelemetryProfilerStruct* profiler, const char* op_name, + uint64_t elapsed_time, int64_t op_idx, int64_t subgraph_idx); +} TfLiteTelemetryProfilerStruct; + +#ifdef __cplusplus +} // extern "C" +#endif // __cplusplus + +#endif // TENSORFLOW_LITE_PROFILING_TELEMETRY_C_PROFILER_H_ diff --git a/Packages/com.github.asus4.tflite/Plugins/iOS/TensorFlowLiteC.framework/Headers/telemetry_setting.h b/Packages/com.github.asus4.tflite/Plugins/iOS/TensorFlowLiteC.framework/Headers/telemetry_setting.h new file mode 100755 index 000000000..f9653acbd --- /dev/null +++ b/Packages/com.github.asus4.tflite/Plugins/iOS/TensorFlowLiteC.framework/Headers/telemetry_setting.h @@ -0,0 +1,103 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_PROFILING_TELEMETRY_C_TELEMETRY_SETTING_H_ +#define TENSORFLOW_LITE_PROFILING_TELEMETRY_C_TELEMETRY_SETTING_H_ + +#include +#include + +#include "common.h" + +#ifdef __cplusplus +extern "C" { +#endif // __cplusplus + +// TFLite model, interpreter or delegate settings that will be reported by +// telemetry. +// Note: This struct does not comply with ABI stability. +typedef struct TfLiteTelemetrySettings { + // Source of the settings. Determines how `data` is interpreted. + // See tflite::telemetry::TelemetrySource for definition. + uint32_t source; + + // Settings data. Interpretation based on `source`. + // If `source` is TFLITE_INTERPRETER, the type of `data` will + // be `TelemetryInterpreterSettings`. + // Otherwise, the data is provided by the individual delegate. + // Owned by the caller that exports TelemetrySettings (e.g. Interpreter). + const void* data; +} TfLiteTelemetrySettings; + +typedef struct TfLiteTelemetryConversionMetadata + TfLiteTelemetryConversionMetadata; + +const int32_t* TfLiteTelemetryConversionMetadataGetModelOptimizationModes( + const TfLiteTelemetryConversionMetadata* metadata); + +size_t TfLiteTelemetryConversionMetadataGetNumModelOptimizationModes( + const TfLiteTelemetryConversionMetadata* metadata); + +// TfLite model information and settings of the interpreter. +// Note: This struct does not comply with ABI stability. 
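/* Editorial sketch (not part of the upstream header): given the 'settings'
 * argument passed to a profiler's ReportSettings callback, 'data' is cast
 * according to 'source'. Assuming 'source' identifies the interpreter (the
 * TFLITE_INTERPRETER value from tflite::telemetry::TelemetrySource, which is
 * not redeclared in this header):
 *
 *   const TfLiteTelemetryInterpreterSettings* s =
 *       (const TfLiteTelemetryInterpreterSettings*)settings->data;
 *   const TfLiteTelemetryConversionMetadata* meta =
 *       TfLiteTelemetryInterpreterSettingsGetConversionMetadata(s);
 *   size_t n = TfLiteTelemetryConversionMetadataGetNumModelOptimizationModes(meta);
 *
 * Both accessor functions are declared in this header.
 */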
+typedef struct TfLiteTelemetryInterpreterSettings + TfLiteTelemetryInterpreterSettings; + +const TfLiteTelemetryConversionMetadata* +TfLiteTelemetryInterpreterSettingsGetConversionMetadata( + const TfLiteTelemetryInterpreterSettings* settings); + +// Telemetry data for a specific TFLite subgraph. +typedef struct TfLiteTelemetrySubgraphInfo TfLiteTelemetrySubgraphInfo; + +size_t TfLiteTelemetryInterpreterSettingsGetNumSubgraphInfo( + const TfLiteTelemetryInterpreterSettings* settings); + +const TfLiteTelemetrySubgraphInfo* +TfLiteTelemetryInterpreterSettingsGetSubgraphInfo( + const TfLiteTelemetryInterpreterSettings* settings); + +size_t TfLiteTelemetrySubgraphInfoGetNumOpTypes( + TfLiteTelemetrySubgraphInfo* subgraph_info); + +const int32_t* TfLiteTelemetrySubgraphInfoGetOpTypes( + TfLiteTelemetrySubgraphInfo* subgraph_info); + +size_t TfLiteTelemetrySubgraphInfoGetNumQuantizations( + TfLiteTelemetrySubgraphInfo* subgraph_info); + +const TfLiteQuantization* TfLiteTelemetrySubgraphInfoGetQuantizations( + TfLiteTelemetrySubgraphInfo* subgraph_info); + +size_t TfLiteTelemetrySubgraphInfoGetNumCustomOpNames( + TfLiteTelemetrySubgraphInfo* subgraph_info); + +const char** TfLiteTelemetrySubgraphInfoGetCustomOpNames( + TfLiteTelemetrySubgraphInfo* subgraph_info); + +// Telemetry information for GPU delegate. +typedef struct TfLiteTelemetryGpuDelegateSettings + TfLiteTelemetryGpuDelegateSettings; + +size_t TfLiteTelemetryGpuDelegateSettingsGetNumNodesDelegated( + const TfLiteTelemetryGpuDelegateSettings* settings); + +int TfLiteTelemetryGpuDelegateSettingsGetBackend( + const TfLiteTelemetryGpuDelegateSettings* settings); + +#ifdef __cplusplus +} // extern "C" +#endif // __cplusplus + +#endif // TENSORFLOW_LITE_PROFILING_TELEMETRY_C_TELEMETRY_SETTING_H_ diff --git a/Packages/com.github.asus4.tflite/Plugins/iOS/TensorFlowLiteC.framework/Headers/xnnpack_delegate.h b/Packages/com.github.asus4.tflite/Plugins/iOS/TensorFlowLiteC.framework/Headers/xnnpack_delegate.h index 43828ee33..142c9327e 100755 --- a/Packages/com.github.asus4.tflite/Plugins/iOS/TensorFlowLiteC.framework/Headers/xnnpack_delegate.h +++ b/Packages/com.github.asus4.tflite/Plugins/iOS/TensorFlowLiteC.framework/Headers/xnnpack_delegate.h @@ -44,6 +44,9 @@ typedef struct { // Cache for packed weights, can be shared between multiple instances of // delegates. struct TfLiteXNNPackDelegateWeightsCache* weights_cache; + // Whether READ_VARIABLE, ASSIGN_VARIABLE, and VARIABLE_HANDLE operations + // should be handled by XNNPACK. + bool handle_variable_ops; } TfLiteXNNPackDelegateOptions; // Returns a structure with the default XNNPack delegate options. @@ -58,6 +61,12 @@ TfLiteXNNPackDelegateOptionsDefault(); TFL_CAPI_EXPORT TfLiteDelegate* TfLiteXNNPackDelegateCreate( const TfLiteXNNPackDelegateOptions* options); +// Performs the same task as TfLiteXNNPackDelegateCreate, with one exception. +// If the context passed contains a non-null xnnpack_threadpool field, +// we will use it as the threadpool for the delegate created. +TfLiteDelegate* TfLiteXNNPackDelegateCreateWithThreadpool( + const TfLiteXNNPackDelegateOptions* options, TfLiteContext* context); + // Returns the pthreadpool_t object used for parallelization in XNNPACK. // Can return NULL if the XNNPack delegate is single-threaded. 
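/* Editorial sketch (not part of the upstream header): enabling the new
 * variable-op handling when creating the delegate from default options.
 *
 *   TfLiteXNNPackDelegateOptions opts = TfLiteXNNPackDelegateOptionsDefault();
 *   opts.num_threads = 2;              // optional: use XNNPACK's thread pool
 *   opts.handle_variable_ops = true;   // field added in this version
 *   TfLiteDelegate* xnnpack_delegate = TfLiteXNNPackDelegateCreate(&opts);
 *   // ... attach to the interpreter options and run inference ...
 *   TfLiteXNNPackDelegateDelete(xnnpack_delegate);
 *
 * TfLiteXNNPackDelegateCreateWithThreadpool (declared above) behaves the same,
 * except that it reuses the thread pool from the provided TfLiteContext when
 * its xnnpack_threadpool field is non-null.
 */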
// diff --git a/Packages/com.github.asus4.tflite/Plugins/iOS/TensorFlowLiteC.framework/TensorFlowLiteC b/Packages/com.github.asus4.tflite/Plugins/iOS/TensorFlowLiteC.framework/TensorFlowLiteC index e96cd3975..453699257 100755 --- a/Packages/com.github.asus4.tflite/Plugins/iOS/TensorFlowLiteC.framework/TensorFlowLiteC +++ b/Packages/com.github.asus4.tflite/Plugins/iOS/TensorFlowLiteC.framework/TensorFlowLiteC @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8d40e5d10759a84633d89bea03faaa4c0347f4509f72a357b4aed4a57dcab14c -size 20457876 +oid sha256:3d153716b58626e80022a017da90295f5ac1ad47992b4733ede8527bb3fa90ad +size 20926140 diff --git a/Packages/com.github.asus4.tflite/Plugins/iOS/TensorFlowLiteCMetal.framework/TensorFlowLiteCMetal b/Packages/com.github.asus4.tflite/Plugins/iOS/TensorFlowLiteCMetal.framework/TensorFlowLiteCMetal index e058e1cb1..0bfb114ba 100755 --- a/Packages/com.github.asus4.tflite/Plugins/iOS/TensorFlowLiteCMetal.framework/TensorFlowLiteCMetal +++ b/Packages/com.github.asus4.tflite/Plugins/iOS/TensorFlowLiteCMetal.framework/TensorFlowLiteCMetal @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:42693c45cead1bea167c6b2069a5700e0c3918f7ba5f82e3372a63af22a11a6e -size 18497296 +oid sha256:70ef306bdaa1d215b22541f4219ad17c1287dad0da265c0dfc926e4615836bad +size 18974576 diff --git a/Packages/com.github.asus4.tflite/Runtime/Delegates/XNNPackDelegate.cs b/Packages/com.github.asus4.tflite/Runtime/Delegates/XNNPackDelegate.cs index a55ad6e90..3f469d6a5 100644 --- a/Packages/com.github.asus4.tflite/Runtime/Delegates/XNNPackDelegate.cs +++ b/Packages/com.github.asus4.tflite/Runtime/Delegates/XNNPackDelegate.cs @@ -37,9 +37,16 @@ public enum Flags : uint [StructLayout(LayoutKind.Sequential)] public struct Options { + // Number of threads to use in the thread pool. + // 0 or negative value means no thread pool used. public int numThreads; public Flags flags; + // Cache for packed weights, can be shared between multiple instances of + // delegates. public TfLiteXNNPackDelegateWeightsCache weightsCache; + // Whether READ_VARIABLE, ASSIGN_VARIABLE, and VARIABLE_HANDLE operations + // should be handled by XNNPACK. 
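// Editorial sketch (not part of this file): populating the managed Options
// mirror from user code. The constructor taking Options is assumed here for
// illustration only; it is not shown in this diff.
//
//   var options = new XNNPackDelegate.Options()
//   {
//       numThreads = 2,   // 0 or negative: no thread pool
//   };
//   var xnnpack = new XNNPackDelegate(options);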
+ bool handleVariableOps; } public TfLiteDelegate Delegate { get; private set; } From 14677c39c152f323d043433bcd8ef89bc3e9c39a Mon Sep 17 00:00:00 2001 From: Koki Ibukuro Date: Sun, 14 May 2023 21:30:44 +0200 Subject: [PATCH 2/6] Build macOS libs for v2.12.0 --- .../Plugins/macOS/libtensorflowlite_c.dylib | 4 ++-- .../Plugins/macOS/libtensorflowlite_metal_delegate.dylib | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Packages/com.github.asus4.tflite/Plugins/macOS/libtensorflowlite_c.dylib b/Packages/com.github.asus4.tflite/Plugins/macOS/libtensorflowlite_c.dylib index cdc265168..ba6b48c70 100755 --- a/Packages/com.github.asus4.tflite/Plugins/macOS/libtensorflowlite_c.dylib +++ b/Packages/com.github.asus4.tflite/Plugins/macOS/libtensorflowlite_c.dylib @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:668a12b5afc571f1bae93ef901a796acd22d6c22b3e41a11145a744af470189c -size 9859830 +oid sha256:bf631a3e2141b257562ee868ce7dd9ad4975686ff92ac9e71c5777fe06174095 +size 10276086 diff --git a/Packages/com.github.asus4.tflite/Plugins/macOS/libtensorflowlite_metal_delegate.dylib b/Packages/com.github.asus4.tflite/Plugins/macOS/libtensorflowlite_metal_delegate.dylib index 7e7904df8..45ba8cf27 100755 --- a/Packages/com.github.asus4.tflite/Plugins/macOS/libtensorflowlite_metal_delegate.dylib +++ b/Packages/com.github.asus4.tflite/Plugins/macOS/libtensorflowlite_metal_delegate.dylib @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:db788f6d9e38e44fae075760c2110cb83e0d5e33dbab04c57eab694f55628e5b -size 4912464 +oid sha256:dbd91c8bcde8ed16be3041eb2eca1c1ef161e4f2d58c662da8ce19cfe79096c4 +size 6390208 From 674a01e6b7e9a7e206ab589b23439af21aa8a0af Mon Sep 17 00:00:00 2001 From: Koki Ibukuro Date: Sun, 14 May 2023 22:57:33 +0200 Subject: [PATCH 3/6] Add linux libs for v2.12.0 --- .../Plugins/Linux/arm64/libtensorflowlite_c.so | 4 ++-- .../Plugins/Linux/x86_64/libtensorflowlite_c.so | 4 ++-- .../Plugins/Linux/x86_64/libtensorflowlite_gpu_delegate.so | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Packages/com.github.asus4.tflite/Plugins/Linux/arm64/libtensorflowlite_c.so b/Packages/com.github.asus4.tflite/Plugins/Linux/arm64/libtensorflowlite_c.so index f4460bf95..06148391d 100755 --- a/Packages/com.github.asus4.tflite/Plugins/Linux/arm64/libtensorflowlite_c.so +++ b/Packages/com.github.asus4.tflite/Plugins/Linux/arm64/libtensorflowlite_c.so @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:16fe33d5f3c37da7f23b597317c7dc541b90af6bdf7274c8364dd8e37bac133b -size 3659976 +oid sha256:3c0403432912bc9ae5fe7af547fb4c30ac6efb4e44f8ac93b9e1a5d7d0f0ffb4 +size 3774632 diff --git a/Packages/com.github.asus4.tflite/Plugins/Linux/x86_64/libtensorflowlite_c.so b/Packages/com.github.asus4.tflite/Plugins/Linux/x86_64/libtensorflowlite_c.so index 2b49f0187..ddf9c0c07 100755 --- a/Packages/com.github.asus4.tflite/Plugins/Linux/x86_64/libtensorflowlite_c.so +++ b/Packages/com.github.asus4.tflite/Plugins/Linux/x86_64/libtensorflowlite_c.so @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:8155ba2323b91a8e61a86d3b6ed76afda6282abfbdaa77a3b82967ee1d8649f6 -size 4605240 +oid sha256:bd8866930d85338bb5819ec3309a7bace7735c04104d19775fa3b9600004bc28 +size 4728120 diff --git a/Packages/com.github.asus4.tflite/Plugins/Linux/x86_64/libtensorflowlite_gpu_delegate.so b/Packages/com.github.asus4.tflite/Plugins/Linux/x86_64/libtensorflowlite_gpu_delegate.so index 8bd84cc7c..d8e86aa00 100755 --- 
a/Packages/com.github.asus4.tflite/Plugins/Linux/x86_64/libtensorflowlite_gpu_delegate.so +++ b/Packages/com.github.asus4.tflite/Plugins/Linux/x86_64/libtensorflowlite_gpu_delegate.so @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:26fa332b5085b4228c961a67b648e26d50d4fd77394dde9a2182cac3ee6fa6fa -size 4251232 +oid sha256:6a61360181252ab5da3ceeefd6665aadaf17dee93d511de737e4b1ab6cc7856a +size 4406880 From 694af4bdfed81e76be627ab88e6863a988803b47 Mon Sep 17 00:00:00 2001 From: Koki Ibukuro Date: Sun, 14 May 2023 21:18:15 +0200 Subject: [PATCH 4/6] Build windows libs for v2.12.0 --- .../Plugins/Windows/libtensorflowlite_c.dll | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Packages/com.github.asus4.tflite/Plugins/Windows/libtensorflowlite_c.dll b/Packages/com.github.asus4.tflite/Plugins/Windows/libtensorflowlite_c.dll index c45ef0f16..ca017beb5 100644 --- a/Packages/com.github.asus4.tflite/Plugins/Windows/libtensorflowlite_c.dll +++ b/Packages/com.github.asus4.tflite/Plugins/Windows/libtensorflowlite_c.dll @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:d59113bbf6421c9dec5d002b63b7edb5cb324919b82cc1d5cebb42a22ebdd117 -size 3020800 +oid sha256:8b0f287d3e24658c5fd5cf4ca814ffa6d3efd53938926a6cfe3d821e1d075a79 +size 3132928 From 1e0ec028824595fedcf9ed5204722a3626e66801 Mon Sep 17 00:00:00 2001 From: Koki Ibukuro Date: Sun, 14 May 2023 23:01:39 +0200 Subject: [PATCH 5/6] Build Android libs for v2.12.0 --- .../Plugins/Android/arm64-v8a/libtensorflowlite_gpu_gl.so | 4 ++-- .../Plugins/Android/armeabi-v7a/libtensorflowlite_gpu_gl.so | 4 ++-- .../Plugins/Android/tensorflow-lite-gpu.aar | 4 ++-- .../Plugins/Android/tensorflow-lite.aar | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Packages/com.github.asus4.tflite/Plugins/Android/arm64-v8a/libtensorflowlite_gpu_gl.so b/Packages/com.github.asus4.tflite/Plugins/Android/arm64-v8a/libtensorflowlite_gpu_gl.so index a58256baa..e18ba1c8d 100755 --- a/Packages/com.github.asus4.tflite/Plugins/Android/arm64-v8a/libtensorflowlite_gpu_gl.so +++ b/Packages/com.github.asus4.tflite/Plugins/Android/arm64-v8a/libtensorflowlite_gpu_gl.so @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:a13c654d76ed39f4a76ccb9efeaf42c3c5f4cc9bbc462297d778acbfa6e8d5be -size 3260000 +oid sha256:8465dee5adb947df5ea3cca06a22632bcbd3780d9b59fdfb605824c6fc6bb9db +size 3340072 diff --git a/Packages/com.github.asus4.tflite/Plugins/Android/armeabi-v7a/libtensorflowlite_gpu_gl.so b/Packages/com.github.asus4.tflite/Plugins/Android/armeabi-v7a/libtensorflowlite_gpu_gl.so index f6f435468..cc5436876 100755 --- a/Packages/com.github.asus4.tflite/Plugins/Android/armeabi-v7a/libtensorflowlite_gpu_gl.so +++ b/Packages/com.github.asus4.tflite/Plugins/Android/armeabi-v7a/libtensorflowlite_gpu_gl.so @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:3e6fd3aef6dc5940d926c160ebf3181ce883c041f73ebc8eb3ee5b56989efc91 -size 1464252 +oid sha256:1b2b73172ca47d627827eae552742b6d523c56261db20d12ff702ed2493ab55c +size 1488828 diff --git a/Packages/com.github.asus4.tflite/Plugins/Android/tensorflow-lite-gpu.aar b/Packages/com.github.asus4.tflite/Plugins/Android/tensorflow-lite-gpu.aar index f40d7fc62..6f0aadb0d 100755 --- a/Packages/com.github.asus4.tflite/Plugins/Android/tensorflow-lite-gpu.aar +++ b/Packages/com.github.asus4.tflite/Plugins/Android/tensorflow-lite-gpu.aar @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid 
sha256:af6c8ba01ca5f0b4d0a814476bd06082fbb40e02685eda23b3d35ae8c44b7707 -size 4121677 +oid sha256:50f96b3779558e3c119e7a65ec69ea9d3fc4b41443c2032c3b13ece40d971c58 +size 4192515 diff --git a/Packages/com.github.asus4.tflite/Plugins/Android/tensorflow-lite.aar b/Packages/com.github.asus4.tflite/Plugins/Android/tensorflow-lite.aar index d3fe54997..a25ffb5ad 100755 --- a/Packages/com.github.asus4.tflite/Plugins/Android/tensorflow-lite.aar +++ b/Packages/com.github.asus4.tflite/Plugins/Android/tensorflow-lite.aar @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7c88e0445b25d0863fd30797de36e42014a4590082bd40c1caf7f80857a79a5a -size 4641846 +oid sha256:00858011b4afa1e4ad3f4ceaa2182794a19b8e63e92aadbc05b10397bd4dc07a +size 4988429 From 0478efedf8fb7510978e6f1f371626cdd24ac781 Mon Sep 17 00:00:00 2001 From: Koki Ibukuro Date: Sun, 14 May 2023 23:10:26 +0200 Subject: [PATCH 6/6] Bump package version --- .circleci/config.yml | 2 +- Packages/com.github.asus4.mediapipe/package.json | 6 +++--- Packages/com.github.asus4.tflite.common/package.json | 4 ++-- Packages/com.github.asus4.tflite/package.json | 2 +- Packages/packages-lock.json | 6 +++--- README.md | 8 ++++---- 6 files changed, 14 insertions(+), 14 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 1a676b4b1..4a77ca703 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -10,7 +10,7 @@ parameters: default: "git@github.com:tensorflow/tensorflow.git" tf-branch: type: string - default: "v2.11.1" + default: "v2.12.0" jobs: build-mac: diff --git a/Packages/com.github.asus4.mediapipe/package.json b/Packages/com.github.asus4.mediapipe/package.json index 79f958ae5..ac5398055 100644 --- a/Packages/com.github.asus4.mediapipe/package.json +++ b/Packages/com.github.asus4.mediapipe/package.json @@ -9,11 +9,11 @@ "license": "SEE LICENSE IN LICENSE", "unity": "2019.3", "unityRelease": "0f1", - "version": "2.11.1", + "version": "2.12.0", "type": "library", "hideInEditor": false, "dependencies": { - "com.github.asus4.tflite": "2.11.1", - "com.github.asus4.tflite.common": "2.11.1" + "com.github.asus4.tflite": "2.12.0", + "com.github.asus4.tflite.common": "2.12.0" } } \ No newline at end of file diff --git a/Packages/com.github.asus4.tflite.common/package.json b/Packages/com.github.asus4.tflite.common/package.json index 9644b356c..ff5f94856 100644 --- a/Packages/com.github.asus4.tflite.common/package.json +++ b/Packages/com.github.asus4.tflite.common/package.json @@ -9,10 +9,10 @@ "license": "SEE LICENSE IN LICENSE", "unity": "2019.3", "unityRelease": "0f1", - "version": "2.11.1", + "version": "2.12.0", "type": "library", "hideInEditor": false, "dependencies": { - "com.github.asus4.tflite": "2.11.1" + "com.github.asus4.tflite": "2.12.0" } } \ No newline at end of file diff --git a/Packages/com.github.asus4.tflite/package.json b/Packages/com.github.asus4.tflite/package.json index 57a5ef61c..3ee4f471e 100644 --- a/Packages/com.github.asus4.tflite/package.json +++ b/Packages/com.github.asus4.tflite/package.json @@ -9,7 +9,7 @@ "license": "SEE LICENSE IN LICENSE", "unity": "2019.3", "unityRelease": "0f1", - "version": "2.11.1", + "version": "2.12.0", "type": "library", "hideInEditor": false } \ No newline at end of file diff --git a/Packages/packages-lock.json b/Packages/packages-lock.json index 6c6b5b9e8..07d5c477e 100644 --- a/Packages/packages-lock.json +++ b/Packages/packages-lock.json @@ -12,8 +12,8 @@ "depth": 0, "source": "embedded", "dependencies": { - "com.github.asus4.tflite": "2.11.1", - 
"com.github.asus4.tflite.common": "2.11.1" + "com.github.asus4.tflite": "2.12.0", + "com.github.asus4.tflite.common": "2.12.0" } }, "com.github.asus4.texture-source": { @@ -34,7 +34,7 @@ "depth": 0, "source": "embedded", "dependencies": { - "com.github.asus4.tflite": "2.11.1" + "com.github.asus4.tflite": "2.12.0" } }, "com.unity.2d.sprite": { diff --git a/README.md b/README.md index defbfe285..05593ce65 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,7 @@ Tested on - iOS / Android / macOS / Windows / Linux - Unity 2021.3.24f1 -- TensorFlow 2.11.1 +- TensorFlow 2.12.0 Included examples: @@ -64,11 +64,11 @@ Included prebuilt libraries: ], "dependencies": { // Core TensorFlow Lite libraries - "com.github.asus4.tflite": "2.11.1", + "com.github.asus4.tflite": "2.12.0", // Utilities for TFLite - "com.github.asus4.tflite.common": "2.11.1", + "com.github.asus4.tflite.common": "2.12.0", // Utilities for MediaPipe - "com.github.asus4.mediapipe": "2.11.1", + "com.github.asus4.mediapipe": "2.12.0", ...// other dependencies } }