- Save the graph in pbtxt format.

```python
tf.train.write_graph(sess.graph_def, './', 'animegan.pbtxt')
```
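These snippets assume a `tf.Session` in which the AnimeGANv2 generator graph has already been built and its weights restored. A minimal sketch of that setup, assuming the checkpoint layout from the AnimeGANv2 repository (the `.meta` path is my assumption):

```python
import tensorflow as tf

# Assumed setup (not from the original walkthrough): restore the graph
# from the checkpoint's meta file so `sess` is ready for the steps below.
sess = tf.Session()
saver = tf.train.import_meta_graph(
    'checkpoint/generator_Hayao_weight/Hayao-64.ckpt.meta')
saver.restore(sess, 'checkpoint/generator_Hayao_weight/Hayao-64.ckpt')
```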
- Find the name of the output node.

```python
graph = sess.graph
print([node.name for node in graph.as_graph_def().node])
```
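The full node list is long. A quick filter (my addition) narrows it to likely output ops; for this generator the output is a Tanh activation:

```python
# Show only Tanh ops; the generator's output node is among them.
print([node.name for node in graph.as_graph_def().node if node.op == 'Tanh'])
```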
- Make a frozen graph.

```python
from tensorflow.python.tools.freeze_graph import freeze_graph

graph_def_file = 'animegan.pbtxt'
checkpoint_file = 'checkpoint/generator_Hayao_weight/Hayao-64.ckpt'
frozen_model_file = './frozen_model.pb'
output_node_names = 'generator/G_MODEL/out_layer/Tanh'

freeze_graph(input_graph=graph_def_file,
             input_saver="",
             input_binary=False,
             input_checkpoint=checkpoint_file,
             output_node_names=output_node_names,
             restore_op_name="save/restore_all",
             filename_tensor_name="save/Const:0",
             output_graph=frozen_model_file,
             clear_devices=True,
             initializer_nodes="")
```
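As a sanity check (not part of the original steps), you can parse the frozen graph back and confirm the output node survived freezing:

```python
import tensorflow as tf

# Load the frozen GraphDef and look for the expected output node.
graph_def = tf.GraphDef()
with tf.gfile.GFile(frozen_model_file, 'rb') as f:
    graph_def.ParseFromString(f.read())
print(any(node.name == output_node_names for node in graph_def.node))  # expect True
```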
- Convert with tfcoreml.

```python
import tfcoreml

input_tensor_shapes = {'test:0': [1, 256, 256, 3]}  # batch size is 1

# Output Core ML model path
coreml_model_file = './animegan.mlmodel'
output_tensor_names = ['generator/G_MODEL/out_layer/Tanh:0']

# Call the converter
coreml_model = tfcoreml.convert(
    tf_model_path=frozen_model_file,
    mlmodel_path=coreml_model_file,
    input_name_shape_dict=input_tensor_shapes,
    output_feature_names=output_tensor_names,
    image_input_names='test:0',
    red_bias=-1,
    green_bias=-1,
    blue_bias=-1,
    image_scale=2/255,  # 2/255 * pixel - 1 maps [0, 255] to [-1, 1]
    minimum_ios_deployment_target='12'
)
```
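Optionally, attach some metadata before shipping the model. A short sketch, assuming coremltools is available (it is installed alongside tfcoreml); the description string is just an example:

```python
import coremltools

# Reload the converted model, add a human-readable description, and save.
mlmodel = coremltools.models.MLModel(coreml_model_file)
mlmodel.short_description = 'AnimeGANv2 Hayao style transfer'
mlmodel.save(coreml_model_file)
```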
## Image Classifier

| Google Drive Link | Size | Original Project |
| --- | --- | --- |
| EfficientNetB0 | 22.7 MB | TensorFlow Hub |
## GAN

| Google Drive Link | Size | Original Project |
| --- | --- | --- |
| UGATIT_selfie2anime | 1.12 GB | taki0112/UGATIT |
| AnimeGANv2_Hayao | 8.7 MB | TachibanaYoshino/AnimeGANv2 |
| AnimeGANv2_Paprika | 8.7 MB | TachibanaYoshino/AnimeGANv2 |
| WarpGAN Caricature | 35.5 MB | seasonSH/WarpGAN |
| CartoonGAN_Shinkai | 44.6 MB | mnicnc404/CartoonGan-tensorflow |
| CartoonGAN_Hayao | 44.6 MB | mnicnc404/CartoonGan-tensorflow |
| CartoonGAN_Hosoda | 44.6 MB | mnicnc404/CartoonGan-tensorflow |
| CartoonGAN_Paprika | 44.6 MB | mnicnc404/CartoonGan-tensorflow |
1. Use CoreGANContainer. You can use the models by dragging and dropping them into the container project.
2. Or run a model directly with Vision:
```swift
import Vision

lazy var coreMLRequest: VNCoreMLRequest = {
    // `modelname` stands for your generated Core ML model class.
    let model = try! VNCoreMLModel(for: modelname().model)
    let request = VNCoreMLRequest(model: model, completionHandler: self.coreMLCompletionHandler)
    return request
}()

// `ciimage` is the CIImage you want to stylize.
let handler = VNImageRequestHandler(ciImage: ciimage, options: [:])
DispatchQueue.global(qos: .userInitiated).async {
    try? handler.perform([self.coreMLRequest])
}
```
For visualizing a MultiArray as an image, Matthijs Hollemans's CoreMLHelpers (https://github.com/hollance/CoreMLHelpers) are very convenient.

Converting from MultiArray to an image with CoreMLHelpers:
```swift
func coreMLCompletionHandler(request: VNRequest, error: Error?) {
    guard let result = request.results?.first as? VNCoreMLFeatureValueObservation,
          let multiArray = result.featureValue.multiArrayValue else { return }
    // CoreMLHelpers renders the [-1, 1] output values into a CGImage.
    let cgImage = multiArray.cgImage(min: -1, max: 1, channel: nil)
}
```
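To show the result, a minimal sketch that goes inside the completion handler, assuming an `imageView` outlet (not part of the original snippet):

```swift
// Hypothetical display step: wrap the CGImage and hop back to the main thread.
if let cgImage = cgImage {
    DispatchQueue.main.async {
        self.imageView.image = UIImage(cgImage: cgImage)
    }
}
```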
Apps made with Core ML models: AnimateU