Skip to content

Commit e0f6101

Browse files
committed
Create julia examples.
1 parent b9a7865 commit e0f6101

File tree

31 files changed

+1053
-0
lines changed

31 files changed

+1053
-0
lines changed
Lines changed: 35 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,35 @@
1+
using Bytez
using Base64
using HTTP

# Download an audio file and return its contents as a base64-encoded string.
function fetch_audio_as_base64(url::String)::String
    audio_response = HTTP.get(url)
    return base64encode(audio_response.body)
end

# Base64-encode the sample WAV clip used for classification.
audio_b64 = fetch_audio_as_base64(
    "https://huggingface.co/datasets/huggingfacejs/tasks/resolve/main/audio-classification/audio.wav",
)

# Create the Bytez API client (replace the placeholder with a real key).
client = Bytez.init("YOUR BYTEZ KEY HERE")

# Select and load the audio-classification model.
model = client.model("aaraki/wav2vec2-base-finetuned-ks")
model.load()

# Run inference; the model expects a base64-encoded WAV buffer.
result = model.run(Dict("b64AudioBufferWav" => audio_b64))

# Each output entry pairs a class label with its confidence score.
label_objects = result["output"]
for label_object in label_objects
    score = label_object["score"]
    label = label_object["label"]
    println("Score: $score, Label: $label")
end
Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,32 @@
1+
using Bytez
using Base64
using HTTP

# Fetch a file over HTTP and return its bytes as a base64-encoded string.
function get_base64_audio(url::String)::String
    response = HTTP.get(url)
    return base64encode(response.body)
end

input_audio_base64 = get_base64_audio(
    "https://huggingface.co/datasets/huggingfacejs/tasks/resolve/main/audio-classification/audio.wav",
)

# Initialize the Bytez client (replace the placeholder with a real API key).
client = Bytez.init("YOUR BYTEZ KEY HERE")

# Speech-to-text model.
model = client.model("facebook/data2vec-audio-base-960h")
model.load()

# Run inference; the model expects a base64-encoded WAV buffer.
result = model.run(Dict("b64AudioBufferWav" => input_audio_base64))

output = result["output"]

# depending on the model, there may be additional props returned
println(output)

text = output["text"]
println("Inference is: $text")
32+
Lines changed: 28 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,28 @@
1+
using Bytez

# Connect to the Bytez API (substitute a real key).
client = Bytez.init("YOUR BYTEZ KEY HERE")

# Instruction-tuned chat model.
model = client.model("microsoft/Phi-3-mini-4k-instruct")
model.load()

# A conversation is a vector of role/content dicts.
chat_history = [
    Dict("role" => "system", "content" => "You are a friendly chatbot"),
    Dict("role" => "user", "content" => "What is the capital of England?"),
]

# Generate a reply, capping the total response length.
result = model.run(chat_history, Dict("max_length" => 100))

# The first output element carries the generated conversation turns.
reply_messages = result["output"][1]["generated_text"]

for msg in reply_messages
    # depending on the model, there may be additional props returned
    println(msg)

    println(Dict("content" => msg["content"], "role" => msg["role"]))
end
Lines changed: 50 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,50 @@
1+
using Bytez
using Base64
using HTTP
using Printf

# Directory of this script; output images are written next to it.
WORKING_DIR = dirname(@__FILE__)

# Initialize the Bytez client (replace the placeholder with a real API key).
client = Bytez.init("YOUR BYTEZ KEY HERE")

# Monocular depth-estimation model.
model = client.model("vinvino02/glpn-nyu")
model.load()

input_image_url = "https://as1.ftcdn.net/v2/jpg/03/03/55/82/1000_F_303558268_YNUQp9NNMTE0X4zrj314mbWcDHd1pZPD.jpg"

# Run the model with the input image
result = model.run(input_image_url)

output = result["output"]

# depending on the model, there may be additional props returned
depth_png = output["depth_png"]
formatted_predicted_depth_array = output["formatted_predicted_depth_array"]

##### Decode and save the image #####
img_buffer = base64decode(depth_png)

image_path = joinpath(WORKING_DIR, "testImage.png")
open(image_path, "w") do f
    write(f, img_buffer)
end

# Write the original image for comparison
original_image_path = joinpath(WORKING_DIR, "originalImage.jpg")
response = HTTP.get(input_image_url)

open(original_image_path, "w") do f
    write(f, response.body)
end

println("Wrote the original image to: ", original_image_path)
println("Wrote the inference image to: ", image_path)

##### 2D depth map, object representation of the pixel values for the depth map #####
# `formatted_predicted_depth_array` is a nested array of per-pixel depth values.
# The original example iterated the whole array in an empty loop; that wasted
# O(rows * cols) work, so it has been replaced with this usage sketch:
#
#     for (j, row) in enumerate(formatted_predicted_depth_array)
#         for (i, pixel) in enumerate(row)
#             # use j, i, pixel here
#         end
#     end
Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,32 @@
1+
using Bytez
using Printf

WORKING_DIR = dirname(@__FILE__)

# Connect to the Bytez API (substitute a real key).
client = Bytez.init("YOUR BYTEZ KEY HERE")

# Document visual-question-answering model.
model = client.model("cloudqi/CQI_Visual_Question_Awnser_PT_v0")
model.load()

# The model takes an image URL plus a natural-language question about it.
result = model.run(Dict(
    "image" => "https://templates.invoicehome.com/invoice-template-us-neat-750px.png",
    "question" => "What's the total cost?",
))

output = result["output"]

# depending on the model, there may be additional props returned
println(output)

# First element is the top-ranked answer object.
top_answer = output[1]

answer = top_answer["answer"]
score = top_answer["score"]
start = top_answer["start"]
# end is a reserved keyword in julia
_end = top_answer["end"]

println(Dict("answer" => answer, "score" => score, "start" => start, "end" => _end))
Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,20 @@
1+
using Bytez

# Connect to the Bytez API (substitute a real key).
client = Bytez.init("YOUR BYTEZ KEY HERE")

# Text-embedding model.
model = client.model("Salesforce/SFR-Embedding-2_R")
model.load()

# Embed a sample piece of text.
result = model.run("Your text for feature extraction goes here...")
output = result["output"]

# depending on the model, there may be additional props returned
println(output)

# The first element is the embedding vector for the input text.
embedding = output[1]
println(embedding)
Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,25 @@
1+
using Bytez

# Connect to the Bytez API (substitute a real key).
client = Bytez.init("YOUR BYTEZ KEY HERE")

# Masked-language model; predicts the token hidden behind <mask>.
model = client.model("almanach/camembert-base")
model.load()

result = model.run("The capital of France is <mask>.")

# One candidate completion per element, each with a score.
for candidate in result["output"]
    # depending on the model, there may be additional props returned
    println(candidate)

    println(Dict(
        "sequence" => candidate["sequence"],
        "score" => candidate["score"],
        "token" => candidate["token"],
        "token_str" => candidate["token_str"],
    ))
end
Lines changed: 56 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,56 @@
1+
using Bytez

# Connect to the Bytez API (substitute a real key).
client = Bytez.init("YOUR BYTEZ KEY HERE")

# Function-calling model: given function docs, emits the call to make.
model = client.model("Nexusflow/NexusRaven-V2-13B")
model.load()

input_text = "What's the weather like in Seattle right now?"

# Generation settings. FIX: the original set max_new_tokens (20) below
# min_new_tokens (50), a contradictory range; min must not exceed max,
# so the two values are swapped here.
options = Dict(
    "params" => Dict(
        "max_new_tokens" => 50,
        "min_new_tokens" => 20,
        "temperature" => 0.001,
        "do_sample" => false),
    "stream" => true,
)

prompt_template = """
Function:
def get_weather_data(coordinates):
\"\"\"
Fetches weather data from the Open-Meteo API for the given latitude and longitude.

Args:
coordinates (tuple): The latitude and longitude of the location.

Returns:
float: The current temperature in the coordinates you've asked for
\"""

Function:
def get_coordinates_from_city(city_name):
\"""
Fetches the latitude and longitude of a given city name using the Maps.co Geocoding API.

Args:
city_name (str): The name of the city.

Returns:
tuple: The latitude and longitude of the city.
\"""

User Query: {query}<human_end>
"""

# Prepare the prompt with the user query
prompt = replace(prompt_template, "{query}" => input_text)

# With stream => true, run() returns a Channel of generated chunks.
stream = model.run(prompt, options)

# FIX: iterate the Channel directly instead of `while isopen(stream)` +
# `take!`; the old form races with channel closure (take! throws if the
# channel closes between the isopen check and the take).
for item in stream
    println(item) # Print each chunk as it arrives
end
Lines changed: 23 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,23 @@
1+
using Bytez

# Connect to the Bytez API (substitute a real key).
client = Bytez.init("YOUR BYTEZ KEY HERE")

# Image-classification model.
model = client.model("google/vit-base-patch16-224")
model.load()

input_image_url = "https://www.padoniavets.com/sites/default/files/field/image/cats-and-dogs.jpg"

result = model.run(input_image_url)

# Renamed from camelCase (`labelObjects`) to snake_case for consistency
# with the naming used in every other example.
label_objects = result["output"]

for label_object in label_objects
    # depending on the model, there may be additional props returned
    println(label_object)

    label = label_object["label"]
    score = label_object["score"]

    println(Dict("label" => label, "score" => score))
end
Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,20 @@
1+
using Bytez

# Connect to the Bytez API (substitute a real key).
client = Bytez.init("YOUR BYTEZ KEY HERE")

# Vision-embedding model: maps an image to a feature vector.
model = client.model("nomic-ai/nomic-embed-vision-v1")
model.load()

image_url = "https://as1.ftcdn.net/v2/jpg/03/03/55/82/1000_F_303558268_YNUQp9NNMTE0X4zrj314mbWcDHd1pZPD.jpg"

result = model.run(image_url)
output = result["output"]

# depending on the model, there may be additional props returned
println(output)

# First element holds the embedding for the submitted image.
embedding = output[1]
println(embedding)

0 commit comments

Comments
 (0)