Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion internal/providers/configs/bedrock.json
Original file line number Diff line number Diff line change
Expand Up @@ -92,4 +92,4 @@
"supports_attachments": true
}
]
}
}
95 changes: 95 additions & 0 deletions internal/providers/configs/ollamacloud.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,95 @@
{
"name": "Ollama Cloud",
"id": "ollamacloud",
"type": "ollama-cloud",
"api_key": "",
"api_endpoint": "https://ollama.com",
"default_large_model_id": "gpt-oss:120b-cloud",
"default_small_model_id": "gpt-oss:20b-cloud",
"models": [
{
"id": "deepseek-v3.1:671b-cloud",
"name": "DeepSeek V3.1 671B",
"cost_per_1m_in": 0,
"cost_per_1m_out": 0,
"cost_per_1m_in_cached": 0,
"cost_per_1m_out_cached": 0,
"context_window": 128000,
"default_max_tokens": 4096,
"can_reason": true,
"supports_attachments": false
},
{
"id": "gpt-oss:20b-cloud",
"name": "GPT-OSS 20B",
"cost_per_1m_in": 0,
"cost_per_1m_out": 0,
"cost_per_1m_in_cached": 0,
"cost_per_1m_out_cached": 0,
"context_window": 128000,
"default_max_tokens": 4096,
"can_reason": false,
"supports_attachments": false
},
{
"id": "gpt-oss:120b-cloud",
"name": "GPT-OSS 120B",
"cost_per_1m_in": 0,
"cost_per_1m_out": 0,
"cost_per_1m_in_cached": 0,
"cost_per_1m_out_cached": 0,
"context_window": 128000,
"default_max_tokens": 4096,
"can_reason": true,
"supports_attachments": false
},
{
"id": "kimi-k2:1t-cloud",
"name": "Kimi K2 1T",
"cost_per_1m_in": 0,
"cost_per_1m_out": 0,
"cost_per_1m_in_cached": 0,
"cost_per_1m_out_cached": 0,
"context_window": 128000,
"default_max_tokens": 4096,
"can_reason": true,
"supports_attachments": false
},
{
"id": "qwen3-coder:480b-cloud",
"name": "Qwen3 Coder 480B",
"cost_per_1m_in": 0,
"cost_per_1m_out": 0,
"cost_per_1m_in_cached": 0,
"cost_per_1m_out_cached": 0,
"context_window": 128000,
"default_max_tokens": 4096,
"can_reason": false,
"supports_attachments": false
},
{
"id": "glm-4.6:cloud",
"name": "GLM 4.6",
"cost_per_1m_in": 0,
"cost_per_1m_out": 0,
"cost_per_1m_in_cached": 0,
"cost_per_1m_out_cached": 0,
"context_window": 128000,
"default_max_tokens": 4096,
"can_reason": true,
"supports_attachments": false
},
{
"id": "minimax-m2:cloud",
"name": "MiniMax M2",
"cost_per_1m_in": 0,
"cost_per_1m_out": 0,
"cost_per_1m_in_cached": 0,
"cost_per_1m_out_cached": 0,
"context_window": 128000,
"default_max_tokens": 4096,
"can_reason": false,
"supports_attachments": false
}
]
}
8 changes: 8 additions & 0 deletions internal/providers/providers.go
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,9 @@ var huggingFaceConfig []byte
//go:embed configs/aihubmix.json
var aiHubMixConfig []byte

//go:embed configs/ollamacloud.json
var ollamaCloudConfig []byte

// ProviderFunc is a function that returns a Provider.
type ProviderFunc func() catwalk.Provider

Expand All @@ -80,6 +83,7 @@ var providerRegistry = []ProviderFunc{
deepSeekProvider,
huggingFaceProvider,
aiHubMixProvider,
ollamaCloudProvider,
syntheticProvider,
}

Expand Down Expand Up @@ -168,3 +172,7 @@ func huggingFaceProvider() catwalk.Provider {
// aiHubMixProvider returns the AIHubMix provider definition parsed from
// its embedded JSON config (configs/aihubmix.json).
func aiHubMixProvider() catwalk.Provider {
	return loadProviderFromConfig(aiHubMixConfig)
}

// ollamaCloudProvider returns the Ollama Cloud provider definition parsed
// from its embedded JSON config (configs/ollamacloud.json).
func ollamaCloudProvider() catwalk.Provider {
	provider := loadProviderFromConfig(ollamaCloudConfig)
	return provider
}
2 changes: 2 additions & 0 deletions pkg/catwalk/provider.go
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@ const (
InferenceProviderChutes InferenceProvider = "chutes"
InferenceProviderHuggingFace InferenceProvider = "huggingface"
InferenceAIHubMix InferenceProvider = "aihubmix"
InferenceProviderOllamaCloud InferenceProvider = "ollamacloud"
)

// Provider represents an AI provider configuration.
Expand Down Expand Up @@ -97,6 +98,7 @@ func KnownProviders() []InferenceProvider {
InferenceProviderChutes,
InferenceProviderHuggingFace,
InferenceAIHubMix,
InferenceProviderOllamaCloud,
}
}

Expand Down
Loading