44 changes: 43 additions & 1 deletion .env.example
@@ -76,8 +76,50 @@ MASTODON_URL="https://mastodon.social"
MASTODON_CLIENT_ID=""
MASTODON_CLIENT_SECRET=""

# Misc Settings
# === AI/LLM Configuration ===
# Provider: openai, anthropic, google, azure, ollama, groq
LLM_PROVIDER="openai"
LLM_MODEL="gpt-4.1"
LLM_TEMPERATURE="0.7"

# OpenAI (default)
OPENAI_API_KEY=""
OPENAI_BASE_URL=""

# Anthropic
ANTHROPIC_API_KEY=""
ANTHROPIC_BASE_URL=""

# Google AI
GOOGLE_AI_API_KEY=""

# Azure OpenAI
AZURE_OPENAI_API_KEY=""
AZURE_OPENAI_ENDPOINT=""
AZURE_OPENAI_DEPLOYMENT_NAME=""
AZURE_OPENAI_API_VERSION="2024-02-15-preview"

# Ollama (self-hosted)
OLLAMA_BASE_URL="http://localhost:11434"

# Groq
GROQ_API_KEY=""

# === Image Generation ===
# Provider: openai, fal, stability, replicate
IMAGE_PROVIDER="openai"
IMAGE_MODEL="dall-e-3"

# FAL.ai
FAL_KEY=""

# Stability AI
STABILITY_API_KEY=""

# Replicate
REPLICATE_API_TOKEN=""

# Misc Settings
NEXT_PUBLIC_DISCORD_SUPPORT=""
NEXT_PUBLIC_POLOTNO=""
# NOT_SECURED=false
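Each provider in the new block maps to its own set of variables, and only the group matching `LLM_PROVIDER` has to be filled in. A minimal sketch of how a config module might read these variables (the `llm.config` module itself is not shown in this diff, so the implementation below is an assumption; `getLLMConfig` and `LLMProvider` are the names the new factory imports):

```ts
// Hypothetical reader for the new env block; defaults mirror .env.example.
export type LLMProvider =
  | 'openai' | 'anthropic' | 'google' | 'azure' | 'ollama' | 'groq';

export interface LLMConfig {
  provider: LLMProvider;
  model: string;
  temperature: number;
}

export function getLLMConfig(): LLMConfig {
  return {
    provider: (process.env.LLM_PROVIDER || 'openai') as LLMProvider,
    model: process.env.LLM_MODEL || 'gpt-4.1',
    temperature: Number(process.env.LLM_TEMPERATURE ?? '0.7'),
  };
}
```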
4 changes: 2 additions & 2 deletions apps/backend/src/api/api.module.ts
@@ -16,7 +16,7 @@ import { MediaController } from '@gitroom/backend/api/routes/media.controller';
import { UploadModule } from '@gitroom/nestjs-libraries/upload/upload.module';
import { BillingController } from '@gitroom/backend/api/routes/billing.controller';
import { NotificationsController } from '@gitroom/backend/api/routes/notifications.controller';
import { OpenaiService } from '@gitroom/nestjs-libraries/openai/openai.service';
import { AIService } from '@gitroom/nestjs-libraries/ai/ai.service';
import { ExtractContentService } from '@gitroom/nestjs-libraries/openai/extract.content.service';
import { CodesService } from '@gitroom/nestjs-libraries/services/codes.service';
import { CopilotController } from '@gitroom/backend/api/routes/copilot.controller';
@@ -61,7 +61,7 @@ const authenticatedController = [
providers: [
AuthService,
StripeService,
OpenaiService,
AIService,
ExtractContentService,
AuthMiddleware,
PoliciesGuard,
25 changes: 8 additions & 17 deletions apps/backend/src/api/routes/copilot.controller.ts
@@ -10,10 +10,11 @@ import {
} from '@nestjs/common';
import {
CopilotRuntime,
OpenAIAdapter,
copilotRuntimeNodeHttpEndpoint,
copilotRuntimeNextJSAppRouterEndpoint,
} from '@copilotkit/runtime';
import { createCopilotAdapterSync } from '@gitroom/nestjs-libraries/ai/copilot/copilot.factory';
import { isLLMConfigured } from '@gitroom/nestjs-libraries/ai/llm/llm.config';
import { GetOrgFromRequest } from '@gitroom/nestjs-libraries/user/org.from.request';
import { Organization } from '@prisma/client';
import { SubscriptionService } from '@gitroom/nestjs-libraries/database/prisma/subscriptions/subscription.service';
@@ -38,20 +38,15 @@ export class CopilotController {
) {}
@Post('/chat')
chatAgent(@Req() req: Request, @Res() res: Response) {
if (
process.env.OPENAI_API_KEY === undefined ||
process.env.OPENAI_API_KEY === ''
) {
Logger.warn('OpenAI API key not set, chat functionality will not work');
if (!isLLMConfigured()) {
Logger.warn('LLM not configured, chat functionality will not work');
return;
}

const copilotRuntimeHandler = copilotRuntimeNodeHttpEndpoint({
endpoint: '/copilot/chat',
runtime: new CopilotRuntime(),
serviceAdapter: new OpenAIAdapter({
model: 'gpt-4.1',
}),
serviceAdapter: createCopilotAdapterSync(),
});

return copilotRuntimeHandler(req, res);
@@ -64,11 +60,8 @@
@Res() res: Response,
@GetOrgFromRequest() organization: Organization
) {
if (
process.env.OPENAI_API_KEY === undefined ||
process.env.OPENAI_API_KEY === ''
) {
Logger.warn('OpenAI API key not set, chat functionality will not work');
if (!isLLMConfigured()) {
Logger.warn('LLM not configured, chat functionality will not work');
return;
}
const mastra = await this._mastraService.mastra();
@@ -96,9 +89,7 @@
endpoint: '/copilot/agent',
runtime,
// properties: req.body.variables.properties,
serviceAdapter: new OpenAIAdapter({
model: 'gpt-4.1',
}),
serviceAdapter: createCopilotAdapterSync(),
});

return copilotRuntimeHandler.handleRequest(req, res);
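The controller now gates on `isLLMConfigured()` instead of checking `OPENAI_API_KEY` directly. The implementation is not part of this diff; a plausible sketch, assuming it verifies the credential matching `LLM_PROVIDER`:

```ts
// Sketch only: the real isLLMConfigured lives in ai/llm/llm.config and may differ.
export function isLLMConfigured(): boolean {
  switch (process.env.LLM_PROVIDER || 'openai') {
    case 'anthropic':
      return !!process.env.ANTHROPIC_API_KEY;
    case 'google':
      return !!process.env.GOOGLE_AI_API_KEY;
    case 'azure':
      return !!(process.env.AZURE_OPENAI_API_KEY && process.env.AZURE_OPENAI_ENDPOINT);
    case 'groq':
      return !!process.env.GROQ_API_KEY;
    case 'ollama':
      return true; // self-hosted; no API key required
    default:
      return !!process.env.OPENAI_API_KEY;
  }
}
```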
4 changes: 2 additions & 2 deletions apps/backend/src/public-api/public.api.module.ts
@@ -5,7 +5,7 @@ import { PoliciesGuard } from '@gitroom/backend/services/auth/permissions/permis
import { PermissionsService } from '@gitroom/backend/services/auth/permissions/permissions.service';
import { IntegrationManager } from '@gitroom/nestjs-libraries/integrations/integration.manager';
import { UploadModule } from '@gitroom/nestjs-libraries/upload/upload.module';
import { OpenaiService } from '@gitroom/nestjs-libraries/openai/openai.service';
import { AIService } from '@gitroom/nestjs-libraries/ai/ai.service';
import { ExtractContentService } from '@gitroom/nestjs-libraries/openai/extract.content.service';
import { CodesService } from '@gitroom/nestjs-libraries/services/codes.service';
import { PublicIntegrationsController } from '@gitroom/backend/public-api/routes/v1/public.integrations.controller';
@@ -18,7 +18,7 @@ const authenticatedController = [PublicIntegrationsController];
providers: [
AuthService,
StripeService,
OpenaiService,
AIService,
ExtractContentService,
PoliciesGuard,
PermissionsService,
@@ -2,7 +2,7 @@ import {
ThirdParty,
ThirdPartyAbstract,
} from '@gitroom/nestjs-libraries/3rdparties/thirdparty.interface';
import { OpenaiService } from '@gitroom/nestjs-libraries/openai/openai.service';
import { AIService } from '@gitroom/nestjs-libraries/ai/ai.service';
import { timer } from '@gitroom/helpers/utils/timer';

@ThirdParty({
@@ -19,7 +19,7 @@ export class HeygenProvider extends ThirdPartyAbstract<{
captions: string;
}> {
// @ts-ignore
constructor(private _openaiService: OpenaiService) {
constructor(private _aiService: AIService) {
super();
}

@@ -49,7 +49,7 @@ export class HeygenProvider extends ThirdPartyAbstract<{

async generateVoice(apiKey: string, data: { text: string }) {
return {
voice: await this._openaiService.generateVoiceFromText(data.text),
voice: await this._aiService.generateVoiceFromText(data.text),
};
}

@@ -1,18 +1,14 @@
import { Injectable } from '@nestjs/common';
import { BaseMessage, HumanMessage } from '@langchain/core/messages';
import { END, START, StateGraph } from '@langchain/langgraph';
import { ChatOpenAI } from '@langchain/openai';
import { ChatPromptTemplate } from '@langchain/core/prompts';
import { agentCategories } from '@gitroom/nestjs-libraries/agent/agent.categories';
import { z } from 'zod';
import { agentTopics } from '@gitroom/nestjs-libraries/agent/agent.topics';
import { PostsService } from '@gitroom/nestjs-libraries/database/prisma/posts/posts.service';
import { createLLMSync } from '@gitroom/nestjs-libraries/ai/llm/llm.factory';

const model = new ChatOpenAI({
apiKey: process.env.OPENAI_API_KEY || 'sk-proj-',
model: 'gpt-4o-2024-08-06',
temperature: 0,
});
const model = createLLMSync({ temperature: 0 });

interface WorkflowChannelsState {
messages: BaseMessage[];
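`createLLMSync({ temperature: 0 })` replaces the hard-coded `ChatOpenAI` instance. The factory is not included in this diff; one way it could look, assuming it returns a LangChain chat model for the configured provider (coverage abbreviated to two branches for illustration):

```ts
// Hypothetical shape of llm.factory's createLLMSync.
import { ChatOpenAI } from '@langchain/openai';
import { ChatAnthropic } from '@langchain/anthropic';
import { getLLMConfig } from './llm.config';

export function createLLMSync(overrides?: { model?: string; temperature?: number }) {
  const config = { ...getLLMConfig(), ...overrides };
  switch (config.provider) {
    case 'anthropic':
      return new ChatAnthropic({ model: config.model, temperature: config.temperature });
    default:
      // openai, groq, and ollama all speak the OpenAI API and could share
      // ChatOpenAI with a provider-specific baseURL.
      return new ChatOpenAI({ model: config.model, temperature: config.temperature });
  }
}
```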
16 changes: 4 additions & 12 deletions libraries/nestjs-libraries/src/agent/agent.graph.service.ts
@@ -5,7 +5,6 @@ import {
ToolMessage,
} from '@langchain/core/messages';
import { END, START, StateGraph } from '@langchain/langgraph';
import { ChatOpenAI, DallEAPIWrapper } from '@langchain/openai';
import { TavilySearchResults } from '@langchain/community/tools/tavily_search';
import { ToolNode } from '@langchain/langgraph/prebuilt';
import { ChatPromptTemplate } from '@langchain/core/prompts';
@@ -15,22 +14,15 @@ import { z } from 'zod';
import { MediaService } from '@gitroom/nestjs-libraries/database/prisma/media/media.service';
import { UploadFactory } from '@gitroom/nestjs-libraries/upload/upload.factory';
import { GeneratorDto } from '@gitroom/nestjs-libraries/dtos/generator/generator.dto';
import { createLLMSync } from '@gitroom/nestjs-libraries/ai/llm/llm.factory';
import { ImageFactory } from '@gitroom/nestjs-libraries/ai/image/image.factory';

const tools = !process.env.TAVILY_API_KEY
? []
: [new TavilySearchResults({ maxResults: 3 })];
const toolNode = new ToolNode(tools);

const model = new ChatOpenAI({
apiKey: process.env.OPENAI_API_KEY || 'sk-proj-',
model: 'gpt-4.1',
temperature: 0.7,
});

const dalle = new DallEAPIWrapper({
apiKey: process.env.OPENAI_API_KEY || 'sk-proj-',
model: 'dall-e-3',
});
const model = createLLMSync();

interface WorkflowChannelsState {
messages: BaseMessage[];
@@ -320,7 +312,7 @@

const newContent = await Promise.all(
(state.content || []).map(async (p) => {
const image = await dalle.invoke(p.prompt!);
const image = await ImageFactory.generate(p.prompt!);
return {
...p,
image,
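`ImageFactory.generate(...)` takes over from the module-level `DallEAPIWrapper`, so image generation also dispatches on `IMAGE_PROVIDER`. The factory's source is not in this hunk; a sketch of the OpenAI path only, keeping the URL-returning contract the old wrapper had:

```ts
// Sketch: only the openai branch is shown; fal, stability, and replicate
// would each need their own client.
import { DallEAPIWrapper } from '@langchain/openai';

export class ImageFactory {
  static async generate(prompt: string): Promise<string> {
    const provider = process.env.IMAGE_PROVIDER || 'openai';
    if (provider !== 'openai') {
      throw new Error(`Image provider not implemented in this sketch: ${provider}`);
    }
    const dalle = new DallEAPIWrapper({
      apiKey: process.env.OPENAI_API_KEY,
      model: process.env.IMAGE_MODEL || 'dall-e-3',
    });
    return dalle.invoke(prompt); // resolves to the generated image URL
  }
}
```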
155 changes: 155 additions & 0 deletions libraries/nestjs-libraries/src/ai/ai-sdk/ai-sdk.factory.ts
@@ -0,0 +1,155 @@
/**
* Factory for creating Vercel AI SDK models (used by Mastra)
*/

import { getLLMConfig, getLLMProvider, type LLMProvider } from '../llm/llm.config';

export interface CreateAISdkModelOptions {
provider?: LLMProvider;
model?: string;
}

/**
* Create a Vercel AI SDK model based on the configured provider
* Returns a model compatible with Mastra Agent
*/
export async function createAISdkModel(options?: CreateAISdkModelOptions) {
const config = getLLMConfig();
const provider = options?.provider || config.provider;
const model = options?.model || config.model;

switch (provider) {
case 'openai': {
const { openai } = await import('@ai-sdk/openai');
return openai(model);
}

case 'anthropic': {
const { anthropic } = await import('@ai-sdk/anthropic');
return anthropic(model);
}

case 'google': {
const { google } = await import('@ai-sdk/google');
return google(model);
}

case 'azure': {
const { azure } = await import('@ai-sdk/azure');
return azure(model);
}

case 'groq': {
// Groq uses OpenAI-compatible API
const { createOpenAI } = await import('@ai-sdk/openai');
const groq = createOpenAI({
baseURL: 'https://api.groq.com/openai/v1',
apiKey: process.env.GROQ_API_KEY,
});
return groq(model);
}

case 'ollama': {
// Ollama uses OpenAI-compatible API
const { createOpenAI } = await import('@ai-sdk/openai');
const ollama = createOpenAI({
baseURL: process.env.OLLAMA_BASE_URL || 'http://localhost:11434/v1',
apiKey: 'ollama', // Ollama doesn't require a real key
});
return ollama(model);
}

default: {
// Default to OpenAI
const { openai } = await import('@ai-sdk/openai');
return openai(model);
}
}
}

/**
* Create a Vercel AI SDK model synchronously based on configured provider
*/
export function createAISdkModelSync(options?: CreateAISdkModelOptions) {
const config = getLLMConfig();
const provider = options?.provider || config.provider;
const model = options?.model || config.model;

switch (provider) {
case 'openai': {
// eslint-disable-next-line @typescript-eslint/no-var-requires
const { openai } = require('@ai-sdk/openai');
return openai(model);
}

case 'anthropic': {
// eslint-disable-next-line @typescript-eslint/no-var-requires
const { anthropic } = require('@ai-sdk/anthropic');
return anthropic(model);
}

case 'google': {
// eslint-disable-next-line @typescript-eslint/no-var-requires
const { google } = require('@ai-sdk/google');
return google(model);
}

case 'azure': {
// eslint-disable-next-line @typescript-eslint/no-var-requires
const { azure } = require('@ai-sdk/azure');
return azure(model);
}

case 'groq': {
// eslint-disable-next-line @typescript-eslint/no-var-requires
const { createOpenAI } = require('@ai-sdk/openai');
const groq = createOpenAI({
baseURL: 'https://api.groq.com/openai/v1',
apiKey: process.env.GROQ_API_KEY,
});
return groq(model);
}

case 'ollama': {
// eslint-disable-next-line @typescript-eslint/no-var-requires
const { createOpenAI } = require('@ai-sdk/openai');
const ollama = createOpenAI({
baseURL: process.env.OLLAMA_BASE_URL || 'http://localhost:11434/v1',
apiKey: 'ollama',
});
return ollama(model);
}

default: {
// eslint-disable-next-line @typescript-eslint/no-var-requires
const { openai } = require('@ai-sdk/openai');
return openai(model);
}
}
}

/**
* AISdkFactory class for static access
*/
export class AISdkFactory {
/**
* Create a Vercel AI SDK model based on configuration (async)
*/
static async createModel(options?: CreateAISdkModelOptions) {
return createAISdkModel(options);
}

/**
* Create a Vercel AI SDK model synchronously based on configured provider
*/
static createModelSync(options?: CreateAISdkModelOptions) {
return createAISdkModelSync(options);
}

/**
* Get the currently configured provider
*/
static getProvider(): LLMProvider {
return getLLMProvider();
}
}
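Because the factory resolves provider and model from the environment, callers (such as the Mastra agents this file exists for) stay provider-agnostic. An illustrative usage, with the agent name and instructions invented for the example:

```ts
import { Agent } from '@mastra/core/agent';
import { createAISdkModelSync } from '@gitroom/nestjs-libraries/ai/ai-sdk/ai-sdk.factory';

// Model comes from LLM_PROVIDER / LLM_MODEL; switching providers needs no code change.
const agent = new Agent({
  name: 'post-writer',
  instructions: 'Draft short social media posts.',
  model: createAISdkModelSync(),
});
```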