Update chat model selection and settings
Royal-lobster committed Oct 25, 2024
1 parent dbfbbc6 commit f6d3cb4
Showing 7 changed files with 85 additions and 151 deletions.
63 changes: 11 additions & 52 deletions src/components/Settings/Sections/ChatSettings.tsx
@@ -7,13 +7,12 @@ import { capitalizeText } from '../../../lib/capitalizeText'
import { validateApiKey } from '../../../lib/validApiKey'
import FieldWrapper from '../Elements/FieldWrapper'
import SectionHeading from '../Elements/SectionHeading'
import { type AvailableModels, Mode } from '../../../config/settings'
import { getReadableModelName } from '../../../lib/getReadableModelName'
import { Mode } from '../../../config/settings'

const ChatSettings = () => {
const [settings, setSettings] = useSettings()
const [showPassword, setShowPassword] = useState(false)
const { availableModels, fetchLocalModels } = useChatModels()
const { models, setActiveChatModel } = useChatModels()
const OpenAiApiKeyInputRef = React.useRef<HTMLInputElement>(null)
const OpenAiBaseUrlInputRef = React.useRef<HTMLInputElement>(null)

@@ -120,69 +119,29 @@ const ChatSettings = () => {
Update
</button>
</div>
</FieldWrapper>{' '}
{/* =========================
Model Setting
===========================*/}
<FieldWrapper
title="Show Local Models"
description="Show local models in the model selection via ollama (https://ollama.com/) which allows you to use open source models that run on your machine."
row={true}
>
<Switch.Root
checked={chatSettings.showLocalModels}
onCheckedChange={(value) => {
setSettings({
...settings,
chat: {
...chatSettings,
showLocalModels: value,
},
})
fetchLocalModels()
}}
className="cdx-w-[42px] cdx-h-[25px] cdx-bg-neutral-500 cdx-rounded-full cdx-relative data-[state=checked]:cdx-bg-blue-500 cdx-outline-none cdx-cursor-default"
>
<Switch.Thumb className="cdx-block cdx-w-[21px] cdx-h-[21px] cdx-bg-white cdx-rounded-full cdx-transition-transform cdx-duration-100 cdx-translate-x-0.5 cdx-will-change-transform data-[state=checked]:cdx-translate-x-[19px]" />
</Switch.Root>
</FieldWrapper>
{chatSettings.showLocalModels && (
<div>
🚧 NOTE: You must run this command for this to work:
<code className="cdx-block dark:cdx-bg-white/10 cdx-bg-black/10 cdx-rounded cdx-mt-2 cdx-p-2">
OLLAMA_ORIGINS=
{window.location.origin} ollama start
</code>
</div>
)}
<FieldWrapper
title="Model"
description="Choose between OpenAI Chat Modals. For more information, visit https://platform.openai.com/docs/models/overview"
description="Choose between available chat models"
row={true}
>
<select
value={chatSettings.model}
value={chatSettings.model?.id || ''}
className="input cdx-w-44"
onChange={(e) => {
setSettings({
...settings,
chat: {
...chatSettings,
model: e.target.value as AvailableModels,
},
})
const selectedModel = models.find((m) => m.id === e.target.value)
if (selectedModel) {
setActiveChatModel(selectedModel)
}
}}
>
{availableModels.map(([model, value]) => (
<option key={model} value={value}>
{getReadableModelName(model)}
{models.map((model) => (
<option key={model.id} value={model.id}>
{model.name}
</option>
))}
</select>
</FieldWrapper>
{/* =========================
Mode Setting
===========================*/}
<FieldWrapper
title="Mode"
description="Tweak temperature of response. Creative will generate more non deterministic responses, Precise will generate more deterministic responses."
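Note on the new selection flow: the dropdown above no longer casts the raw option value to an AvailableModels enum; it resolves the full ModelInfo object by id before persisting it. A minimal TypeScript sketch of that pattern (names taken from the diff; the standalone wiring is assumed):

import type React from 'react'
import type { ModelInfo } from '../../../config/settings'

// Minimal sketch of the id -> ModelInfo resolution used by the select.
// Unknown or stale ids simply do not update the setting.
function handleModelChange(
  e: React.ChangeEvent<HTMLSelectElement>,
  models: ModelInfo[],
  setActiveChatModel: (model: ModelInfo) => void,
) {
  const selected = models.find((m) => m.id === e.target.value)
  if (selected) {
    setActiveChatModel(selected)
  }
}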
18 changes: 9 additions & 9 deletions src/components/Sidebar/chat/ChangeChatModel.tsx
@@ -1,24 +1,24 @@
import { BsRobot } from 'react-icons/bs'
import type { AvailableModels } from '../../../config/settings'
import { useChatModels } from '../../../hooks/useChatModels'
import { getReadableModelName } from '../../../lib/getReadableModelName'

const ChangeChatModel = () => {
const { availableModels, activeChatModel, setActiveChatModel } =
useChatModels()
const { models, activeChatModel, setActiveChatModel } = useChatModels()
return (
<div className="cdx-flex cdx-items-center cdx-gap-1 cdx-text-neutral-500 dark:cdx-bg-neutral-900 cdx-bg-neutral-200 cdx-border cdx-rounded-md cdx-border-neutral-400/30 dark:cdx-border-neutral-500/30 cdx-py-1 cdx-px-3">
<BsRobot size={18} className="cdx-flex-shrink-0" />
<select
value={activeChatModel}
value={activeChatModel?.id || ''}
className="cdx-bg-transparent !m-0 !p-0 cdx-box-border cdx-w-min focus:cdx-outline-none focus:cdx-ring-1"
onChange={(e) => {
setActiveChatModel(e.target.value as AvailableModels)
const selectedModel = models.find((m) => m.id === e.target.value)
if (selectedModel) {
setActiveChatModel(selectedModel)
}
}}
>
{availableModels.map(([model, value]) => (
<option key={model} value={value}>
{getReadableModelName(model)}
{models.map((model) => (
<option key={model.id} value={model.id}>
{model.name}
</option>
))}
</select>
10 changes: 3 additions & 7 deletions src/components/Sidebar/chat/index.tsx
@@ -3,7 +3,7 @@ import ChatList from './ChatList'
import { SidebarInput } from './ChatInput'
import { useChatCompletion } from '../../../hooks/useChatCompletion'
import { SYSTEM_PROMPT } from '../../../config/prompts'
import { AvailableModels, type Settings } from '../../../config/settings'
import type { Settings } from '../../../config/settings'

interface ChatProps {
settings: Settings
@@ -19,7 +19,7 @@ const Chat = ({ settings }: ChatProps) => {
removeMessagePair,
error,
} = useChatCompletion({
model: settings.chat.model,
model: settings.chat.model!,
apiKey: settings.chat.openAIKey!,
mode: settings.chat.mode,
systemPrompt: SYSTEM_PROMPT,
@@ -58,11 +58,7 @@ const Chat = ({ settings }: ChatProps) => {
clearMessages={clearMessages}
cancelRequest={cancelRequest}
isWebpageContextOn={settings.general.webpageContext}
isVisionModel={
settings.chat.model === AvailableModels.GPT_4_TURBO ||
settings.chat.model === AvailableModels.GPT_4O ||
settings.chat.model === AvailableModels.GPT_4O_MINI
}
isVisionModel={settings.chat.model?.capabilities.vision || false}
/>
</>
)
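The isVisionModel prop is now derived from capability metadata instead of a hard-coded list of GPT-4 variants. A hypothetical helper (not part of this commit) that centralizes the check:

import type { ModelInfo } from '../../config/settings'

// Hypothetical helper: vision support is data-driven, so adding a new
// vision-capable model requires no code change at the call sites.
const supportsVision = (model: ModelInfo | null): boolean =>
  model?.capabilities.vision ?? false

// Equivalent to the inline expression in the diff:
// isVisionModel={supportsVision(settings.chat.model)}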
31 changes: 19 additions & 12 deletions src/config/settings/index.ts
@@ -6,21 +6,30 @@ export enum ThemeOptions {
SYSTEM = 'system',
}

export enum AvailableModels {
GPT_4O = 'gpt-4o',
GPT_4_TURBO = 'gpt-4-turbo',
GPT_4 = 'gpt-4',
GPT_3_5_TURBO = 'gpt-3.5-turbo',
GPT_4O_MINI = 'gpt-4o-mini',
}

export enum Mode {
HIGHLY_PRECISE = 0,
PRECISE = 0.5,
BALANCED = 1,
CREATIVE = 1.5,
}

export type ModelCapabilities = {
completion_chat: boolean
function_calling: boolean
vision: boolean
fine_tuning: boolean
completion_fim: boolean
}

export type ModelInfo = {
id: string
name: string
description: string
capabilities: ModelCapabilities
max_context_length: number
owned_by: string
}

export type Settings = {
quickMenu: {
enabled: boolean
@@ -29,9 +38,8 @@ export type Settings = {
}
chat: {
openAIKey: string | null
model: AvailableModels
model: ModelInfo | null
mode: Mode
showLocalModels: boolean
openAiBaseUrl: string | null
}
general: {
@@ -48,9 +56,8 @@ export const defaultSettings: Settings = {
},
chat: {
openAIKey: null,
model: AvailableModels.GPT_4O_MINI,
model: null,
mode: Mode.BALANCED,
showLocalModels: false,
openAiBaseUrl: null,
},
general: {
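For reference, a value satisfying the new ModelInfo type could look like the following (illustrative only — the id echoes the old enum default, and every concrete field value here is an assumption, since this data is now populated from the provider's /models endpoint):

import type { ModelInfo } from './index'

// Illustrative ModelInfo value; all concrete values are assumptions.
const exampleModel: ModelInfo = {
  id: 'gpt-4o-mini',
  name: 'GPT-4o mini',
  description: 'Small, fast chat model',
  capabilities: {
    completion_chat: true,
    function_calling: true,
    vision: true,
    fine_tuning: false,
    completion_fim: false,
  },
  max_context_length: 128_000,
  owned_by: 'openai',
}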
46 changes: 13 additions & 33 deletions src/hooks/useChatCompletion.ts
@@ -1,35 +1,24 @@
import endent from 'endent'
import { ChatOpenAI } from '@langchain/openai'
import { Ollama } from '@langchain/community/llms/ollama'
import {
AIMessage,
HumanMessage,
SystemMessage,
} from '@langchain/core/messages'
import { useMemo, useState } from 'react'
import { AvailableModels, type Mode } from '../config/settings'
import type { Mode, ModelInfo } from '../config/settings'
import { getMatchedContent } from '../lib/getMatchedContent'
import { ChatRole, useCurrentChat } from './useCurrentChat'
import type { MessageDraft } from './useMessageDraft'

interface UseChatCompletionProps {
model: AvailableModels
model: ModelInfo
apiKey: string
mode: Mode
systemPrompt: string
baseURL: string
}

/**
* This hook is responsible for managing the chat completion
* functionality by using the useCurrentChat hook
*
* It adds functions for
* - submitting a query to the chat
* - cancelling a query
*
* And returns them along with useful state from useCurrentChat hook
*/
let controller: AbortController

export const useChatCompletion = ({
@@ -51,20 +40,16 @@ export const useChatCompletion = ({
const [error, setError] = useState<Error | null>(null)

const llm = useMemo(() => {
const isOpenAIModel = Object.values(AvailableModels).includes(model)
if (isOpenAIModel) {
return new ChatOpenAI({
streaming: true,
openAIApiKey: apiKey,
modelName: model,
configuration: {
baseURL: baseURL,
},
temperature: Number(mode),
maxTokens: 4_096,
})
}
return new Ollama({ model: model.replace('ollama-', '') })
return new ChatOpenAI({
streaming: true,
openAIApiKey: apiKey,
modelName: model.id,
configuration: {
baseURL: baseURL,
},
temperature: Number(mode),
maxTokens: model.max_context_length,
})
}, [apiKey, model, mode, baseURL])

const previousMessages = messages.map((msg) => {
@@ -90,11 +75,6 @@ export const useChatCompletion = ({
setGenerating(true)

try {
/**
* If context is provided, we need to use the LLM to get the relevant documents
* and then run the LLM on those documents. We use in memory vector store to
* get the relevant documents
*/
let matchedContext: string | undefined
if (context) {
matchedContext = await getMatchedContent(
@@ -119,7 +99,7 @@
...previousMessages,
new HumanMessage({
content:
message.files.length > 0
message.files.length > 0 && model.capabilities.vision
? [
{ type: 'text', text: expandedQuery },
...(message.files.length > 0
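The hunk above gates multimodal input on model.capabilities.vision but is cut off before the image parts. A sketch of the likely message construction, using LangChain's content-parts format (the file field name is an assumption — the diff truncates before it):

import { HumanMessage } from '@langchain/core/messages'
import type { ModelInfo } from '../config/settings'
import type { MessageDraft } from './useMessageDraft'

// Sketch: build a text-plus-image message only when the active model
// reports vision capability; otherwise send plain text.
function buildHumanMessage(
  message: MessageDraft,
  model: ModelInfo,
  expandedQuery: string,
): HumanMessage {
  const content =
    message.files.length > 0 && model.capabilities.vision
      ? [
          { type: 'text', text: expandedQuery },
          ...message.files.map((file) => ({
            type: 'image_url' as const,
            image_url: { url: file.src }, // `file.src` is assumed; not shown in the diff
          })),
        ]
      : expandedQuery
  return new HumanMessage({ content })
}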
54 changes: 29 additions & 25 deletions src/hooks/useChatModels.ts
@@ -1,52 +1,56 @@
import { useCallback, useEffect, useState } from 'react'
import { useSettings } from './useSettings'
import axios from 'axios'
import { AvailableModels } from '../config/settings'
import type { ModelInfo } from '../config/settings'

export const useChatModels = () => {
const [settings, setSettings] = useSettings()
const [dynamicModels, setDynamicModels] = useState<string[]>([])
const [models, setModels] = useState<ModelInfo[]>([])
const chatSettings = settings.chat
const activeChatModel = chatSettings.model

const fetchLocalModels = useCallback(async () => {
if (chatSettings.showLocalModels) {
const {
data: { models },
} = await axios<{ models: { name: string }[] }>(
'http://localhost:11434/api/tags',
)
if (models) {
setDynamicModels(models.map((m) => m.name))
const fetchAvailableModels = useCallback(async () => {
if (chatSettings.openAIKey) {
try {
const baseUrl =
chatSettings.openAiBaseUrl || 'https://api.openai.com/v1'
const { data } = await axios.get(`${baseUrl}/models`, {
headers: {
Authorization: `Bearer ${chatSettings.openAIKey}`,
},
})

// Filter for chat-capable models
const chatModels = data.data.filter(
(model: ModelInfo) => model.capabilities?.completion_chat === true,
)

setModels(chatModels)
} catch (error) {
console.log('Failed to fetch models:', error)
setModels([])
}
} else {
setDynamicModels([])
}
}, [chatSettings.showLocalModels])
}, [chatSettings.openAIKey, chatSettings.openAiBaseUrl])

useEffect(() => {
fetchLocalModels()
}, [fetchLocalModels])

const availableModels = [
...Object.entries(AvailableModels),
...dynamicModels.map((m) => [m, m]),
]
fetchAvailableModels()
}, [fetchAvailableModels])

const setActiveChatModel = (model: AvailableModels) => {
const setActiveChatModel = (model: ModelInfo) => {
setSettings({
...settings,
chat: {
...chatSettings,
model: model,
model,
},
})
}

return {
availableModels,
models,
activeChatModel,
setActiveChatModel,
fetchLocalModels,
fetchAvailableModels,
}
}
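A compatibility note implied by the filter above: the stock OpenAI /v1/models response carries no capabilities field, so against api.openai.com every entry would be filtered out; the field names here match Mistral-style model listings. An illustrative payload the hook expects (all concrete values invented):

// Illustrative /models payload matching the ModelInfo shape; ids,
// names, and numbers are assumptions.
const exampleModelsResponse = {
  object: 'list',
  data: [
    {
      id: 'mistral-small-latest',
      name: 'Mistral Small',
      description: 'Fast, cost-effective chat model',
      capabilities: {
        completion_chat: true,
        function_calling: true,
        vision: false,
        fine_tuning: true,
        completion_fim: false,
      },
      max_context_length: 32_000,
      owned_by: 'mistralai',
    },
  ],
}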
