Skip to content

Commit

Permalink
update getDirs
Browse files Browse the repository at this point in the history
  • Loading branch information
POPPIN-FUMI committed Mar 11, 2024
2 parents c6a95ea + 4291065 commit 66f657b
Show file tree
Hide file tree
Showing 31 changed files with 856 additions and 365 deletions.
5 changes: 4 additions & 1 deletion .firebaserc
Original file line number Diff line number Diff line change
Expand Up @@ -13,9 +13,12 @@
],
"skeet-ai": [
"skeet-ai"
],
"skeet-cloud-task": [
"skeet-cloud-task"
]
}
}
},
"etags": {}
}
}
2 changes: 1 addition & 1 deletion .github/workflows/website-skeet-ai.yml
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ on:
branches:
- main
paths:
- 'packages/ai/docs/**'
- 'packages/ai/**'
- '.github/workflows/website-skeet-ai.yml'

jobs:
Expand Down
41 changes: 41 additions & 0 deletions .github/workflows/website-skeet-cloud-task.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,41 @@
# Builds and deploys the cloud-task package documentation to Firebase Hosting.
name: skeet-cloud-task
on:
  push:
    branches:
      - main
    # Only run when the docs or this workflow itself change.
    paths:
      - 'packages/cloud-task/docs/**'
      - '.github/workflows/website-skeet-cloud-task.yml'

jobs:
  build:
    runs-on: ubuntu-22.04
    strategy:
      matrix:
        node-version: [20.11.0]
    steps:
      - uses: actions/checkout@v3
      - uses: pnpm/action-setup@v3
        with:
          version: 8
      - name: Use Node.js ${{ matrix.node-version }}
        uses: actions/setup-node@v4
        with:
          node-version: ${{ matrix.node-version }}
          cache: 'pnpm'
      # Authenticate to Google Cloud so firebase-tools can deploy.
      - id: auth
        uses: google-github-actions/auth@v2
        with:
          credentials_json: ${{ secrets.SKEET_GCP_SA_KEY }}
      - name: Install firebase tools
        run: pnpm add -g firebase-tools
      # Rewrite SSH GitHub remotes to HTTPS (CI has no SSH key).
      - name: GitHub repository setting
        run: git config --global url."https://github.com".insteadOf ssh://[email protected]
      - name: Install dependencies
        run: pnpm install
      - name: Build App
        run: pnpm -F cloud-task build-doc
        env:
          # Docs build is memory-hungry; raise the V8 heap limit.
          NODE_OPTIONS: --max-old-space-size=8192
      - name: Deploy to Firebase
        run: firebase deploy --only hosting:skeet-cloud-task
17 changes: 17 additions & 0 deletions firebase.json
Original file line number Diff line number Diff line change
@@ -1,4 +1,16 @@
{
"functions": [
{
"source": "templates/base-functions",
"codebase": "base-functions",
"ignore": [
"node_modules",
".git",
"firebase-debug.log",
"firebase-debug.*.log"
]
}
],
"hosting": [
{
"target": "skeet-utils",
Expand All @@ -14,6 +26,11 @@
"target": "skeet-doc",
"public": "website/skeet-doc/out",
"ignore": ["firebase.json", "**/.*", "**/node_modules/**"]
},
{
"target": "skeet-cloud-task",
"public": "packages/cloud-task/docs",
"ignore": ["firebase.json", "**/.*", "**/node_modules/**"]
}
]
}
1 change: 1 addition & 0 deletions package.json
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@
"ci:publish": "changeset publish",
"test": "pnpm run -r test",
"build": "pnpm run -r build",
"dev:func": "firebase emulators:start --only functions",
"cli": "pnpm -F \"@skeet-framework/cli\"",
"utils": "pnpm -F \"@skeet-framework/utils\"",
"discord:labo": "tsx ./common/runDiscordChangeLog.ts labo",
Expand Down
6 changes: 6 additions & 0 deletions packages/ai/CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,5 +1,11 @@
# @skeet-framework/ai

## 1.8.2

### Patch Changes

- [#308](https://github.com/elsoul/skeet/pull/308) [`c70fa8f`](https://github.com/elsoul/skeet/commit/c70fa8f24321104f4cdfc82a4738ebf9fa6752c7) Thanks [@POPPIN-FUMI](https://github.com/POPPIN-FUMI)! - add isStream boolean to chat function

## 1.8.1

### Patch Changes
Expand Down
11 changes: 5 additions & 6 deletions packages/ai/package.json
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
{
"name": "@skeet-framework/ai",
"version": "1.8.1",
"version": "1.8.2",
"description": "Skeet Framework Plugin - AI",
"main": "dist/index.js",
"module": "dist/index.js",
Expand Down Expand Up @@ -40,16 +40,15 @@
"openai": "4.28.4"
},
"devDependencies": {
"@skeet-framework/discord-utils": "workspace:*",
"@skeet-framework/utils": "workspace:*",
"@types/node": "20.11.24",
"@types/node": "20.11.25",
"esbuild": "0.20.1",
"eslint": "8.57.0",
"eslint-config-prettier": "9.1.0",
"prettier": "3.2.5",
"tsx": "4.7.1",
"typedoc": "0.25.11",
"typescript": "5.3.3",
"typedoc": "0.25.12",
"typescript": "5.4.2",
"vite": "5.1.5",
"vite-tsconfig-paths": "4.3.1",
"vitest": "1.3.1"
}
Expand Down
40 changes: 27 additions & 13 deletions packages/ai/src/lib/chat.ts
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,13 @@ import { ConfigOpenAIType, defaultOpenAIConfig, openAIChat } from './openAIChat'
import { readGeminiStream } from './readGeminiStream'
import { readOpenAIStream } from './readOpenAIStream'
import { Readable } from 'stream'
import {
GenerateContentResult,
StreamGenerateContentResult,
} from '@google-cloud/vertexai'
import OpenAI from 'openai'
import { geminiChatStream } from './geminiChatStream'
import { openAIChatStream } from './openAIChatStream'

const GEMINI = 'Gemini'
const OPENAI = 'OpenAI'
Expand All @@ -16,6 +23,7 @@ const OPENAI = 'OpenAI'
* @param examples - An array of `InputOutput` objects representing example input-output pairs to guide the model's responses.
* @param input - The user's input string for which a response is requested from the chat model.
* @param aiType - Specifies the chat model to use. Defaults to 'Gemini'. Can be either 'Gemini' or 'OpenAI'.
* @param isStream - A boolean indicating whether to return a stream of the model's response. Defaults to true.
* @param isLogging - A boolean indicating whether to log the stream's content to the console. Defaults to true.
 * @returns Returns a Promise resolving to a stream of the model's response when `isStream` is true, or to the full response text when it is false. If logging is disabled, the raw stream is returned without being echoed to the console.
* @throws Exits the process with status code 1 if an error occurs.
Expand All @@ -36,6 +44,7 @@ export const chat = async (
examples: InputOutput[],
input: string,
aiType = GEMINI as AIType,
isStream = true,
isLogging = true,
) => {
if (aiType === GEMINI) {
Expand All @@ -51,14 +60,17 @@ export const chat = async (
examples,
input,
)
const stream = await geminiChat(contents, geminiConfig)
if (!isLogging) {
return stream
}
if (stream) {
await readGeminiStream(stream)

if (isStream) {
const stream = await geminiChatStream(contents, geminiConfig)
if (!isLogging) {
return stream as StreamGenerateContentResult
}
await readGeminiStream(stream as StreamGenerateContentResult)
return stream as StreamGenerateContentResult
}
return stream
const resp = await geminiChat(contents, geminiConfig)
return resp
} catch (error) {
console.error('Error:', error)
process.exit(1)
Expand All @@ -81,14 +93,16 @@ export const chat = async (
examples,
input,
)
const stream = await openAIChat(prompt, openaiConfig)
if (!isLogging) {
return stream
}
if (stream) {

if (isStream) {
const stream = await openAIChatStream(prompt, openaiConfig)
if (!isLogging) {
return stream
}
await readOpenAIStream(stream as unknown as Readable)
}
return stream
const resp = await openAIChat(prompt, openaiConfig)
return resp
} catch (error) {
console.error('Error:', error)
process.exit(1)
Expand Down
14 changes: 8 additions & 6 deletions packages/ai/src/lib/geminiChat.ts
Original file line number Diff line number Diff line change
Expand Up @@ -4,9 +4,7 @@ import {
HarmBlockThreshold,
GenerationConfig,
Content,
StreamGenerateContentResult,
} from '@google-cloud/vertexai'
import type { VertexAiResponse } from '@/lib/types/vertexAiResponseTypes'
import dotenv from 'dotenv'
dotenv.config()

Expand Down Expand Up @@ -40,7 +38,7 @@ export const geminiChat = async (
console.error(
'GCP_PROJECT_ID and GCP_LOCATION are required in .env file.\n\nor you can pass them as arguments to the function.',
)
return
process.exit(1)
}
const { model, project, location, ...generation_config } = config
const vertex_ai = new VertexAI({
Expand All @@ -63,9 +61,13 @@ export const geminiChat = async (
const request = {
contents,
}
const streamingResp = await generativeModel.generateContentStream(request)
return streamingResp

const resp = await generativeModel.generateContent(request)
if (resp == null) {
throw new Error('Error in geminiChat: response is null')
}
return resp.response.candidates[0].content.parts[0].text as string
} catch (error) {
throw new Error(`Error in geminiChat: ${error}`)
}
}
}
74 changes: 74 additions & 0 deletions packages/ai/src/lib/geminiChatStream.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,74 @@
import {
VertexAI,
HarmCategory,
HarmBlockThreshold,
GenerationConfig,
Content,
StreamGenerateContentResult,
} from '@google-cloud/vertexai'
import dotenv from 'dotenv'
dotenv.config()

// Default GCP routing values, read from the environment (.env loaded via dotenv).
const project = process.env.GCP_PROJECT_ID || ''
const location = process.env.GCP_LOCATION || ''

// Gemini generation settings plus the Vertex AI routing fields.
export interface ConfigGeminiType extends GenerationConfig {
  model: string // Gemini model name, e.g. 'gemini-1.0-pro'
  project: string // GCP project ID
  location: string // GCP region the Vertex AI endpoint lives in
}

// Model names this wrapper accepts.
export type GeminiModel = 'gemini-1.0-pro' | 'gemini-1.0-pro-vision'

// Conservative defaults; callers may pass their own config to geminiChatStream.
export const defaultGeminiConfig: ConfigGeminiType = {
  project,
  location,
  max_output_tokens: 256,
  temperature: 0.1,
  top_p: 1,
  top_k: 40,
  model: 'gemini-1.0-pro' as GeminiModel,
}

/**
 * Opens a streaming chat request against a Vertex AI Gemini model.
 *
 * @param contents - Conversation history / prompt as Vertex AI `Content` entries.
 * @param config - Generation settings plus `model`, `project`, and `location`.
 *   Defaults to `defaultGeminiConfig` (project/location read from `.env`).
 * @returns A Promise resolving to the Vertex AI streaming result
 *   (`StreamGenerateContentResult`).
 * @throws Error when the Vertex AI call fails or yields no response.
 *   Exits the process when `project`/`location` are not configured.
 */
export const geminiChatStream = async (
  contents: Content[],
  config = defaultGeminiConfig,
) => {
  try {
    if (config.project === '' || config.location === '') {
      console.error(
        'GCP_PROJECT_ID and GCP_LOCATION are required in .env file.\n\nor you can pass them as arguments to the function.',
      )
      process.exit(1)
    }
    const { model, project, location, ...generation_config } = config
    const vertex_ai = new VertexAI({
      project,
      location,
    })

    // Instantiate models
    const generativeModel = vertex_ai.getGenerativeModel({
      model,
      safety_settings: [
        {
          category: HarmCategory.HARM_CATEGORY_DANGEROUS_CONTENT,
          threshold: HarmBlockThreshold.BLOCK_MEDIUM_AND_ABOVE,
        },
      ],
      generation_config,
    })

    const request = {
      contents,
    }

    const streamingResp = await generativeModel.generateContentStream(request)
    if (streamingResp == null) {
      throw new Error('Error in geminiChatStream: No response from Vertex AI')
    }
    return streamingResp as StreamGenerateContentResult
  } catch (error) {
    // Fix: previously reported "geminiChat", misattributing the failing call site.
    throw new Error(`Error in geminiChatStream: ${error}`)
  }
}
17 changes: 9 additions & 8 deletions packages/ai/src/lib/openAIChat.ts
Original file line number Diff line number Diff line change
@@ -1,10 +1,8 @@
import OpenAI from 'openai'
import {
ChatCompletionChunk,
ChatCompletionCreateParamsBase,
ChatCompletionMessageParam,
} from 'openai/resources/chat/completions'
import { Stream } from 'openai/streaming'
import dotenv from 'dotenv'
dotenv.config()

Expand Down Expand Up @@ -48,7 +46,7 @@ export const openAIChat = async (
console.error(
'CHAT_GPT_ORG and CHAT_GPT_KEY are required in .env file.\n\nor you can pass them as arguments to the function.',
)
return
process.exit(1)
}

const ai = new OpenAI({
Expand All @@ -61,11 +59,14 @@ export const openAIChat = async (
max_tokens: config.maxTokens,
top_p: config.topP,
n: config.n,
stream: config.stream,
stream: false,
messages: contents,
}
const stream = (await ai.chat.completions.create(
openaiConfig,
)) as Stream<ChatCompletionChunk>
return stream

const resp = await ai.chat.completions.create(openaiConfig)
if ('choices' in resp) {
return resp.choices[0].message.content as string
} else {
throw new Error('Error in openAIChat: response is null')
}
}
Loading

0 comments on commit 66f657b

Please sign in to comment.