diff --git a/agents/conversational.go b/agents/conversational.go index 5380d9072..fd12c4630 100644 --- a/agents/conversational.go +++ b/agents/conversational.go @@ -2,23 +2,19 @@ package agents import ( "context" - _ "embed" "fmt" "regexp" "strings" "github.com/tmc/langchaingo/callbacks" "github.com/tmc/langchaingo/chains" + "github.com/tmc/langchaingo/i18n" "github.com/tmc/langchaingo/llms" "github.com/tmc/langchaingo/prompts" "github.com/tmc/langchaingo/schema" "github.com/tmc/langchaingo/tools" ) -const ( - _conversationalFinalAnswerAction = "AI:" -) - // ConversationalAgent is a struct that represents an agent responsible for deciding // what to do or give the final output if the task is finished given a set of inputs // and previous steps taken. @@ -34,6 +30,10 @@ type ConversationalAgent struct { Tools []tools.Tool // Output key is the key where the final output is placed. OutputKey string + // FinalAnswer is the final answer in various languages. + FinalAnswer string + // Lang is the language the prompt will use. + Lang i18n.Lang // CallbacksHandler is the handler for callbacks. 
CallbacksHandler callbacks.Handler } @@ -45,6 +45,7 @@ func NewConversationalAgent(llm llms.Model, tools []tools.Tool, opts ...Option) for _, opt := range opts { opt(&options) } + options.loadConversationalTranslatable() return &ConversationalAgent{ Chain: chains.NewLLMChain( @@ -54,6 +55,8 @@ func NewConversationalAgent(llm llms.Model, tools []tools.Tool, opts ...Option) ), Tools: tools, OutputKey: options.outputKey, + FinalAnswer: i18n.AgentsMustPhrase(options.lang, "conversational final answer"), + Lang: options.lang, CallbacksHandler: options.callbacksHandler, } } @@ -69,7 +72,7 @@ func (a *ConversationalAgent) Plan( fullInputs[key] = value } - fullInputs["agent_scratchpad"] = constructScratchPad(intermediateSteps) + fullInputs["agent_scratchpad"] = constructScratchPad(intermediateSteps, a.Lang) var stream func(ctx context.Context, chunk []byte) error @@ -84,7 +87,10 @@ func (a *ConversationalAgent) Plan( ctx, a.Chain, fullInputs, - chains.WithStopWords([]string{"\nObservation:", "\n\tObservation:"}), + chains.WithStopWords([]string{ + fmt.Sprintf("\n%s", i18n.AgentsMustPhrase(a.Lang, "observation")), + fmt.Sprintf("\n\t%s", i18n.AgentsMustPhrase(a.Lang, "observation")), + }), chains.WithStreamingFunc(stream), ) if err != nil { @@ -117,22 +123,22 @@ func (a *ConversationalAgent) GetTools() []tools.Tool { return a.Tools } -func constructScratchPad(steps []schema.AgentStep) string { +func constructScratchPad(steps []schema.AgentStep, lang i18n.Lang) string { var scratchPad string if len(steps) > 0 { for _, step := range steps { scratchPad += step.Action.Log - scratchPad += "\nObservation: " + step.Observation + scratchPad += fmt.Sprintf("\n%s %s", i18n.AgentsMustPhrase(lang, "observation"), step.Observation) } - scratchPad += "\n" + "Thought:" + scratchPad += fmt.Sprintf("\n%s", i18n.AgentsMustPhrase(lang, "thought")) } return scratchPad } func (a *ConversationalAgent) parseOutput(output string) ([]schema.AgentAction, *schema.AgentFinish, error) { - if 
strings.Contains(output, _conversationalFinalAnswerAction) { - splits := strings.Split(output, _conversationalFinalAnswerAction) + if strings.Contains(output, a.FinalAnswer) { + splits := strings.Split(output, a.FinalAnswer) finishAction := &schema.AgentFinish{ ReturnValues: map[string]any{ @@ -144,7 +150,9 @@ func (a *ConversationalAgent) parseOutput(output string) ([]schema.AgentAction, return nil, finishAction, nil } - r := regexp.MustCompile(`Action: (.*?)[\n]*Action Input: (.*)`) + action, actionInput := i18n.AgentsMustPhrase(a.Lang, "action"), + i18n.AgentsMustPhrase(a.Lang, "action input") + r := regexp.MustCompile(fmt.Sprintf(`%s (.*?)[\n]*%s (.*)`, action, actionInput)) matches := r.FindStringSubmatch(output) if len(matches) == 0 { return nil, nil, fmt.Errorf("%w: %s", ErrUnableToParseOutput, output) @@ -155,15 +163,6 @@ func (a *ConversationalAgent) parseOutput(output string) ([]schema.AgentAction, }, nil, nil } -//go:embed prompts/conversational_prefix.txt -var _defaultConversationalPrefix string //nolint:gochecknoglobals - -//go:embed prompts/conversational_format_instructions.txt -var _defaultConversationalFormatInstructions string //nolint:gochecknoglobals - -//go:embed prompts/conversational_suffix.txt -var _defaultConversationalSuffix string //nolint:gochecknoglobals - func createConversationalPrompt(tools []tools.Tool, prefix, instructions, suffix string) prompts.PromptTemplate { template := strings.Join([]string{prefix, instructions, suffix}, "\n\n") diff --git a/agents/executor.go b/agents/executor.go index 5835ad008..8f0e340c5 100644 --- a/agents/executor.go +++ b/agents/executor.go @@ -36,6 +36,7 @@ func NewExecutor(agent Agent, opts ...Option) *Executor { for _, opt := range opts { opt(&options) } + options.loadExecutorTranslatable() return &Executor{ Agent: agent, diff --git a/agents/mrkl.go b/agents/mrkl.go index 4bc577408..ec312be9a 100644 --- a/agents/mrkl.go +++ b/agents/mrkl.go @@ -9,16 +9,12 @@ import ( 
"github.com/tmc/langchaingo/callbacks" "github.com/tmc/langchaingo/chains" + "github.com/tmc/langchaingo/i18n" "github.com/tmc/langchaingo/llms" "github.com/tmc/langchaingo/schema" "github.com/tmc/langchaingo/tools" ) -const ( - _finalAnswerAction = "Final Answer:" - _defaultOutputKey = "output" -) - // OneShotZeroAgent is a struct that represents an agent responsible for deciding // what to do or give the final output if the task is finished given a set of inputs // and previous steps taken. @@ -32,6 +28,10 @@ type OneShotZeroAgent struct { Tools []tools.Tool // Output key is the key where the final output is placed. OutputKey string + // FinalAnswer is the final answer in various languages. + FinalAnswer string + // Lang is the language the prompt will use. + Lang i18n.Lang // CallbacksHandler is the handler for callbacks. CallbacksHandler callbacks.Handler } @@ -46,6 +46,7 @@ func NewOneShotAgent(llm llms.Model, tools []tools.Tool, opts ...Option) *OneSho for _, opt := range opts { opt(&options) } + options.loadMrklTranslatable() return &OneShotZeroAgent{ Chain: chains.NewLLMChain( @@ -55,6 +56,8 @@ func NewOneShotAgent(llm llms.Model, tools []tools.Tool, opts ...Option) *OneSho ), Tools: tools, OutputKey: options.outputKey, + FinalAnswer: i18n.AgentsMustPhrase(options.lang, "mrkl final answer"), + Lang: options.lang, CallbacksHandler: options.callbacksHandler, } } @@ -70,8 +73,8 @@ func (a *OneShotZeroAgent) Plan( fullInputs[key] = value } - fullInputs["agent_scratchpad"] = constructMrklScratchPad(intermediateSteps) - fullInputs["today"] = time.Now().Format("January 02, 2006") + fullInputs["agent_scratchpad"] = constructMrklScratchPad(intermediateSteps, a.Lang) + fullInputs["today"] = time.Now().Format(i18n.AgentsMustPhrase(a.Lang, "today format")) var stream func(ctx context.Context, chunk []byte) error @@ -86,7 +89,10 @@ func (a *OneShotZeroAgent) Plan( ctx, a.Chain, fullInputs, - chains.WithStopWords([]string{"\nObservation:", "\n\tObservation:"}), + 
chains.WithStopWords([]string{ + fmt.Sprintf("\n%s", i18n.AgentsMustPhrase(a.Lang, "observation")), + fmt.Sprintf("\n\t%s", i18n.AgentsMustPhrase(a.Lang, "observation")), + }), chains.WithStreamingFunc(stream), ) if err != nil { @@ -119,12 +125,12 @@ func (a *OneShotZeroAgent) GetTools() []tools.Tool { return a.Tools } -func constructMrklScratchPad(steps []schema.AgentStep) string { +func constructMrklScratchPad(steps []schema.AgentStep, lang i18n.Lang) string { var scratchPad string if len(steps) > 0 { for _, step := range steps { scratchPad += "\n" + step.Action.Log - scratchPad += "\nObservation: " + step.Observation + "\n" + scratchPad += fmt.Sprintf("\n%s %s\n", i18n.AgentsMustPhrase(lang, "observation"), step.Observation) } } @@ -132,8 +138,8 @@ func constructMrklScratchPad(steps []schema.AgentStep) string { } func (a *OneShotZeroAgent) parseOutput(output string) ([]schema.AgentAction, *schema.AgentFinish, error) { - if strings.Contains(output, _finalAnswerAction) { - splits := strings.Split(output, _finalAnswerAction) + if strings.Contains(output, a.FinalAnswer) { + splits := strings.Split(output, a.FinalAnswer) return nil, &schema.AgentFinish{ ReturnValues: map[string]any{ @@ -143,7 +149,15 @@ func (a *OneShotZeroAgent) parseOutput(output string) ([]schema.AgentAction, *sc }, nil } - r := regexp.MustCompile(`Action:\s*(.+)\s*Action Input:\s(?s)*(.+)`) + action, actionInput, observation := i18n.AgentsMustPhrase(a.Lang, "action"), + i18n.AgentsMustPhrase(a.Lang, "action input"), + i18n.AgentsMustPhrase(a.Lang, "observation") + var r *regexp.Regexp + if strings.Contains(output, observation) { + r = regexp.MustCompile(fmt.Sprintf(`%s\s*(.+)\s*%s\s(?s)*(.+)%s`, action, actionInput, observation)) + } else { + r = regexp.MustCompile(fmt.Sprintf(`%s\s*(.+)\s*%s\s(?s)*(.+)`, action, actionInput)) + } matches := r.FindStringSubmatch(output) if len(matches) == 0 { return nil, nil, fmt.Errorf("%w: %s", ErrUnableToParseOutput, output) diff --git a/agents/mrkl_prompt.go 
b/agents/mrkl_prompt.go index eb929febb..6fdedf424 100644 --- a/agents/mrkl_prompt.go +++ b/agents/mrkl_prompt.go @@ -8,29 +8,6 @@ import ( "github.com/tmc/langchaingo/tools" ) -const ( - _defaultMrklPrefix = `Today is {{.today}}. -Answer the following questions as best you can. You have access to the following tools: - -{{.tool_descriptions}}` - - _defaultMrklFormatInstructions = `Use the following format: - -Question: the input question you must answer -Thought: you should always think about what to do -Action: the action to take, should be one of [ {{.tool_names}} ] -Action Input: the input to the action -Observation: the result of the action -... (this Thought/Action/Action Input/Observation can repeat N times) -Thought: I now know the final answer -Final Answer: the final answer to the original input question` - - _defaultMrklSuffix = `Begin! - -Question: {{.input}} -{{.agent_scratchpad}}` -) - func createMRKLPrompt(tools []tools.Tool, prefix, instructions, suffix string) prompts.PromptTemplate { template := strings.Join([]string{prefix, instructions, suffix}, "\n\n") diff --git a/agents/markl_test.go b/agents/mrkl_test.go similarity index 86% rename from agents/markl_test.go rename to agents/mrkl_test.go index 3112f77ea..671a8b72e 100644 --- a/agents/markl_test.go +++ b/agents/mrkl_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/stretchr/testify/require" + "github.com/tmc/langchaingo/i18n" "github.com/tmc/langchaingo/schema" ) @@ -38,7 +39,11 @@ func TestMRKLOutputParser(t *testing.T) { }, } - a := OneShotZeroAgent{} + lang := i18n.EN + a := OneShotZeroAgent{ + FinalAnswer: i18n.AgentsMustPhrase(lang, "mrkl final answer"), + Lang: lang, + } for _, tc := range testCases { actions, finish, err := a.parseOutput(tc.input) require.ErrorIs(t, tc.expectedErr, err) diff --git a/agents/openai_functions_agent.go b/agents/openai_functions_agent.go index 1ff621a29..533dbdf93 100644 --- a/agents/openai_functions_agent.go +++ b/agents/openai_functions_agent.go @@ 
-38,6 +38,7 @@ func NewOpenAIFunctionsAgent(llm llms.Model, tools []tools.Tool, opts ...Option) for _, opt := range opts { opt(&options) } + options.loadOpenAIFunctionsTranslatable() return &OpenAIFunctionsAgent{ LLM: llm, diff --git a/agents/options.go b/agents/options.go index 946b48c4d..054b30382 100644 --- a/agents/options.go +++ b/agents/options.go @@ -2,12 +2,20 @@ package agents import ( "github.com/tmc/langchaingo/callbacks" + "github.com/tmc/langchaingo/i18n" "github.com/tmc/langchaingo/memory" "github.com/tmc/langchaingo/prompts" "github.com/tmc/langchaingo/schema" "github.com/tmc/langchaingo/tools" ) +type translatable struct { + promptPrefix string + formatInstructions string + promptSuffix string + outputKey string +} + type Options struct { prompt prompts.PromptTemplate memory schema.Memory @@ -15,10 +23,8 @@ type Options struct { errorHandler *ParserErrorHandler maxIterations int returnIntermediateSteps bool - outputKey string - promptPrefix string - formatInstructions string - promptSuffix string + lang i18n.Lang + translatable // openai systemMessage string @@ -29,40 +35,55 @@ type Options struct { // and executors. 
type Option func(*Options) -func executorDefaultOptions() Options { +func defaultOptions() Options { return Options{ - maxIterations: _defaultMaxIterations, - outputKey: _defaultOutputKey, - memory: memory.NewSimple(), + lang: i18n.DefaultLang, } } +func executorDefaultOptions() Options { + options := defaultOptions() + options.maxIterations = _defaultMaxIterations + options.memory = memory.NewSimple() + return options +} + func mrklDefaultOptions() Options { - return Options{ - promptPrefix: _defaultMrklPrefix, - formatInstructions: _defaultMrklFormatInstructions, - promptSuffix: _defaultMrklSuffix, - outputKey: _defaultOutputKey, - } + return defaultOptions() } func conversationalDefaultOptions() Options { - return Options{ - promptPrefix: _defaultConversationalPrefix, - formatInstructions: _defaultConversationalFormatInstructions, - promptSuffix: _defaultConversationalSuffix, - outputKey: _defaultOutputKey, - } + return defaultOptions() } func openAIFunctionsDefaultOptions() Options { - return Options{ - systemMessage: "You are a helpful AI assistant.", - outputKey: _defaultOutputKey, - } + return defaultOptions() } -func (co Options) getMrklPrompt(tools []tools.Tool) prompts.PromptTemplate { +func (co *Options) loadExecutorTranslatable() { + co.outputKey = i18n.AgentsMustPhrase(co.lang, "output key") +} + +func (co *Options) loadMrklTranslatable() { + co.promptPrefix = i18n.AgentsMustLoad(co.lang, "mrkl_prompt_prefix.txt") + co.formatInstructions = i18n.AgentsMustLoad(co.lang, "mrkl_prompt_format_instructions.txt") + co.promptSuffix = i18n.AgentsMustLoad(co.lang, "mrkl_prompt_suffix.txt") + co.outputKey = i18n.AgentsMustPhrase(co.lang, "output key") +} + +func (co *Options) loadConversationalTranslatable() { + co.promptPrefix = i18n.AgentsMustLoad(co.lang, "conversational_prompt_prefix.txt") + co.formatInstructions = i18n.AgentsMustLoad(co.lang, "conversational_prompt_format_instructions.txt") + co.promptSuffix = i18n.AgentsMustLoad(co.lang, 
"conversational_prompt_suffix.txt") + co.outputKey = i18n.AgentsMustPhrase(co.lang, "output key") +} + +func (co *Options) loadOpenAIFunctionsTranslatable() { + co.systemMessage = i18n.AgentsMustPhrase(co.lang, "system message") + co.outputKey = i18n.AgentsMustPhrase(co.lang, "output key") +} + +func (co *Options) getMrklPrompt(tools []tools.Tool) prompts.PromptTemplate { if co.prompt.Template != "" { return co.prompt } @@ -75,7 +96,7 @@ func (co Options) getMrklPrompt(tools []tools.Tool) prompts.PromptTemplate { ) } -func (co Options) getConversationalPrompt(tools []tools.Tool) prompts.PromptTemplate { +func (co *Options) getConversationalPrompt(tools []tools.Tool) prompts.PromptTemplate { if co.prompt.Template != "" { return co.prompt } @@ -161,6 +182,13 @@ func WithParserErrorHandler(errorHandler *ParserErrorHandler) Option { } } +// WithLang is an option for setting language the prompt will use. +func WithLang(lang i18n.Lang) Option { + return func(co *Options) { + co.lang = lang + } +} + type OpenAIOption struct{} func NewOpenAIOption() OpenAIOption { diff --git a/i18n/doc.go b/i18n/doc.go new file mode 100644 index 000000000..6762de5b4 --- /dev/null +++ b/i18n/doc.go @@ -0,0 +1,6 @@ +// Package i18n provides internationalization for other packages, +// currently the agents package. +// - Support for package agents: If non-English questions are inputted, +// the agents won't work well with the English prompt, so the +// corresponding-language prompt will be necessary. +package i18n diff --git a/i18n/lang.go b/i18n/lang.go new file mode 100644 index 000000000..eaaef4269 --- /dev/null +++ b/i18n/lang.go @@ -0,0 +1,14 @@ +package i18n + +// Lang is the language type. +type Lang int + +const ( + // EN stands for English. + EN Lang = iota + // ZH stands for Simplified-Chinese. + ZH + + // DefaultLang is the default language. 
+ DefaultLang = EN +) diff --git a/i18n/load.go b/i18n/load.go new file mode 100644 index 000000000..97cf3cac4 --- /dev/null +++ b/i18n/load.go @@ -0,0 +1,28 @@ +package i18n + +import ( + "embed" + "fmt" + "io/fs" + "log" +) + +//go:embed templates/* +var tpls embed.FS + +func mustLoad(lang Lang, kindFolder, filename string) string { + langFolderMap := map[Lang]string{ + EN: "en", + ZH: "zh", + } + langFolder, ok := langFolderMap[lang] + if !ok { + log.Panic("unknown language: ", lang) + } + filepath := fmt.Sprintf("templates/%s/%s/%s", kindFolder, langFolder, filename) + b, err := fs.ReadFile(tpls, filepath) + if err != nil { + log.Panic("read file failed: ", err) + } + return string(b) +} diff --git a/i18n/load_agents.go b/i18n/load_agents.go new file mode 100644 index 000000000..cacae2dbc --- /dev/null +++ b/i18n/load_agents.go @@ -0,0 +1,40 @@ +package i18n + +import ( + "encoding/json" + "log" + "sync" +) + +// AgentsMustLoad loads file of agents by the given language and filename. +// Will panic if any problem occurs, including unsupported language, +// unknown filename and other problems. +func AgentsMustLoad(lang Lang, filename string) string { + return mustLoad(lang, "agents", filename) +} + +// nolint:gochecknoglobals +var langAgentsPhraseMap sync.Map + +// AgentsMustPhrase loads phrase of agents by the given language and key. +// Will panic if any problem occurs, including unsupported language, +// unknown key and other problems. 
+func AgentsMustPhrase(lang Lang, key string) string { + var agentsPhraseMap map[string]string + valAny, ok := langAgentsPhraseMap.Load(lang) + if ok { + agentsPhraseMap, _ = valAny.(map[string]string) + } else { + s := AgentsMustLoad(lang, "phrase.json") + agentsPhraseMap = make(map[string]string) + if err := json.Unmarshal([]byte(s), &agentsPhraseMap); err != nil { + log.Panic("unmarshal phrase failed:", err) + } + langAgentsPhraseMap.Store(lang, agentsPhraseMap) + } + val, ok := agentsPhraseMap[key] + if !ok { + log.Panic("there is no such key in phrase:", key) + } + return val +} diff --git a/i18n/load_agents_test.go b/i18n/load_agents_test.go new file mode 100644 index 000000000..8d1a3a2c4 --- /dev/null +++ b/i18n/load_agents_test.go @@ -0,0 +1,59 @@ +package i18n + +import ( + "strings" + "testing" +) + +func TestAgentsMustPhrase(t *testing.T) { + t.Parallel() + type args struct { + lang Lang + key string + } + tests := []struct { + name string + args args + want string + wantPanic bool + wantPanicLike string + }{ + { + name: "should succeed", + args: args{ + lang: ZH, + key: "thought", + }, + want: "思考:", + }, + { + name: "should panic", + args: args{ + lang: ZH, + key: "invalid key", + }, + wantPanic: true, + wantPanicLike: "there is no such key in phrase", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + if tt.wantPanic { + defer func() { + if r := recover(); r == nil { + t.Errorf("want panic, but did not happen") + } else if s, ok := r.(string); !ok { + t.Errorf("unexpected panic type") + } else if !strings.Contains(s, tt.wantPanicLike) { + t.Errorf("panic = %v, want %v", r, tt.wantPanicLike) + } + }() + } + + if got := AgentsMustPhrase(tt.args.lang, tt.args.key); got != tt.want { + t.Errorf("AgentsMustPhrase() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/i18n/load_test.go b/i18n/load_test.go new file mode 100644 index 000000000..9f2440288 --- /dev/null +++ b/i18n/load_test.go @@ -0,0 +1,81 @@ 
+package i18n + +import ( + "strings" + "testing" +) + +func Test_mustLoad(t *testing.T) { + t.Parallel() + type args struct { + lang Lang + kindFolder string + filename string + } + tests := []struct { + name string + args args + want string + wantPanic bool + wantPanicLike string + }{ + { + name: "should succeed", + args: args{ + lang: ZH, + kindFolder: "agents", + filename: "mrkl_prompt_format_instructions.txt", + }, + want: `严格遵循以下格式: + +问题: 你要回答的问题 +思考: 针对这个问题,你的思考过程 +工具: 你将要使用的工具,只能在 [ {{.tool_names}} ] 中选择一个 +工具参数: 使用工具所需的参数 +结果: 使用工具获得的结果 +...(『思考/工具/工具参数/结果』可以重复多次) +思考: 我知道最终答案了 +最终答案: 问题的最终答案`, + }, + { + name: "should panic due to unknown language", + args: args{ + lang: Lang(-1), + kindFolder: "whatever", + filename: "whatever", + }, + wantPanic: true, + wantPanicLike: "unknown language", + }, + { + name: "should panic due to reading file failed", + args: args{ + lang: EN, + kindFolder: "agents", + filename: "whatever", + }, + wantPanic: true, + wantPanicLike: "read file failed", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + if tt.wantPanic { + defer func() { + if r := recover(); r == nil { + t.Errorf("want panic, but did not happen") + } else if s, ok := r.(string); !ok { + t.Errorf("unexpected panic type") + } else if !strings.Contains(s, tt.wantPanicLike) { + t.Errorf("panic = %v, want %v", r, tt.wantPanicLike) + } + }() + } + + if got := mustLoad(tt.args.lang, tt.args.kindFolder, tt.args.filename); got != tt.want { + t.Errorf("mustLoad() = %v, want %v", got, tt.want) + } + }) + } +} diff --git a/agents/prompts/conversational_format_instructions.txt b/i18n/templates/agents/en/conversational_prompt_format_instructions.txt similarity index 100% rename from agents/prompts/conversational_format_instructions.txt rename to i18n/templates/agents/en/conversational_prompt_format_instructions.txt diff --git a/agents/prompts/conversational_prefix.txt b/i18n/templates/agents/en/conversational_prompt_prefix.txt 
similarity index 100% rename from agents/prompts/conversational_prefix.txt rename to i18n/templates/agents/en/conversational_prompt_prefix.txt diff --git a/agents/prompts/conversational_suffix.txt b/i18n/templates/agents/en/conversational_prompt_suffix.txt similarity index 100% rename from agents/prompts/conversational_suffix.txt rename to i18n/templates/agents/en/conversational_prompt_suffix.txt diff --git a/i18n/templates/agents/en/mrkl_prompt_format_instructions.txt b/i18n/templates/agents/en/mrkl_prompt_format_instructions.txt new file mode 100644 index 000000000..06a7c2995 --- /dev/null +++ b/i18n/templates/agents/en/mrkl_prompt_format_instructions.txt @@ -0,0 +1,10 @@ +Use the following format: + +Question: the input question you must answer +Thought: you should always think about what to do +Action: the action to take, should be one of [ {{.tool_names}} ] +Action Input: the input to the action +Observation: the result of the action +... (this Thought/Action/Action Input/Observation can repeat N times) +Thought: I now know the final answer +Final Answer: the final answer to the original input question \ No newline at end of file diff --git a/i18n/templates/agents/en/mrkl_prompt_prefix.txt b/i18n/templates/agents/en/mrkl_prompt_prefix.txt new file mode 100644 index 000000000..e35d626b9 --- /dev/null +++ b/i18n/templates/agents/en/mrkl_prompt_prefix.txt @@ -0,0 +1,4 @@ +Today is {{.today}}. +Answer the following questions as best you can. You have access to the following tools: + +{{.tool_descriptions}} \ No newline at end of file diff --git a/i18n/templates/agents/en/mrkl_prompt_suffix.txt b/i18n/templates/agents/en/mrkl_prompt_suffix.txt new file mode 100644 index 000000000..12235f61e --- /dev/null +++ b/i18n/templates/agents/en/mrkl_prompt_suffix.txt @@ -0,0 +1,4 @@ +Begin! 
+ +Question: {{.input}} +Thought: {{.agent_scratchpad}} \ No newline at end of file diff --git a/i18n/templates/agents/en/phrase.json b/i18n/templates/agents/en/phrase.json new file mode 100644 index 000000000..23648a6c7 --- /dev/null +++ b/i18n/templates/agents/en/phrase.json @@ -0,0 +1,11 @@ +{ + "thought": "Thought:", + "action": "Action:", + "action input": "Action Input:", + "observation": "Observation:", + "output key": "output", + "system message": "You are a helpful AI assistant.", + "today format": "January 02, 2006", + "mrkl final answer": "Final Answer:", + "conversational final answer": "AI:" +} \ No newline at end of file diff --git a/i18n/templates/agents/zh/conversational_prompt_format_instructions.txt b/i18n/templates/agents/zh/conversational_prompt_format_instructions.txt new file mode 100644 index 000000000..c85241666 --- /dev/null +++ b/i18n/templates/agents/zh/conversational_prompt_format_instructions.txt @@ -0,0 +1,11 @@ +使用工具时,遵循以下格式: + +思考: 我需要使用工具吗?是的 +工具: 你将要使用的工具,只能在 [ {{.tool_names}} ] 中选择一个 +工具参数: 使用工具所需的参数 +结果: 使用工具获得的结果 + +当你对人类有回应时,或者如果你不需要使用工具,你必须使用以下格式: + +思考: 我需要使用工具吗?不 +AI: [你的回答] diff --git a/i18n/templates/agents/zh/conversational_prompt_prefix.txt b/i18n/templates/agents/zh/conversational_prompt_prefix.txt new file mode 100644 index 000000000..a0937365a --- /dev/null +++ b/i18n/templates/agents/zh/conversational_prompt_prefix.txt @@ -0,0 +1,6 @@ +工具: +------ + +你可以使用以下工具: + +{{.tool_descriptions}} \ No newline at end of file diff --git a/i18n/templates/agents/zh/conversational_prompt_suffix.txt b/i18n/templates/agents/zh/conversational_prompt_suffix.txt new file mode 100644 index 000000000..c53222be4 --- /dev/null +++ b/i18n/templates/agents/zh/conversational_prompt_suffix.txt @@ -0,0 +1,8 @@ +开始吧!
+ +对话历史: +{{.history}} + +最新输入: {{.input}} + +思考: {{.agent_scratchpad}} \ No newline at end of file diff --git a/i18n/templates/agents/zh/mrkl_prompt_format_instructions.txt b/i18n/templates/agents/zh/mrkl_prompt_format_instructions.txt new file mode 100644 index 000000000..f27d63043 --- /dev/null +++ b/i18n/templates/agents/zh/mrkl_prompt_format_instructions.txt @@ -0,0 +1,10 @@ +严格遵循以下格式: + +问题: 你要回答的问题 +思考: 针对这个问题,你的思考过程 +工具: 你将要使用的工具,只能在 [ {{.tool_names}} ] 中选择一个 +工具参数: 使用工具所需的参数 +结果: 使用工具获得的结果 +...(『思考/工具/工具参数/结果』可以重复多次) +思考: 我知道最终答案了 +最终答案: 问题的最终答案 \ No newline at end of file diff --git a/i18n/templates/agents/zh/mrkl_prompt_prefix.txt b/i18n/templates/agents/zh/mrkl_prompt_prefix.txt new file mode 100644 index 000000000..9330b199f --- /dev/null +++ b/i18n/templates/agents/zh/mrkl_prompt_prefix.txt @@ -0,0 +1,4 @@ +今天是 {{.today}}。 +尽可能地回答问题。你可以使用以下工具: + +{{.tool_descriptions}} \ No newline at end of file diff --git a/i18n/templates/agents/zh/mrkl_prompt_suffix.txt b/i18n/templates/agents/zh/mrkl_prompt_suffix.txt new file mode 100644 index 000000000..a10f46ee9 --- /dev/null +++ b/i18n/templates/agents/zh/mrkl_prompt_suffix.txt @@ -0,0 +1,4 @@ +开始吧! + +问题: {{.input}} +思考: {{.agent_scratchpad}} \ No newline at end of file diff --git a/i18n/templates/agents/zh/phrase.json b/i18n/templates/agents/zh/phrase.json new file mode 100644 index 000000000..463dc36e6 --- /dev/null +++ b/i18n/templates/agents/zh/phrase.json @@ -0,0 +1,11 @@ +{ + "thought": "思考:", + "action": "工具:", + "action input": "工具参数:", + "observation": "结果:", + "output key": "output", + "system message": "你是一个乐于助人的AI助手。", + "today format": "2006年01月02日", + "mrkl final answer": "最终答案:", + "conversational final answer": "AI:" +} \ No newline at end of file