package yzma

import (
	"context"
	"errors"
	"os"

	"github.com/hybridgroup/yzma/pkg/llama"
	"github.com/tmc/langchaingo/llms"
)

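// Default sampling parameters, used when the caller does not override them
// through llms.CallOptions.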
const (
	defaultTemperature = 0.8
	defaultTopK        = 40
	defaultTopP        = 0.9
)

// LLM is a yzma local implementation wrapper that calls directly into the llama.cpp libraries using the FFI interface.
type LLM struct {
	model   string
	options options
}

// New creates a new yzma LLM implementation.
func New(opts ...Option) (*LLM, error) {
	o := options{}
	for _, opt := range opts {
		opt(&o)
	}

	libPath := os.Getenv("YZMA_LIB")
	if libPath == "" {
		return nil, errors.New("YZMA_LIB environment variable not set: no path to the llama.cpp libraries")
	}

	if err := llama.Load(libPath); err != nil {
		return nil, err
	}

	// silence llama.cpp logging and initialize the backend
	llama.LogSet(llama.LogSilent())
	llama.Init()

	llm := LLM{
		model:   o.model,
		options: o,
	}

	return &llm, nil
}

// Close frees all resources.
func (o *LLM) Close() {
	llama.BackendFree()
}

// Call calls yzma with the given prompt.
func (o *LLM) Call(ctx context.Context, prompt string, options ...llms.CallOption) (string, error) {
	return llms.GenerateFromSinglePrompt(ctx, o, prompt, options...)
}

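// Example usage (an illustrative sketch only; WithModel is an assumed option
// name used here for demonstration, substitute whichever Option this package
// exports to select the model file):
//
//	os.Setenv("YZMA_LIB", "/path/to/llama.cpp/libs")
//
//	llm, err := New(WithModel("/path/to/model.gguf"))
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer llm.Close()
//
//	out, err := llm.Call(context.Background(), "Why is the sky blue?")
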
// GenerateContent implements the Model interface.
func (o *LLM) GenerateContent(ctx context.Context, messages []llms.MessageContent, options ...llms.CallOption) (*llms.ContentResponse, error) {
	opts := llms.CallOptions{}
	for _, opt := range options {
		opt(&opts)
	}

	modelName := o.model
	if opts.Model != "" {
		modelName = opts.Model
	}

	// TODO: allow for setting any passed model params
	model := llama.ModelLoadFromFile(modelName, llama.ModelDefaultParams())
	if model == llama.Model(0) {
		return nil, errors.New("unable to load model")
	}
	defer llama.ModelFree(model)

	// TODO: allow for setting any passed context options
	ctxParams := llama.ContextDefaultParams()
	ctxParams.NCtx = uint32(4096)
	ctxParams.NBatch = uint32(2048)

	lctx := llama.InitFromModel(model, ctxParams)
	if lctx == llama.Context(0) {
		return nil, errors.New("unable to init context from model")
	}
	defer llama.Free(lctx)

	vocab := llama.ModelGetVocab(model)

	temperature := defaultTemperature
	if opts.Temperature > 0 {
		temperature = opts.Temperature
	}

	topK := defaultTopK
	if opts.TopK > 0 {
		topK = opts.TopK
	}

	minP := 0.1

	topP := defaultTopP
	if opts.TopP > 0 {
		topP = opts.TopP
	}

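	// Build the sampler chain: top-k, then top-p, then min-p, then temperature,
	// and finally a distribution sampler that picks the next token.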
	sampler := llama.SamplerChainInit(llama.SamplerChainDefaultParams())
	llama.SamplerChainAdd(sampler, llama.SamplerInitTopK(int32(topK)))
	llama.SamplerChainAdd(sampler, llama.SamplerInitTopP(float32(topP), 1))
	llama.SamplerChainAdd(sampler, llama.SamplerInitMinP(float32(minP), 1))
	llama.SamplerChainAdd(sampler, llama.SamplerInitTempExt(float32(temperature), 0, 1.0))
	llama.SamplerChainAdd(sampler, llama.SamplerInitDist(llama.DefaultSeed))

	// get the model's built-in chat template, falling back to chatml
	template := llama.ModelChatTemplate(model, "")
	if template == "" {
		template = "chatml"
	}

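	// Convert the langchaingo messages into llama.cpp chat messages.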
	msgs := []llama.ChatMessage{}
	for _, m := range messages {
		p := m.Parts[0]
		switch pt := p.(type) {
		case llms.TextContent:
			msgs = append(msgs, llama.NewChatMessage(string(m.Role), pt.Text))
		default:
			return nil, errors.New("only text content parts are supported right now")
		}
	}

	msg := chatTemplate(template, msgs, true)

	// call once to get the size of the tokens from the prompt
	count := llama.Tokenize(vocab, msg, nil, true, true)

	// now get the actual tokens
	tokens := make([]llama.Token, count)
	llama.Tokenize(vocab, msg, tokens, true, true)

	// the initial batch contains the entire tokenized prompt
	batch := llama.BatchGetOne(tokens)

	// for encoder-decoder models, run the encoder over the prompt first,
	// then start decoding from the decoder start token
	if llama.ModelHasEncoder(model) {
		llama.Encode(lctx, batch)

		start := llama.ModelDecoderStartToken(model)
		if start == llama.TokenNull {
			start = llama.VocabBOS(vocab)
		}

		batch = llama.BatchGetOne([]llama.Token{start})
	}

	result := ""

	maxTokens := int32(1024)
	if opts.MaxTokens > 0 {
		maxTokens = int32(opts.MaxTokens)
	}

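	// Generation loop: decode the current batch, sample the next token, stop on
	// an end-of-generation token, and append the detokenized piece to the result.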
	for pos := int32(0); pos < maxTokens; pos += batch.NTokens {
		llama.Decode(lctx, batch)
		token := llama.SamplerSample(sampler, lctx, -1)

		if llama.VocabIsEOG(vocab, token) {
			break
		}

		buf := make([]byte, 64)
		n := llama.TokenToPiece(vocab, token, buf, 0, true)

		result += string(buf[:n])
		batch = llama.BatchGetOne([]llama.Token{token})
	}

	choices := []*llms.ContentChoice{
		{
			Content: result,
		},
	}

	response := &llms.ContentResponse{Choices: choices}
	return response, nil
}

// chatTemplate renders the chat messages into a single prompt string using the
// given chat template.
func chatTemplate(template string, msgs []llama.ChatMessage, add bool) string {
	buf := make([]byte, 2048)
	n := llama.ChatApplyTemplate(template, msgs, add, buf)
	return string(buf[:n])
}