119 | 119 | },
120 | 120 | "temperature": {
121 | 121 | "type": "number",
122 | | - "description": "Temperature of the OpenAI model.", |
| 122 | + "description": "Temperature of the OpenAI model. Should be in range [0, 2], otherwise an error will be produced.", |
123 | 123 | "default": 1
124 | 124 | },
125 | 125 | "apiKey": {

306 | 306 | },
307 | 307 | "temperature": {
308 | 308 | "type": "number",
309 | | - "description": "Temperature of the LM Studio model.", |
| 309 | + "description": "Temperature of the LM Studio model. Should be in range [0, 2], otherwise an error will be produced.", |
310 | 310 | "default": 1
311 | 311 | },
312 | 312 | "port": {

376 | 376 | "markdownDescription": "List of configurations that fetch completions from a locally running LLM inside [LM Studio](https://lmstudio.ai).",
377 | 377 | "order": 3
378 | 378 | },
| 379 | + "coqpilot.deepSeekModelsParameters": { |
| 380 | + "type": "array", |
| 381 | + "items": { |
| 382 | + "type": "object", |
| 383 | + "properties": { |
| 384 | + "modelId": { |
| 385 | + "type": "string", |
| 386 | + "markdownDescription": "Unique identifier of this model to distinguish it from others. Could be any string.", |
| 387 | + "default": "deep-seek-v3" |
| 388 | + }, |
| 389 | + "modelName": { |
| 390 | + "type": "string", |
| 391 | + "markdownDescription": "Model to use from the DeepSeek public API. List of models known to CoqPilot: \n * deepseek-chat \n * deepseek-reasoner", |
| 392 | + "default": "deepseek-chat" |
| 393 | + }, |
| 394 | + "temperature": { |
| 395 | + "type": "number", |
| 396 | + "description": "Temperature of the DeepSeek model. Should be in range [0, 2], otherwise an error will be produced.", |
| 397 | + "default": 1 |
| 398 | + }, |
| 399 | + "apiKey": { |
| 400 | + "type": "string", |
| 401 | + "description": "Api key to communicate with the DeepSeek api. You can get one [here](https://platform.deepseek.com/api_keys).", |
| 402 | + "default": "None" |
| 403 | + }, |
| 404 | + "choices": { |
| 405 | + "type": "number", |
| 406 | + "description": "Number of attempts to generate proof for one hole with this model. All attempts are made as a single request, so this parameter should not have a significant impact on performance. However, more choices mean more tokens spent on generation.", |
| 407 | + "default": 15 |
| 408 | + }, |
| 409 | + "systemPrompt": { |
| 410 | + "type": "string", |
| 411 | + "description": "Prompt for the DeepSeek model to begin a chat with. It is sent as a system message, which means it has more impact than other messages.", |
| 412 | + "default": "Generate proof of the theorem from user input in Coq. You should only generate proofs in Coq. Never add special comments to the proof. Your answer should be a valid Coq proof. It should start with 'Proof.' and end with 'Qed.'." |
| 413 | + }, |
| 414 | + "maxTokensToGenerate": { |
| 415 | + "type": "number", |
| 416 | + "description": "Number of tokens that the model is allowed to generate as a response message (i.e. message with proof).", |
| 417 | + "default": 2048 |
| 418 | + }, |
| 419 | + "tokensLimit": { |
| 420 | + "type": "number", |
| 421 | + "description": "Total length of input and generated tokens, it is determined by the model.", |
| 422 | + "default": 4096 |
| 423 | + }, |
| 424 | + "maxContextTheoremsNumber": { |
| 425 | + "type": "number", |
| 426 | + "description": "Maximum number of context theorems to include in the prompt sent to the DeepSeek model as examples for proof generation. Lower values reduce token usage but may decrease the likelihood of generating correct proofs.", |
| 427 | + "default": 100 |
| 428 | + }, |
| 429 | + "multiroundProfile": { |
| 430 | + "type": "object", |
| 431 | + "properties": { |
| 432 | + "maxRoundsNumber": { |
| 433 | + "type": "number", |
| 434 | + "description": "Maximum number of rounds to generate and further fix the proof. Default value is 1, which means each proof will be only generated, but not fixed.", |
| 435 | + "default": 1 |
| 436 | + }, |
| 437 | + "proofFixChoices": { |
| 438 | + "type": "number", |
| 439 | + "description": "Number of attempts to generate a proof fix for each proof in one round. Warning: increasing `proofFixChoices` can lead to exponential growth in generation requests if `maxRoundsNumber` is relatively large.", |
| 440 | + "default": 1 |
| 441 | + }, |
| 442 | + "proofFixPrompt": { |
| 443 | + "type": "string", |
| 444 | + "description": "Prompt for the proof-fix request that will be sent as a user chat message in response to an incorrect proof. It may include the `${diagnostic}` substring, which will be replaced by the actual compiler diagnostic.", |
| 445 | + "default": "Unfortunately, the last proof is not correct. Here is the compiler's feedback: `${diagnostic}`. Please, fix the proof." |
| 446 | + }, |
| 447 | + "maxPreviousProofVersionsNumber": { |
| 448 | + "type": "number", |
| 449 | + "description": "Maximum number of previous proof versions to include in the proof-fix chat, each presented as a dialogue: the user's diagnostic followed by the assistant's corresponding proof attempt. The most recent proof version being fixed is always included and is not affected by this parameter.", |
| 450 | + "default": 100 |
| 451 | + } |
| 452 | + }, |
| 453 | + "default": { |
| 454 | + "maxRoundsNumber": 1, |
| 455 | + "proofFixChoices": 1, |
| 456 | + "proofFixPrompt": "Unfortunately, the last proof is not correct. Here is the compiler's feedback: `${diagnostic}`. Please, fix the proof.", |
| 457 | + "maxPreviousProofVersionsNumber": 100 |
| 458 | + } |
| 459 | + } |
| 460 | + } |
| 461 | + }, |
| 462 | + "default": [], |
| 463 | + "markdownDescription": "List of configurations for DeepSeek models. Each configuration will be fetched for completions independently in the order they are listed.", |
| 464 | + "order": 4 |
| 465 | + }, |
379 | 466 | "coqpilot.contextTheoremsRankerType": {
|
380 | 467 | "type": "string",
|
381 | 468 | "enum": [
|
|
390 | 477 | ],
|
391 | 478 | "description": "Context of the LLM is limited. Usually not all theorems from the file may be used in the completion request. This parameter defines the way theorems are selected for the completion.",
|
392 | 479 | "default": "distance",
|
393 |
| - "order": 4 |
| 480 | + "order": 5 |
394 | 481 | },
|
395 | 482 | "coqpilot.loggingVerbosity": {
|
396 | 483 | "type": "string",
|
|
404 | 491 | ],
|
405 | 492 | "description": "The verbosity of the logs.",
|
406 | 493 | "default": "info",
|
407 |
| - "order": 5 |
| 494 | + "order": 6 |
408 | 495 | },
|
409 | 496 | "coqpilot.coqLspServerPath": {
|
410 | 497 | "type": "string",
|
411 | 498 | "description": "Path to the Coq LSP server. If not specified, CoqPilot will try to find the server automatically at the default location: coq-lsp at PATH.",
|
412 | 499 | "default": "coq-lsp",
|
413 |
| - "order": 6 |
| 500 | + "order": 7 |
414 | 501 | }
|
415 | 502 | }
|
416 | 503 | }
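For reference, a minimal sketch of how a user might instantiate the new coqpilot.deepSeekModelsParameters setting in settings.json, assuming the defaults declared in the schema above; the apiKey value is a placeholder, not a real credential, and should be replaced with a key from https://platform.deepseek.com/api_keys:

    "coqpilot.deepSeekModelsParameters": [
        {
            "modelId": "deep-seek-v3",
            "modelName": "deepseek-chat",
            "temperature": 1,
            // Placeholder: replace with your own key (VS Code settings.json accepts JSONC comments).
            "apiKey": "<YOUR_DEEPSEEK_API_KEY>",
            "choices": 15,
            "systemPrompt": "Generate proof of the theorem from user input in Coq. You should only generate proofs in Coq. Never add special comments to the proof. Your answer should be a valid Coq proof. It should start with 'Proof.' and end with 'Qed.'.",
            "maxTokensToGenerate": 2048,
            "tokensLimit": 4096,
            "maxContextTheoremsNumber": 100,
            "multiroundProfile": {
                "maxRoundsNumber": 1,
                "proofFixChoices": 1,
                "proofFixPrompt": "Unfortunately, the last proof is not correct. Here is the compiler's feedback: `${diagnostic}`. Please, fix the proof.",
                "maxPreviousProofVersionsNumber": 100
            }
        }
    ]

Since the setting is an array and, per its markdownDescription, each entry is fetched for completions independently in the order listed, several DeepSeek models (for example deepseek-chat and deepseek-reasoner) can be configured side by side.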