diff --git a/gpt4all-chat/metadata/models3.json b/gpt4all-chat/metadata/models3.json
index 6c2444ec3b08..e93bdcfca56f 100644
--- a/gpt4all-chat/metadata/models3.json
+++ b/gpt4all-chat/metadata/models3.json
@@ -1,6 +1,22 @@
[
{
"order": "a",
+ "md5sum": "a54c08a7b90e4029a8c2ab5b5dc936aa",
+ "name": "Reasoner v1",
+ "filename": "qwen2.5-coder-7b-instruct-q4_0.gguf",
+ "filesize": "4431390720",
+ "requires": "3.5.4-dev0",
+ "ramrequired": "8",
+ "parameters": "8 billion",
+ "quant": "q4_0",
+ "type": "qwen2",
+ "description": "
",
+ "url": "https://huggingface.co/Qwen/Qwen2.5-Coder-7B-Instruct-GGUF/resolve/main/qwen2.5-coder-7b-instruct-q4_0.gguf",
+ "chatTemplate": "{{- '<|im_start|>system\\n' }}\n{% if toolList|length > 0 %}You have access to the following functions:\n{% for tool in toolList %}\nUse the function '{{tool.function}}' to: '{{tool.description}}'\n{% if tool.parameters|length > 0 %}\nparameters:\n{% for info in tool.parameters %}\n {{info.name}}:\n type: {{info.type}}\n description: {{info.description}}\n required: {{info.required}}\n{% endfor %}\n{% endif %}\n# Tool Instructions\nIf you CHOOSE to call this function ONLY reply with the following format:\n'{{tool.symbolicFormat}}'\nHere is an example. If the user says, '{{tool.examplePrompt}}', then you reply\n'{{tool.exampleCall}}'\nAfter the result you might reply with, '{{tool.exampleReply}}'\n{% endfor %}\nYou MUST include both the start and end tags when you use a function.\n\nYou are a helpful AI assistant who uses the functions to break down, analyze, perform, and verify complex reasoning tasks. You SHOULD try to verify your answers using the functions where possible.\n{% endif %}\n{{- '<|im_end|>\\n' }}\n{% for message in messages %}\n{{'<|im_start|>' + message['role'] + '\\n' + message['content'] + '<|im_end|>' + '\\n' }}\n{% endfor %}\n{% if add_generation_prompt %}\n{{ '<|im_start|>assistant\\n' }}\n{% endif %}\n",
+ "systemPrompt": ""
+ },
+ {
+ "order": "aa",
"md5sum": "c87ad09e1e4c8f9c35a5fcef52b6f1c9",
"name": "Llama 3 8B Instruct",
"filename": "Meta-Llama-3-8B-Instruct.Q4_0.gguf",
diff --git a/gpt4all-chat/qml/AddGPT4AllModelView.qml b/gpt4all-chat/qml/AddGPT4AllModelView.qml
index dd8da3ed90f5..2a1832af14fa 100644
--- a/gpt4all-chat/qml/AddGPT4AllModelView.qml
+++ b/gpt4all-chat/qml/AddGPT4AllModelView.qml
@@ -56,6 +56,52 @@ ColumnLayout {
Accessible.description: qsTr("Displayed when the models request is ongoing")
}
+ RowLayout {
+ ButtonGroup {
+ id: buttonGroup
+ exclusive: true
+ }
+ MyButton {
+ text: qsTr("All")
+ checked: true
+ borderWidth: 0
+ backgroundColor: checked ? theme.lightButtonBackground : "transparent"
+ backgroundColorHovered: theme.lighterButtonBackgroundHovered
+ backgroundRadius: 5
+ padding: 15
+ topPadding: 8
+ bottomPadding: 8
+ textColor: theme.lighterButtonForeground
+ fontPixelSize: theme.fontSizeLarge
+ fontPixelBold: true
+ checkable: true
+ ButtonGroup.group: buttonGroup
+ onClicked: {
+ ModelList.gpt4AllDownloadableModels.filter([]);
+ }
+ }
+ MyButton {
+ text: qsTr("Reasoning")
+ borderWidth: 0
+ backgroundColor: checked ? theme.lightButtonBackground : "transparent"
+ backgroundColorHovered: theme.lighterButtonBackgroundHovered
+ backgroundRadius: 5
+ padding: 15
+ topPadding: 8
+ bottomPadding: 8
+ textColor: theme.lighterButtonForeground
+ fontPixelSize: theme.fontSizeLarge
+ fontPixelBold: true
+ checkable: true
+ ButtonGroup.group: buttonGroup
+ onClicked: {
+ ModelList.gpt4AllDownloadableModels.filter(["#reasoning"]);
+ }
+ }
+ Layout.bottomMargin: 10
+ }
+
ScrollView {
id: scrollView
ScrollBar.vertical.policy: ScrollBar.AsNeeded
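The two buttons above pass keyword lists straight into the new `Q_INVOKABLE filter()` on the proxy model. For reference, the same calls can be driven from C++; a small sketch follows, assuming the `gpt4AllDownloadableModels()` accessor mirrors the QML property of the same name (the `demoReasoningFilter` wrapper is hypothetical).

```cpp
#include <QDebug>
#include "modellist.h"

// Sketch: toggle the keyword filter the same way the QML buttons do.
void demoReasoningFilter()
{
    auto *proxy = ModelList::globalInstance()->gpt4AllDownloadableModels();
    proxy->filter({QStringLiteral("#reasoning")}); // "Reasoning" button: tagged models only
    qDebug() << "visible after filter:" << proxy->count();
    proxy->filter({});                             // "All" button: clear the filter
}
```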
diff --git a/gpt4all-chat/src/modellist.cpp b/gpt4all-chat/src/modellist.cpp
index a6c5a620a070..36b7c55be6ba 100644
--- a/gpt4all-chat/src/modellist.cpp
+++ b/gpt4all-chat/src/modellist.cpp
@@ -473,14 +473,24 @@ GPT4AllDownloadableModels::GPT4AllDownloadableModels(QObject *parent)
connect(this, &GPT4AllDownloadableModels::modelReset, this, &GPT4AllDownloadableModels::countChanged);
}
+void GPT4AllDownloadableModels::filter(const QVector<QString> &keywords)
+{
+ m_keywords = keywords;
+ invalidateFilter();
+}
+
bool GPT4AllDownloadableModels::filterAcceptsRow(int sourceRow,
const QModelIndex &sourceParent) const
{
QModelIndex index = sourceModel()->index(sourceRow, 0, sourceParent);
- bool hasDescription = !sourceModel()->data(index, ModelList::DescriptionRole).toString().isEmpty();
+ const QString description = sourceModel()->data(index, ModelList::DescriptionRole).toString();
+ bool hasDescription = !description.isEmpty();
bool isClone = sourceModel()->data(index, ModelList::IsCloneRole).toBool();
bool isDiscovered = sourceModel()->data(index, ModelList::IsDiscoveredRole).toBool();
- return !isDiscovered && hasDescription && !isClone;
+ // Keep rows whose description mentions any of the active keywords;
+ // an empty keyword list leaves every row visible.
+ bool satisfiesKeyword = m_keywords.isEmpty();
+ for (const QString &k : m_keywords)
+     satisfiesKeyword = satisfiesKeyword || description.contains(k);
+ return !isDiscovered && hasDescription && !isClone && satisfiesKeyword;
}
int GPT4AllDownloadableModels::count() const
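Note that the keyword check is an OR across keywords against the raw description string, which is why the `#reasoning` tag lives in the model's description in models3.json. A self-contained sketch of the same semantics outside the proxy model (not a test in the tree):

```cpp
#include <QDebug>
#include <QString>
#include <QVector>

int main()
{
    const QVector<QString> keywords{QStringLiteral("#reasoning")};
    const QVector<QString> descriptions{
        QStringLiteral("<ul><li>Fast responses</li></ul>"),
        QStringLiteral("<ul><li>#reasoning</li></ul>"),
    };
    for (const QString &d : descriptions) {
        bool pass = keywords.isEmpty(); // empty filter accepts everything
        for (const QString &k : keywords)
            pass = pass || d.contains(k);
        qDebug() << d << "->" << pass;  // only the tagged entry passes
    }
    return 0;
}
```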
diff --git a/gpt4all-chat/src/modellist.h b/gpt4all-chat/src/modellist.h
index 0e22b931d934..0bcc97b484ee 100644
--- a/gpt4all-chat/src/modellist.h
+++ b/gpt4all-chat/src/modellist.h
@@ -302,11 +302,16 @@ class GPT4AllDownloadableModels : public QSortFilterProxyModel
explicit GPT4AllDownloadableModels(QObject *parent);
int count() const;
+ Q_INVOKABLE void filter(const QVector<QString> &keywords);
+
Q_SIGNALS:
void countChanged();
protected:
bool filterAcceptsRow(int sourceRow, const QModelIndex &sourceParent) const override;
+
+private:
+ QVector<QString> m_keywords;
};
class HuggingFaceDownloadableModels : public QSortFilterProxyModel