Skip to content

Commit

Permalink
Fix a crash that can happen when erasing a conversation during active…
Browse files Browse the repository at this point in the history
… generation.

Signed-off-by: Adam Treat <[email protected]>
  • Loading branch information
manyoso committed Dec 19, 2024
1 parent 056da5a commit fb861f9
Show file tree
Hide file tree
Showing 3 changed files with 22 additions and 15 deletions.
7 changes: 2 additions & 5 deletions gpt4all-chat/src/chat.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -240,15 +240,12 @@ void Chat::responseStopped(qint64 promptResponseMs)
emit responseInProgressChanged();
emit responseStateChanged();

const int index = m_chatModel->count() - 1;
ChatItem *item = m_chatModel->get(index);

const QString possibleToolcall = item->toolCallValue();
const QString possibleToolcall = m_chatModel->possibleToolcall();

ToolCallParser parser;
parser.update(possibleToolcall);

if (item->type() == ChatItem::Type::Response && parser.state() == ToolEnums::ParseState::Complete) {
if (parser.state() == ToolEnums::ParseState::Complete) {
const QString toolCall = parser.toolCall();

// Regex to remove the formatting around the code
Expand Down
20 changes: 15 additions & 5 deletions gpt4all-chat/src/chatllm.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -740,7 +740,9 @@ std::vector<MessageItem> ChatLLM::forkConversation(const QString &prompt) const
std::vector<MessageItem> conversation;
{
auto items = m_chatModel->messageItems();
Q_ASSERT(items.size() >= 2); // should be prompt/response pairs
// It is possible the main thread could have erased the conversation while the llm thread
// is busy forking the conversation, but it must have set stop generating first
Q_ASSERT(items.size() >= 2 || m_stopGenerating); // should be prompt/response pairs
conversation.reserve(items.size() + 1);
conversation.assign(items.begin(), items.end());
}
Expand Down Expand Up @@ -960,10 +962,18 @@ auto ChatLLM::promptInternal(
result.response.append(piece.data(), piece.size());
auto respStr = QString::fromUtf8(result.response);

if (toolCallParser.hasSplit())
m_chatModel->setResponseValue(toolCallParser.buffer());
else
m_chatModel->setResponseValue(removeLeadingWhitespace(respStr));
try {
if (toolCallParser.hasSplit())
m_chatModel->setResponseValue(toolCallParser.buffer());
else
m_chatModel->setResponseValue(removeLeadingWhitespace(respStr));
} catch (const std::exception &e) {
// We have a try/catch here because the main thread might have removed the response from
// the chatmodel by erasing the conversation during the response... the main thread sets
// m_stopGenerating before doing so, but it doesn't wait after that to reset the chatmodel
Q_ASSERT(m_stopGenerating);
return false;
}

emit responseChanged();

Expand Down
10 changes: 5 additions & 5 deletions gpt4all-chat/src/chatmodel.h
Original file line number Diff line number Diff line change
Expand Up @@ -305,10 +305,10 @@ class ChatItem : public QObject
return items;
}

QString toolCallValue() const
QString possibleToolCall() const
{
if (!subItems.empty())
return subItems.back()->toolCallValue();
return subItems.back()->possibleToolCall();
if (type() == Type::ToolCall)
return value;
else
Expand Down Expand Up @@ -735,11 +735,11 @@ class ChatModel : public QAbstractListModel
emit hasErrorChanged(false);
}

Q_INVOKABLE ChatItem *get(int index)
Q_INVOKABLE QString possibleToolcall() const
{
QMutexLocker locker(&m_mutex);
if (index < 0 || index >= m_chatItems.size()) return nullptr;
return m_chatItems.at(index);
if (m_chatItems.empty()) return QString();
return m_chatItems.back()->possibleToolCall();
}

Q_INVOKABLE void updateCurrentResponse(int index, bool b)
Expand Down

0 comments on commit fb861f9

Please sign in to comment.