
Commit 21a3244 (parent: 0458c9b)

Fix a bug where we're not properly falling back to CPU.

1 file changed: +2 -2 lines

gpt4all-chat/chatllm.cpp (+2 -2)

@@ -275,8 +275,8 @@ bool ChatLLM::loadModel(const ModelInfo &modelInfo)
     if (requestedDevice != "CPU") {
         const size_t requiredMemory = m_llModelInfo.model->requiredMem(filePath.toStdString());
         std::vector<LLModel::GPUDevice> availableDevices = m_llModelInfo.model->availableGPUDevices(requiredMemory);
-        if (!availableDevices.empty() && requestedDevice == "Auto" && devices.front().type == 2 /*a discrete gpu*/) {
-            m_llModelInfo.model->initializeGPUDevice(devices.front());
+        if (!availableDevices.empty() && requestedDevice == "Auto" && availableDevices.front().type == 2 /*a discrete gpu*/) {
+            m_llModelInfo.model->initializeGPUDevice(availableDevices.front());
         } else {
             for (LLModel::GPUDevice &d : availableDevices) {
                 if (QString::fromStdString(d.name) == requestedDevice) {
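The change is a one-variable fix: the "Auto" branch previously consulted an unrelated `devices` vector instead of `availableDevices`, the list already filtered by `requiredMemory`. Per the commit message, this broke the CPU fallback path. Below is a minimal, self-contained C++ sketch of the selection logic after the fix; it is not the gpt4all API, and every type, field, and function here (GPUDevice, heapSize, availableGPUDevices, chooseDevice) is a hypothetical stand-in used only to illustrate why the filtered list must be the one queried.

    // Sketch only: hypothetical stand-ins, not the gpt4all LLModel API.
    #include <iostream>
    #include <string>
    #include <vector>

    struct GPUDevice {
        int type;           // 2 == discrete GPU, mirroring the comment in the diff
        std::string name;
        size_t heapSize;    // total device memory (hypothetical field)
    };

    // Stand-in for LLModel::availableGPUDevices(requiredMemory):
    // keeps only devices with enough memory to hold the model.
    std::vector<GPUDevice> availableGPUDevices(const std::vector<GPUDevice> &all,
                                               size_t requiredMemory) {
        std::vector<GPUDevice> fit;
        for (const GPUDevice &d : all)
            if (d.heapSize >= requiredMemory)
                fit.push_back(d);
        return fit;
    }

    // Returns the chosen device name, or "CPU" when no suitable GPU exists.
    std::string chooseDevice(const std::vector<GPUDevice> &all,
                             const std::string &requestedDevice,
                             size_t requiredMemory) {
        if (requestedDevice == "CPU")
            return "CPU";
        std::vector<GPUDevice> availableDevices = availableGPUDevices(all, requiredMemory);
        // The fixed check: query the *filtered* list, so an "Auto" request
        // can only pick a discrete GPU that actually fits the model.
        if (!availableDevices.empty() && requestedDevice == "Auto"
            && availableDevices.front().type == 2 /*a discrete gpu*/) {
            return availableDevices.front().name;
        }
        for (const GPUDevice &d : availableDevices)
            if (d.name == requestedDevice)
                return d.name;
        return "CPU"; // nothing matched or nothing fits: fall back to CPU
    }

    int main() {
        std::vector<GPUDevice> gpus = {
            {2, "DiscreteGPU",   8ull << 30},  // 8 GiB
            {1, "IntegratedGPU", 2ull << 30},  // 2 GiB
        };
        // Model needs 12 GiB: no GPU qualifies, so "Auto" yields "CPU".
        std::cout << chooseDevice(gpus, "Auto", 12ull << 30) << '\n';
        // Model needs 4 GiB: the discrete GPU qualifies and is selected.
        std::cout << chooseDevice(gpus, "Auto", 4ull << 30) << '\n';
    }

Had the sketch checked an unfiltered list in the "Auto" branch (as the pre-fix code did), a too-small GPU could still be selected, and the final CPU fallback would never be reached.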
