44 changes: 44 additions & 0 deletions common/common.cmake
@@ -0,0 +1,44 @@
+function(gpt4all_add_warning_options target)
+    if (MSVC)
+        return()
+    endif()
+    target_compile_options("${target}" PRIVATE
+        # base options
+        -Wall
+        -Wextra
+        # extra options
+        -Wcast-align
+        -Wextra-semi
+        -Wformat=2
+        -Wmissing-include-dirs
+        -Wnull-dereference
+        -Wstrict-overflow=2
+        -Wvla
+        # errors
+        -Werror=format-security
+        -Werror=init-self
+        -Werror=pointer-arith
+        -Werror=undef
+        # disabled warnings
+        -Wno-sign-compare
+        -Wno-unused-parameter
+        -Wno-unused-function
+        -Wno-unused-variable
+    )
+    if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
+        target_compile_options("${target}" PRIVATE
+            -Wduplicated-branches
+            -Wduplicated-cond
+            -Wlogical-op
+            -Wno-reorder
+            -Wno-null-dereference
+        )
+    elseif (CMAKE_CXX_COMPILER_ID MATCHES "^(Apple)?Clang$")
+        target_compile_options("${target}" PRIVATE
+            -Wunreachable-code-break
+            -Wunreachable-code-return
+            -Werror=pointer-integer-compare
+            -Wno-reorder-ctor
+        )
+    endif()
+endfunction()
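For context, a few illustrative snippets (not code from this PR) of what the stricter flags diagnose; each comment names the flag from the list above:

void fill(int n) {
    int buf[n];                // -Wvla: variable-length arrays are a compiler extension in C++
    buf[0] = 0;
}

#if ENABLE_EXPERIMENTAL        // -Werror=undef: macro evaluated in #if but never defined
#endif

int pick(int x) {
    if (x > 0) return 1;
    else if (x > 0) return 2;  // -Wduplicated-cond (GCC branch only): repeated condition
    return 0;
}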
7 changes: 5 additions & 2 deletions gpt4all-backend/CMakeLists.txt
@@ -1,4 +1,7 @@
 cmake_minimum_required(VERSION 3.23) # for FILE_SET
+
+include(../common/common.cmake)
+
 set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON)
 set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
 
@@ -94,8 +97,6 @@ if (LLMODEL_ROCM)
     list(APPEND BUILD_VARIANTS rocm rocm-avxonly)
 endif()
 
-set(CMAKE_VERBOSE_MAKEFILE ON)
-
 # Go through each build variant
 foreach(BUILD_VARIANT IN LISTS BUILD_VARIANTS)
     # Determine flags
@@ -151,6 +152,7 @@ foreach(BUILD_VARIANT IN LISTS BUILD_VARIANTS)
     # Add each individual implementations
     add_library(llamamodel-mainline-${BUILD_VARIANT} SHARED
         src/llamamodel.cpp src/llmodel_shared.cpp)
+    gpt4all_add_warning_options(llamamodel-mainline-${BUILD_VARIANT})
     target_compile_definitions(llamamodel-mainline-${BUILD_VARIANT} PRIVATE
         LLAMA_VERSIONS=>=3 LLAMA_DATE=999999)
     target_include_directories(llamamodel-mainline-${BUILD_VARIANT} PRIVATE
@@ -169,6 +171,7 @@ add_library(llmodel
     src/llmodel_c.cpp
     src/llmodel_shared.cpp
 )
+gpt4all_add_warning_options(llmodel)
 target_sources(llmodel PUBLIC
     FILE_SET public_headers TYPE HEADERS BASE_DIRS include
     FILES include/gpt4all-backend/llmodel.h
2 changes: 1 addition & 1 deletion gpt4all-backend/include/gpt4all-backend/llmodel.h
@@ -146,7 +146,7 @@ class LLModel {
     virtual bool supportsEmbedding() const = 0;
     virtual bool supportsCompletion() const = 0;
     virtual bool loadModel(const std::string &modelPath, int n_ctx, int ngl) = 0;
-    virtual bool isModelBlacklisted(const std::string &modelPath) const { (void)modelPath; return false; };
+    virtual bool isModelBlacklisted(const std::string &modelPath) const { (void)modelPath; return false; }
     virtual bool isEmbeddingModel(const std::string &modelPath) const { (void)modelPath; return false; }
     virtual bool isModelLoaded() const = 0;
     virtual size_t requiredMem(const std::string &modelPath, int n_ctx, int ngl) = 0;
2 changes: 1 addition & 1 deletion gpt4all-backend/src/llmodel_shared.cpp
@@ -260,7 +260,7 @@ void LLModel::generateResponse(std::function<bool(int32_t, const std::string&)>
         cachedTokens.push_back(new_tok.value());
         cachedResponse += new_piece;
 
-        auto accept = [this, &promptCtx, &cachedTokens, &new_tok, allowContextShift]() -> bool {
+        auto accept = [this, &promptCtx, &new_tok, allowContextShift]() -> bool {
             // Shift context if out of space
             if (promptCtx.n_past >= promptCtx.n_ctx) {
                 (void)allowContextShift;
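The capture removed above, &cachedTokens, was never referenced in the lambda body. A minimal sketch of the pattern, assuming Clang's -Wunused-lambda-capture is what flags it under the warning set added in this PR:

#include <vector>

bool overLimit(const std::vector<int> &v, int limit) {
    std::vector<int> scratch;                       // stands in for cachedTokens
    auto check = [&v, limit, &scratch] {            // &scratch is captured here ...
        return static_cast<int>(v.size()) > limit;  // ... but the body never uses it
    };
    return check();                                 // fix: drop &scratch from the capture list
}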
3 changes: 3 additions & 0 deletions gpt4all-chat/CMakeLists.txt
@@ -1,5 +1,7 @@
 cmake_minimum_required(VERSION 3.25) # for try_compile SOURCE_FROM_VAR
 
+include(../common/common.cmake)
+
 set(APP_VERSION_MAJOR 3)
 set(APP_VERSION_MINOR 4)
 set(APP_VERSION_PATCH 0)
@@ -157,6 +159,7 @@ qt_add_executable(chat
     src/xlsxtomd.cpp src/xlsxtomd.h
     ${CHAT_EXE_RESOURCES}
 )
+gpt4all_add_warning_options(chat)
 
 qt_add_qml_module(chat
     URI gpt4all
2 changes: 1 addition & 1 deletion gpt4all-chat/src/chat.h
@@ -33,7 +33,7 @@ class Chat : public QObject
     Q_PROPERTY(ResponseState responseState READ responseState NOTIFY responseStateChanged)
     Q_PROPERTY(QList<QString> collectionList READ collectionList NOTIFY collectionListChanged)
     Q_PROPERTY(QString modelLoadingError READ modelLoadingError NOTIFY modelLoadingErrorChanged)
-    Q_PROPERTY(QString tokenSpeed READ tokenSpeed NOTIFY tokenSpeedChanged);
+    Q_PROPERTY(QString tokenSpeed READ tokenSpeed NOTIFY tokenSpeedChanged)
     Q_PROPERTY(QString deviceBackend READ deviceBackend NOTIFY loadedModelInfoChanged)
     Q_PROPERTY(QString device READ device NOTIFY loadedModelInfoChanged)
     Q_PROPERTY(QString fallbackReason READ fallbackReason NOTIFY loadedModelInfoChanged)
2 changes: 1 addition & 1 deletion gpt4all-chat/src/chatllm.cpp
@@ -585,7 +585,7 @@ bool ChatLLM::loadNewModel(const ModelInfo &modelInfo, QVariantMap &modelLoadPro
 
     modelLoadProps.insert("$duration", modelLoadTimer.elapsed() / 1000.);
     return true;
-};
+}
 
 bool ChatLLM::isModelLoaded() const
 {
4 changes: 2 additions & 2 deletions gpt4all-chat/src/chatmodel.h
@@ -65,8 +65,8 @@ struct ChatItem
     Q_PROPERTY(bool thumbsDownState MEMBER thumbsDownState)
     Q_PROPERTY(QList<ResultInfo> sources MEMBER sources)
     Q_PROPERTY(QList<ResultInfo> consolidatedSources MEMBER consolidatedSources)
-    Q_PROPERTY(QList<PromptAttachment> promptAttachments MEMBER promptAttachments);
-    Q_PROPERTY(QString promptPlusAttachments READ promptPlusAttachments);
+    Q_PROPERTY(QList<PromptAttachment> promptAttachments MEMBER promptAttachments)
+    Q_PROPERTY(QString promptPlusAttachments READ promptPlusAttachments)
 
 public:
     QString promptPlusAttachments() const
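The semicolons dropped from the Q_PROPERTY lines above (chat.h and chatmodel.h) are harmless but trip -Wextra-semi: under normal compilation Q_PROPERTY expands to nothing (moc parses it separately), so the trailing semicolon is left behind as an empty declaration at class scope, just like a stray semicolon after a member function body. A minimal sketch with a stand-in macro:

#define DECLARE_META(x)           // expands to nothing, as Q_PROPERTY does for the compiler

struct Sketch {
    DECLARE_META(tokenSpeed);     // -Wextra-semi: the macro vanishes, leaving a stray ';'
    int close() { return 0; };    // -Wextra-semi: redundant ';' after the function body
};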
12 changes: 7 additions & 5 deletions gpt4all-chat/src/database.cpp
@@ -296,10 +296,12 @@ static bool selectAllUncompletedChunks(QSqlQuery &q, QHash<IncompleteChunk, QStr
     while (q.next()) {
         QString collection = q.value(0).toString();
         IncompleteChunk ic {
-            /*embedding_model =*/ q.value(1).toString(),
-            /*chunk_id =*/ q.value(2).toInt(),
-            /*folder_id =*/ q.value(3).toInt(),
-            /*text =*/ q.value(4).toString(),
+            /*EmbeddingKey*/ {
+                .embedding_model = q.value(1).toString(),
+                .chunk_id = q.value(2).toInt(),
+            },
+            /*folder_id =*/ q.value(3).toInt(),
+            /*text =*/ q.value(4).toString(),
         };
         chunks[ic] << collection;
     }
@@ -1659,7 +1661,7 @@ void Database::scanQueue()
         if (info.isPdf()) {
             QPdfDocument doc;
             if (doc.load(document_path) != QPdfDocument::Error::None) {
-                qWarning() << "ERROR: Could not load pdf" << document_id << document_path;;
+                qWarning() << "ERROR: Could not load pdf" << document_id << document_path;
                 return updateFolderToIndex(folder_id, countForFolder);
             }
             title = doc.metaData(QPdfDocument::MetaDataField::Title).toString();
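The rewritten initializer above names the EmbeddingKey members with C++20 designated initializers, so the compiler checks the field mapping that the old /*comment*/ labels only documented. A self-contained sketch of the pattern, with illustrative stand-ins for the real types and values:

#include <string>

struct EmbeddingKey {
    std::string embedding_model;
    int chunk_id;
};

struct IncompleteChunk {
    EmbeddingKey key;
    int folder_id;
    std::string text;
};

int main() {
    IncompleteChunk ic {
        /*EmbeddingKey*/ {
            .embedding_model = "example-embedder",  // designators must follow declaration order
            .chunk_id = 7,
        },
        /*folder_id =*/ 3,
        /*text =*/ "example chunk",
    };
    return ic.key.chunk_id == 7 ? 0 : 1;
}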
1 change: 0 additions & 1 deletion gpt4all-chat/src/database.h
@@ -176,7 +176,6 @@ class ChunkStreamer {
     QString m_author;
     QString m_subject;
     QString m_keywords;
-    bool m_atStart;
 
     // working state
     QString m_chunk; // has a trailing space for convenience
10 changes: 5 additions & 5 deletions gpt4all-chat/src/modellist.cpp
@@ -502,7 +502,7 @@ ModelList::ModelList()
     connect(MySettings::globalInstance(), &MySettings::contextLengthChanged, this, &ModelList::updateDataForSettings);
     connect(MySettings::globalInstance(), &MySettings::gpuLayersChanged, this, &ModelList::updateDataForSettings);
     connect(MySettings::globalInstance(), &MySettings::repeatPenaltyChanged, this, &ModelList::updateDataForSettings);
-    connect(MySettings::globalInstance(), &MySettings::repeatPenaltyTokensChanged, this, &ModelList::updateDataForSettings);;
+    connect(MySettings::globalInstance(), &MySettings::repeatPenaltyTokensChanged, this, &ModelList::updateDataForSettings);
     connect(MySettings::globalInstance(), &MySettings::promptTemplateChanged, this, &ModelList::updateDataForSettings);
     connect(MySettings::globalInstance(), &MySettings::systemPromptChanged, this, &ModelList::updateDataForSettings);
     connect(&m_networkManager, &QNetworkAccessManager::sslErrors, this, &ModelList::handleSslErrors);
@@ -518,12 +518,12 @@ QString ModelList::compatibleModelNameHash(QUrl baseUrl, QString modelName) {
     QCryptographicHash sha256(QCryptographicHash::Sha256);
     sha256.addData((baseUrl.toString() + "_" + modelName).toUtf8());
     return sha256.result().toHex();
-};
+}
 
 QString ModelList::compatibleModelFilename(QUrl baseUrl, QString modelName) {
     QString hash(compatibleModelNameHash(baseUrl, modelName));
     return QString(u"gpt4all-%1-capi.rmodel"_s).arg(hash);
-};
+}
 
 bool ModelList::eventFilter(QObject *obj, QEvent *ev)
 {
@@ -2100,7 +2100,7 @@ void ModelList::parseDiscoveryJsonFile(const QByteArray &jsonData)
     emit discoverProgressChanged();
     if (!m_discoverNumberOfResults) {
         m_discoverInProgress = false;
-        emit discoverInProgressChanged();;
+        emit discoverInProgressChanged();
     }
 }
 
@@ -2178,7 +2178,7 @@ void ModelList::handleDiscoveryItemFinished()
     if (discoverProgress() >= 1.0) {
         emit layoutChanged();
         m_discoverInProgress = false;
-        emit discoverInProgressChanged();;
+        emit discoverInProgressChanged();
     }
 
     reply->deleteLater();
2 changes: 1 addition & 1 deletion gpt4all-chat/src/mysettings.cpp
@@ -186,7 +186,7 @@ void MySettings::restoreModelDefaults(const ModelInfo &info)
     setModelTemperature(info, info.m_temperature);
     setModelTopP(info, info.m_topP);
     setModelMinP(info, info.m_minP);
-    setModelTopK(info, info.m_topK);;
+    setModelTopK(info, info.m_topK);
     setModelMaxLength(info, info.m_maxLength);
     setModelPromptBatchSize(info, info.m_promptBatchSize);
     setModelContextLength(info, info.m_contextLength);