Add singleheader and tests for generation with context.
jmont-dev committed Aug 18, 2024
1 parent 7de8114 commit de85c5b
Showing 2 changed files with 43 additions and 1 deletion.
26 changes: 25 additions & 1 deletion singleheader/ollama.hpp
@@ -35171,6 +35171,13 @@ class Ollama
Ollama(): Ollama("http://localhost:11434") {}
~Ollama() { delete this->cli; }

ollama::response generate(const std::string& model,const std::string& prompt, const ollama::response& context, const json& options=nullptr, const std::vector<std::string>& images=std::vector<std::string>())
{
ollama::request request(model, prompt, options, false, images);
if ( context.as_json().contains("context") ) request["context"] = context.as_json()["context"];
return generate(request);
}

ollama::response generate(const std::string& model,const std::string& prompt, const json& options=nullptr, const std::vector<std::string>& images=std::vector<std::string>())
{
ollama::request request(model, prompt, options, false, images);
@@ -35201,6 +35208,13 @@ class Ollama
return response;
}

bool generate(const std::string& model,const std::string& prompt, ollama::response& context, std::function<void(const ollama::response&)> on_receive_token, const json& options=nullptr, const std::vector<std::string>& images=std::vector<std::string>())
{
ollama::request request(model, prompt, options, true, images);
if ( context.as_json().contains("context") ) request["context"] = context.as_json()["context"];
return generate(request, on_receive_token);
}

bool generate(const std::string& model,const std::string& prompt, std::function<void(const ollama::response&)> on_receive_token, const json& options=nullptr, const std::vector<std::string>& images=std::vector<std::string>())
{
ollama::request request(model, prompt, options, true, images);
@@ -35640,11 +35654,16 @@ namespace ollama
ollama.setServerURL(server_url);
}

- inline ollama::response generate(const std::string& model,const std::string& prompt,const json& options=nullptr, const std::vector<std::string>& images=std::vector<std::string>())
+ inline ollama::response generate(const std::string& model, const std::string& prompt, const json& options=nullptr, const std::vector<std::string>& images=std::vector<std::string>())
{
return ollama.generate(model, prompt, options, images);
}

ollama::response generate(const std::string& model,const std::string& prompt, const ollama::response& context, const json& options=nullptr, const std::vector<std::string>& images=std::vector<std::string>())
{
return ollama.generate(model, prompt, context, options, images);
}

inline ollama::response generate(const ollama::request& request)
{
return ollama.generate(request);
@@ -35655,6 +35674,11 @@ namespace ollama
return ollama.generate(model, prompt, on_receive_response, options, images);
}

inline bool generate(const std::string& model,const std::string& prompt, ollama::response& context, std::function<void(const ollama::response&)> on_receive_response, const json& options=nullptr, const std::vector<std::string>& images=std::vector<std::string>())
{
return ollama.generate(model, prompt, context, on_receive_response, options, images);
}

inline bool generate(ollama::request& request, std::function<void(const ollama::response&)> on_receive_response)
{
return ollama.generate(request, on_receive_response);
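
The added overloads copy the "context" array from a prior response into the follow-up request (via context.as_json()["context"]), so a second generation continues from where the first left off. A minimal sketch of the blocking call path, assuming a running local Ollama server and a placeholder model name "llama3" (neither is part of this commit), and assuming as_json() returns an nlohmann-style json object, as the contains()/operator[] usage above suggests:

    #include <iostream>
    #include "ollama.hpp"

    int main()
    {
        // First turn: no prior context.
        ollama::response context = ollama::generate("llama3", "Why is the sky blue?");

        // Second turn: the new overload copies context.as_json()["context"]
        // into the request before dispatching it.
        ollama::response reply = ollama::generate("llama3", "Tell me more about this.", context);

        std::cout << reply.as_json()["response"] << std::endl;
        return 0;
    }
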
18 changes: 18 additions & 0 deletions test/test.cpp
@@ -108,6 +108,14 @@ TEST_SUITE("Ollama Tests") {
CHECK( response.as_json().contains("response") == true );
}

TEST_CASE("Generation with Context") {

ollama::response context = ollama::generate(test_model, "Why is the sky blue?", options);

ollama::response response = ollama::generate(test_model, "Tell me more about this.", context, options);

CHECK( response.as_json().contains("response") == true );
}

std::atomic<bool> done{false};
std::string streamed_response;
@@ -130,6 +138,16 @@ TEST_SUITE("Ollama Tests") {
CHECK( streamed_response != "" );
}

TEST_CASE("Streaming Generation with Context") {

ollama::response context = ollama::generate(test_model, "Why is the sky blue?", options);

std::function<void(const ollama::response&)> response_callback = on_receive_response;
ollama::generate(test_model, "Tell me more about this.", context, response_callback, options);

CHECK( streamed_response != "" );
}

TEST_CASE("Non-Singleton Generation") {

Ollama my_ollama_server("http://localhost:11434");
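
The streaming path works the same way, except the context response is taken by non-const reference and tokens arrive through a callback. A sketch under the same assumptions (placeholder model name, running local server, and the standard Ollama streaming payload, in which each chunk's json carries a "response" fragment):

    #include <functional>
    #include <iostream>
    #include "ollama.hpp"

    int main()
    {
        ollama::response context = ollama::generate("llama3", "Why is the sky blue?");

        // Wrapping the lambda in std::function mirrors the test above and keeps
        // overload resolution unambiguous across the generate() overload set.
        std::function<void(const ollama::response&)> on_token =
            [](const ollama::response& token)
            {
                // nlohmann-style access; each streamed chunk holds one fragment.
                std::cout << token.as_json()["response"].get<std::string>() << std::flush;
            };

        ollama::generate("llama3", "Tell me more about this.", context, on_token);
        return 0;
    }
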
