LlamaIndex integration for DigitalOcean Gradient AI with full support for function/tool calling.
## Installation

```bash
pip install llama-index-llms-digitalocean-gradientai
```

This package uses the official Gradient SDK (PyPI package: `gradient`) under the hood; it is installed automatically as a dependency.
## Usage

### Completion

```python
from llama_index.llms.digitalocean.gradientai import GradientAI

llm = GradientAI(
    model="openai-gpt-oss-120b",
    model_access_key="your-api-key",
)

response = llm.complete("What is DigitalOcean Gradient AI Platform?")
print(response.text)
```
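The examples in this README hardcode the access key for brevity. In practice you may prefer to read it from an environment variable; the variable name below is only an illustration, not one the package requires:

```python
import os

from llama_index.llms.digitalocean.gradientai import GradientAI

# Read the key from the environment instead of hardcoding it;
# the variable name DO_MODEL_ACCESS_KEY is arbitrary.
llm = GradientAI(
    model="openai-gpt-oss-120b",
    model_access_key=os.environ["DO_MODEL_ACCESS_KEY"],
)
```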
### Chat

```python
from llama_index.core.llms import ChatMessage
from llama_index.llms.digitalocean.gradientai import GradientAI

llm = GradientAI(
    model="openai-gpt-oss-120b",
    model_access_key="your-api-key",
)

messages = [
    ChatMessage(role="system", content="You are a helpful assistant."),
    ChatMessage(role="user", content="What is the capital of France?"),
]

response = llm.chat(messages)
print(response.message.content)
```

### Streaming

```python
from llama_index.llms.digitalocean.gradientai import GradientAI
llm = GradientAI(
    model="openai-gpt-oss-120b",
    model_access_key="your-api-key",
)

# Streaming completion
for chunk in llm.stream_complete("Tell me a story about AI:"):
    print(chunk.delta, end="", flush=True)
```
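If the integration follows the standard LlamaIndex streaming-chat interface (an assumption worth verifying for your installed version), chat responses can be streamed the same way with `stream_chat`:

```python
from llama_index.core.llms import ChatMessage
from llama_index.llms.digitalocean.gradientai import GradientAI

llm = GradientAI(
    model="openai-gpt-oss-120b",
    model_access_key="your-api-key",
)

messages = [ChatMessage(role="user", content="Tell me a story about AI:")]

# Each chunk carries the newly generated text in .delta
for chunk in llm.stream_chat(messages):
    print(chunk.delta, end="", flush=True)
```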
### Async

```python
import asyncio

from llama_index.llms.digitalocean.gradientai import GradientAI

async def main():
    llm = GradientAI(
        model="openai-gpt-oss-120b",
        model_access_key="your-api-key",
    )
    response = await llm.acomplete("What is Gradient?")
    print(response.text)

asyncio.run(main())
```

## Function / Tool Calling

This integration supports OpenAI-compatible function calling, enabling the LLM to invoke tools based on user queries.
```python
from llama_index.llms.digitalocean.gradientai import GradientAI
from llama_index.core.tools import FunctionTool

# Define tools
def add(a: int, b: int) -> int:
    """Add two numbers together."""
    return a + b

def multiply(a: int, b: int) -> int:
    """Multiply two numbers together."""
    return a * b

# Create tool instances
add_tool = FunctionTool.from_defaults(fn=add)
multiply_tool = FunctionTool.from_defaults(fn=multiply)
tools = [add_tool, multiply_tool]

# Initialize LLM
llm = GradientAI(
    model="openai-gpt-oss-120b",
    model_access_key="your-api-key",
)

# Chat with tools
response = llm.chat_with_tools(
    tools=tools,
    user_msg="What is 5 multiplied by 8?",
)
print(response.message)

# Extract tool calls from response
tool_calls = llm.get_tool_calls_from_response(
    response,
    error_on_no_tool_call=False,
)
for tool_call in tool_calls:
    print(f"Tool: {tool_call.tool_name}, Args: {tool_call.tool_kwargs}")
```
For automatic tool execution and result handling:

```python
from llama_index.llms.digitalocean.gradientai import GradientAI
from llama_index.core.tools import FunctionTool

def add(a: int, b: int) -> int:
    """Add two numbers together."""
    return a + b

add_tool = FunctionTool.from_defaults(fn=add)

llm = GradientAI(
    model="openai-gpt-oss-120b",
    model_access_key="your-api-key",
)

# Automatically calls the tool and returns the result
response = llm.predict_and_call(
    tools=[add_tool],
    user_msg="What is 10 plus 15?",
)
print(response)  # Output: 25
```
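`predict_and_call` returns an agent-style response; if it follows the usual LlamaIndex `AgentChatResponse` shape (an assumption worth verifying for your version), the raw tool outputs are also available on `.sources`:

```python
# Inspect the tool outputs attached to the response (assumes the
# standard AgentChatResponse interface with a .sources list).
for tool_output in response.sources:
    print(tool_output.tool_name, tool_output.content)
```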
### Async Tool Calling

```python
import asyncio

from llama_index.llms.digitalocean.gradientai import GradientAI
from llama_index.core.tools import FunctionTool

def multiply(a: int, b: int) -> int:
    """Multiply two numbers together."""
    return a * b

multiply_tool = FunctionTool.from_defaults(fn=multiply)

async def main():
    llm = GradientAI(
        model="openai-gpt-oss-120b",
        model_access_key="your-api-key",
    )
    response = await llm.achat_with_tools(
        tools=[multiply_tool],
        user_msg="What is 7 times 9?",
    )
    print(response.message)

asyncio.run(main())
```

## RAG Example

Use GradientAI with LlamaIndex's RAG capabilities to query your own documents.
Setup: Create a `data/` folder and add your text files (`.txt`, `.pdf`, `.docx`, etc.):

```
your_project/
├── data/
│   ├── document1.txt
│   ├── document2.pdf
│   └── ...
└── rag_example.py
```
Example:

```python
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Settings
from llama_index.llms.digitalocean.gradientai import GradientAI
from llama_index.embeddings.fastembed import FastEmbedEmbedding

# Initialize the DigitalOcean Gradient AI LLM
llm = GradientAI(
    model="openai-gpt-oss-120b",
    model_access_key="your-api-key",
)

# Use FastEmbed embeddings (lightweight, runs locally, no API key needed)
embed_model = FastEmbedEmbedding(model_name="BAAI/bge-small-en-v1.5")

# Configure LlamaIndex settings
Settings.llm = llm
Settings.embed_model = embed_model

# Load documents and build index
documents = SimpleDirectoryReader("data").load_data()
index = VectorStoreIndex.from_documents(documents)

# Query index
query_engine = index.as_query_engine()
response = query_engine.query("Your question about the documents?")
print(response)
```
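To avoid re-embedding the documents on every run, the index can be persisted with LlamaIndex's standard storage utilities (a sketch continuing from the example above; the `storage` directory name is arbitrary):

```python
from llama_index.core import StorageContext, load_index_from_storage

# Persist the freshly built index to disk...
index.storage_context.persist(persist_dir="storage")

# ...and reload it later without re-embedding (Settings.embed_model
# must still be configured as above when loading).
storage_context = StorageContext.from_defaults(persist_dir="storage")
index = load_index_from_storage(storage_context)
```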
Note: Install the FastEmbed embeddings package:

```bash
pip install fastembed
```
`SimpleDirectoryReader` supports many file types, including `.txt`, `.docx`, `.csv`, `.md`, and more.
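To restrict which file types are loaded, pass the `required_exts` filter (a standard `SimpleDirectoryReader` parameter):

```python
from llama_index.core import SimpleDirectoryReader

# Only load Markdown and plain-text files from the data/ folder.
documents = SimpleDirectoryReader(
    "data",
    required_exts=[".md", ".txt"],
).load_data()
```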
## License

MIT