Skip to content

Gemini #100

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Draft
wants to merge 8 commits into
base: main
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 10 additions & 6 deletions package.json
Original file line number Diff line number Diff line change
Expand Up @@ -65,12 +65,13 @@
"@jupyterlab/rendermime": "^4.4.0",
"@jupyterlab/settingregistry": "^4.4.0",
"@jupyterlab/ui-components": "^4.4.0",
"@langchain/anthropic": "^0.3.9",
"@langchain/community": "^0.3.44",
"@langchain/core": "^0.3.57",
"@langchain/mistralai": "^0.1.1",
"@langchain/ollama": "^0.2.0",
"@langchain/openai": "^0.4.4",
"@langchain/anthropic": "^0.3.22",
"@langchain/community": "^0.3.46",
"@langchain/core": "^0.3.58",
"@langchain/google-genai": "^0.2.12",
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The latest version of @langchain/google-genai seems to depend on newer releases of @langchain/core, so bumping the other langchain dependencies here.

"@langchain/mistralai": "^0.2.1",
"@langchain/ollama": "^0.2.2",
"@langchain/openai": "^0.5.13",
"@lumino/coreutils": "^2.1.2",
"@lumino/polling": "^2.1.2",
"@lumino/signaling": "^2.1.2",
Expand Down Expand Up @@ -115,6 +116,9 @@
"typescript": "~5.8.3",
"yjs": "^13.5.0"
},
"resolutions": {
"zod": "^3.25.56"
},
"sideEffects": [
"style/*.css",
"style/index.js"
Expand Down
4 changes: 4 additions & 0 deletions scripts/settings-checker.js
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,10 @@ const providers = {
type: 'ChromeAIInputs',
excludedProps: ['systemPrompt']
},
ChatGoogleGenerativeAI: {
path: 'node_modules/@langchain/google-genai/dist/chat_models.d.ts',
type: 'ChatGoogleGenerativeAI'
},
MistralAI: {
path: 'node_modules/@langchain/mistralai/dist/chat_models.d.ts',
type: 'ChatMistralAIInput'
Expand Down
61 changes: 61 additions & 0 deletions src/default-providers/Gemini/completer.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,61 @@
import {
CompletionHandler,
IInlineCompletionContext
} from '@jupyterlab/completer';
import { ChatGoogleGenerativeAI } from '@langchain/google-genai';
import { AIMessage, SystemMessage } from '@langchain/core/messages';

import { BaseCompleter } from '../../base-completer';

/**
 * Inline completion provider backed by Google Gemini through LangChain's
 * `ChatGoogleGenerativeAI` chat model.
 */
export class GeminiCompleter extends BaseCompleter {
  constructor(options: BaseCompleter.IOptions) {
    super(options);
    // User settings (apiKey, model, temperature, …) are spread last so they
    // override the default model.
    this._completer = new ChatGoogleGenerativeAI({
      model: 'gemini-pro',
      ...options.settings
    });
  }

  /**
   * Fetch completion items for the text before the cursor.
   *
   * @param request - the completion request carrying the document text and
   *   the cursor offset within it.
   * @param context - the inline completion context (currently unused).
   * @returns the inline completion items; an empty list on error.
   */
  async fetch(
    request: CompletionHandler.IRequest,
    context: IInlineCompletionContext
  ) {
    const { text, offset: cursorOffset } = request;
    const prompt = text.slice(0, cursorOffset);

    const trimmedPrompt = prompt.trim();

    // The prompt is sent as an AI message so the model continues the text
    // rather than replying to it conversationally.
    const messages = [
      new SystemMessage(this.systemPrompt),
      new AIMessage(trimmedPrompt)
    ];

    try {
      const response = await this._completer.invoke(messages);
      // Explicit element type avoids an implicitly-`any[]` evolving array
      // under strict mode.
      const items: { insertText: string; filterText?: string }[] = [];

      // Gemini can return a plain string or a list of content parts
      // (text/images/other); only text parts become completions.
      if (typeof response.content === 'string') {
        items.push({
          insertText: response.content
        });
      } else {
        response.content.forEach(content => {
          if (content.type !== 'text') {
            return;
          }
          items.push({
            insertText: content.text,
            // NOTE(review): `filterText` is set only in this branch; the
            // string branch above omits it — confirm the asymmetry is
            // intended (other providers' completers may differ).
            filterText: prompt.substring(trimmedPrompt.length)
          });
        });
      }
      return { items };
    } catch (error) {
      console.error('Error fetching completions', error);
      return { items: [] };
    }
  }

  protected _completer: ChatGoogleGenerativeAI;
}
9 changes: 9 additions & 0 deletions src/default-providers/Gemini/instructions.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
// Markdown/HTML help text shown to users when they select the Gemini provider
// in the settings. Rendered as provider setup instructions.
// NOTE(review): the section label is written "Ai providers" below — confirm
// the capitalization matches the actual settings section name in the UI.
export default `
<i class="fas fa-exclamation-triangle"></i> This extension is still very much experimental. It is not an official Google extension.

1. Go to <https://aistudio.google.com> and create an API key.

2. Open the JupyterLab settings and go to the **Ai providers** section to select the \`Gemini\`
provider and add your API key (required).
3. Open the chat, or use the inline completer.
`;
64 changes: 64 additions & 0 deletions src/default-providers/Gemini/settings-schema.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"type": "object",
"properties": {
"temperature": {
"type": "number",
"description": "Amount of randomness injected into the response. Ranges from 0 to 1. Use temp closer to 0 for analytical / multiple choice, and temp closer to 1 for creative and generative tasks."
},
"topK": {
"type": "number",
"description": "Only sample from the top K options for each subsequent token. Used to remove \"long tail\" low probability responses."
},
"topP": {
"type": "number",
"description": "Nucleus sampling parameter. Only the smallest set of most probable tokens with probabilities that add up to top_p or higher are kept for generation."
},
"maxOutputTokens": {
"type": "number",
"description": "The maximum number of tokens to generate in the response."
},
"stopSequences": {
"type": "array",
"items": {
"type": "string"
},
"description": "A list of strings upon which to stop generating. You probably want something like [\"\\n\\nHuman:\"] for chat conversations."
},
"streaming": {
"type": "boolean",
"description": "Whether to stream the results or not"
},
"apiKey": {
"type": "string",
"description": "Google AI Studio API key"
},
"model": {
"type": "string",
"description": "Model name to use (e.g., gemini-pro, gemini-2.0-flash, etc.)",
"default": "gemini-pro"
},
"baseURL": {
"type": "string",
"description": "Base URL for the Google AI API"
},
"safetySettings": {
"type": "array",
"description": "Safety settings for content filtering",
"items": {
"type": "object",
"properties": {
"category": {
"type": "string"
},
"threshold": {
"type": "string"
}
}
}
}
},
"additionalProperties": false,
"description": "Input to Google Generative AI Chat class.",
"definitions": {}
}
20 changes: 17 additions & 3 deletions src/default-providers/MistralAI/completer.ts
Original file line number Diff line number Diff line change
Expand Up @@ -36,9 +36,23 @@ export class CodestralCompleter extends BaseCompleter {
false
);
const items = response.choices.map(choice => {
const content = choice.message.content
.replace(CODE_BLOCK_START_REGEX, '')
.replace(CODE_BLOCK_END_REGEX, '');
const messageContent = choice.message.content;
let content = '';

if (typeof messageContent === 'string') {
content = messageContent
.replace(CODE_BLOCK_START_REGEX, '')
.replace(CODE_BLOCK_END_REGEX, '');
} else if (Array.isArray(messageContent)) {
// Handle ContentChunk[] case - extract text content
content = messageContent
.filter(chunk => chunk.type === 'text')
.map(chunk => chunk.text || '')
.join('')
.replace(CODE_BLOCK_START_REGEX, '')
.replace(CODE_BLOCK_END_REGEX, '');
}

return {
insertText: content
};
Expand Down
11 changes: 11 additions & 0 deletions src/default-providers/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -7,13 +7,15 @@ import { Notification } from '@jupyterlab/apputils';
import { ChatAnthropic } from '@langchain/anthropic';
import { ChatWebLLM } from '@langchain/community/chat_models/webllm';
import { ChromeAI } from '@langchain/community/experimental/llms/chrome_ai';
import { ChatGoogleGenerativeAI } from '@langchain/google-genai';
import { ChatMistralAI } from '@langchain/mistralai';
import { ChatOllama } from '@langchain/ollama';
import { ChatOpenAI } from '@langchain/openai';

// Import completers
import { AnthropicCompleter } from './Anthropic/completer';
import { ChromeCompleter } from './ChromeAI/completer';
import { GeminiCompleter } from './Gemini/completer';
import { CodestralCompleter } from './MistralAI/completer';
import { OllamaCompleter } from './Ollama/completer';
import { OpenAICompleter } from './OpenAI/completer';
Expand All @@ -22,6 +24,7 @@ import { WebLLMCompleter } from './WebLLM/completer';
// Import Settings
import AnthropicSettings from './Anthropic/settings-schema.json';
import ChromeAISettings from './ChromeAI/settings-schema.json';
import GeminiSettings from './Gemini/settings-schema.json';
import MistralAISettings from './MistralAI/settings-schema.json';
import OllamaAISettings from './Ollama/settings-schema.json';
import OpenAISettings from './OpenAI/settings-schema.json';
Expand All @@ -31,6 +34,7 @@ import WebLLMSettings from './WebLLM/settings-schema.json';
import ChromeAIInstructions, {
compatibilityCheck as chromeAICompatibilityCheck
} from './ChromeAI/instructions';
import GeminiInstructions from './Gemini/instructions';
import MistralAIInstructions from './MistralAI/instructions';
import OllamaInstructions from './Ollama/instructions';
import WebLLMInstructions, {
Expand Down Expand Up @@ -74,6 +78,13 @@ const AIProviders: IAIProvider[] = [
instructions: OllamaInstructions,
settingsSchema: OllamaAISettings
},
{
name: 'Gemini',
chat: ChatGoogleGenerativeAI,
completer: GeminiCompleter,
instructions: GeminiInstructions,
settingsSchema: GeminiSettings
},
{
name: 'OpenAI',
chat: ChatOpenAI,
Expand Down
Loading
Loading