# /// script
# dependencies = [
#     "fastmcp",
#     "httpx",
#     "langchain_mcp_adapters",
#     "langgraph",
#     "langchain_ollama",
#     "rich",
#     "typer",
#     "prompt_toolkit",
#     "fastapi",
#     "uvicorn",
#     "langchain",
#     "pydantic",
#     "python-multipart",
#     "aiofiles",
#     "python-json-logger"
# ]
# ///
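#
# Note: the block above is PEP 723 inline script metadata, so the script can be
# launched with a PEP 723-aware runner, e.g. (assuming `uv` is installed):
#
#     uv run mcp_proxy.py
#
# Plain `python mcp_proxy.py` also works if the dependencies are already
# installed in the active environment.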
"""
Zin MCP Client - FastAPI Backend
Copyright (c) 2025 Zin MCP Client Developer(s)
"""
import json
import httpx
import logging
import asyncio
import uvicorn
from typing import List, Dict, Optional, AsyncGenerator
from contextlib import AsyncExitStack
from fastapi import FastAPI, HTTPException, WebSocket, WebSocketDisconnect
from fastapi.staticfiles import StaticFiles
from fastapi.responses import HTMLResponse, StreamingResponse, JSONResponse
from pydantic import BaseModel
from contextlib import asynccontextmanager
import time
import uuid
from fastapi.middleware.cors import CORSMiddleware
from mcp import ClientSession
from mcp.client.stdio import stdio_client, StdioServerParameters
from langchain_mcp_adapters.tools import load_mcp_tools
from langgraph.prebuilt import create_react_agent
from langchain_ollama import ChatOllama
# Setup logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Global variable to store config path
CONFIG_PATH = "mcp_config.json"
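
# Sketch of the mcp_config.json layout this client expects (server entries with
# "command" and "args", per initialize_servers below). The server name, command,
# and args here are placeholders, not part of this project:
#
# {
#     "mcpServers": {
#         "example-server": {
#             "command": "uvx",
#             "args": ["some-mcp-server"]
#         }
#     }
# }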

@asynccontextmanager
async def lifespan(app: FastAPI):
    # Startup
    global client
    client = WebMCPClient(CONFIG_PATH)
    logger.info(f"Application started with config: {CONFIG_PATH}")
    yield
    # Shutdown
    await client.close()

# Initialize FastAPI app with lifespan
app = FastAPI(title="Zin MCP Client", version="1.0.0", lifespan=lifespan)

app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Global client instance - will be initialized with proper config path
client = None

# Pydantic models for API
class QueryRequest(BaseModel):
    query: str

class ServerInfo(BaseModel):
    name: str
    status: str
    tools_count: int

class ToolInfo(BaseModel):
    name: str
    description: str
    server: str

class ModelInfo(BaseModel):
    name: str

class ServerSelectionRequest(BaseModel):
    server_names: List[str]

    @classmethod
    def __get_validators__(cls):
        yield cls.validate_to_json

    @classmethod
    def validate_to_json(cls, value):
        if isinstance(value, str):
            value = json.loads(value)
        return value

# OpenAI-compatible models
class ChatMessage(BaseModel):
    role: str
    content: str

class ChatCompletionRequest(BaseModel):
    model: str
    messages: List[ChatMessage]
    stream: bool = False
    temperature: Optional[float] = None
    max_tokens: Optional[int] = None

class ChatCompletionChoice(BaseModel):
    index: int
    message: ChatMessage
    finish_reason: str

class ChatCompletionUsage(BaseModel):
    prompt_tokens: int
    completion_tokens: int
    total_tokens: int

class ChatCompletionResponse(BaseModel):
    id: str
    object: str = "chat.completion"
    created: int
    model: str
    choices: List[ChatCompletionChoice]
    usage: ChatCompletionUsage

class OpenAIModel(BaseModel):
    id: str
    object: str = "model"
    created: int
    owned_by: str = "ollama"

# Web MCP Client class
class WebMCPClient:
    def __init__(self, config_path: str):
        self.config_path = config_path
        self.config = self._load_config()
        self.sessions = {}
        self.tools_by_server = {}
        self.llm = None
        self.agent = None
        self.selected_model = None
        self._exit_stack = AsyncExitStack()
        logger.info(f"WebMCPClient initialized with config from {config_path}")

    def _load_config(self) -> dict:
        try:
            with open(self.config_path, "r") as f:
                return json.load(f)
        except FileNotFoundError:
            logger.warning(f"Config file not found at {self.config_path}")
            return {"mcpServers": {}}

    async def get_ollama_models(self) -> List[str]:
        logger.info("Fetching Ollama models")
        try:
            async with httpx.AsyncClient() as client:
                response = await client.get("http://127.0.0.1:11434/api/tags")
                data = response.json()
                models = [model["name"] for model in data.get("models", [])]
                logger.info(f"Found {len(models)} Ollama models")
                return models
        except (httpx.ConnectError, httpx.RequestError) as e:
            logger.error(f"Failed to connect to Ollama API: {str(e)}")
            return []

    async def initialize_servers(self, server_names: List[str] = None, force_reinit: bool = False) -> bool:
        """Initialize or reinitialize servers with optional cleanup"""
        if not self.config.get("mcpServers"):
            logger.error("No MCP Servers configured")
            return False

        servers = self.config["mcpServers"]
        server_names = server_names or list(servers.keys())

        # Check if reinitialization is needed
        current_servers = set(self.sessions.keys())
        requested_servers = set(server_names)
        if not force_reinit and current_servers == requested_servers:
            logger.info("Servers already initialized with same configuration")
            return True

        # Clean up existing sessions if needed
        if current_servers or force_reinit:
            logger.info("Cleaning up existing sessions for reinitialization")
            await self.cleanup_sessions()
            self.reset_agent()

        logger.info(f"Initializing Servers: {', '.join(server_names)}")
        initialized_servers = 0
        for server_name in server_names:
            if server_name not in servers:
                logger.warning(f"Server '{server_name}' not found in config")
                continue

            server_config = servers[server_name]
            command = server_config.get("command")
            args = server_config.get("args", [])
            logger.info(f"Initializing {server_name}")
            server_params = StdioServerParameters(command=command, args=args)
            try:
                server_stack = AsyncExitStack()
                reader, writer = await server_stack.enter_async_context(stdio_client(server_params))
                session = await server_stack.enter_async_context(ClientSession(reader, writer))
                await session.initialize()
                await self._exit_stack.enter_async_context(server_stack)
                self.sessions[server_name] = session
                tools = await load_mcp_tools(session)
                self.tools_by_server[server_name] = tools
                initialized_servers += 1
                logger.info(f"Server {server_name} initialized with {len(tools)} tools")
            except Exception as e:
                logger.error(f"Error initializing {server_name}: {str(e)}")

        return initialized_servers > 0

    async def initialize_llm(self, model_name: str) -> bool:
        logger.info(f"Initializing LLM with model: {model_name}")
        try:
            self.llm = ChatOllama(model=model_name)
            self.selected_model = model_name
            logger.info("LLM initialized successfully")
            return True
        except Exception as e:
            logger.error(f"Error initializing LLM: {str(e)}")
            return False

    async def create_agent(self) -> bool:
        logger.info("Creating agent with tools and LLM")
        if not self.llm:
            logger.error("LLM not initialized")
            return False

        all_tools = [tool for tools in self.tools_by_server.values() for tool in tools]
        if not all_tools:
            logger.error("No Tools available")
            return False

        try:
            self.agent = create_react_agent(self.llm, all_tools)
            logger.info(f"Agent created successfully with {len(all_tools)} tools")
            return True
        except Exception as e:
            logger.error(f"Error creating agent: {str(e)}")
            return False

    async def run_interaction(self, query: str) -> str:
        logger.info(f"[run_interaction] Query: {query}")
        if not self.agent:
            logger.warning("Agent not initialized!")
            return "⚠️ Agent is not initialized. Please check your model and tools."

        try:
            input_data = {"messages": query}
            result = await self.agent.ainvoke(input_data)
            logger.info(f"[run_interaction] Agent result: {result}")

            # === Try extracting from LangGraph message list ===
            if isinstance(result, dict) and "messages" in result:
                msgs = result.get("messages", [])
                logger.info(f"[run_interaction] Extracted {len(msgs)} messages")
                for m in reversed(msgs):
                    if m.__class__.__name__ == "AIMessage":
                        content = getattr(m, "content", "").strip()
                        if content:
                            return content
                for m in reversed(msgs):
                    if m.__class__.__name__ == "ToolMessage":
                        content = getattr(m, "content", "").strip()
                        if content:
                            return content
                logger.warning("Messages found but no AIMessage or ToolMessage with content")
                return "⚠️ Agent did not return any message content."

            # === Fallback: convert raw result to string ===
            return str(result).strip() or "⚠️ Agent returned an empty response."
        except Exception as e:
            logger.error(f"[run_interaction] Error: {str(e)}")
            if "does not support tools" in str(e):
                return "⚠️ This model does not support tool calling."
            return f"❌ Unexpected error during interaction: {str(e)}"

    def get_servers_info(self) -> List[ServerInfo]:
        servers_info = []
        for server_name in self.sessions:
            tools_count = len(self.tools_by_server.get(server_name, []))
            servers_info.append(ServerInfo(
                name=server_name,
                status="Connected",
                tools_count=tools_count
            ))
        return servers_info

    def get_tools_info(self) -> List[ToolInfo]:
        tools_info = []
        for server_name, tools in self.tools_by_server.items():
            for tool in tools:
                tools_info.append(ToolInfo(
                    name=tool.name,
                    description=tool.description,
                    server=server_name
                ))
        return tools_info

    async def cleanup_sessions(self, server_names_to_keep: List[str] = None):
        """Clean up existing sessions, optionally keeping only specified servers"""
        logger.info("Cleaning up existing MCP server sessions")
        if server_names_to_keep is None:
            # Close all sessions
            servers_to_remove = list(self.sessions.keys())
        else:
            # Close only sessions not in the keep list
            servers_to_remove = [name for name in self.sessions.keys() if name not in server_names_to_keep]

        for server_name in servers_to_remove:
            if server_name in self.sessions:
                try:
                    # Simply remove from our tracking - let the exit stack handle cleanup
                    del self.sessions[server_name]
                    if server_name in self.tools_by_server:
                        del self.tools_by_server[server_name]
                    logger.info(f"Cleaned up session for server: {server_name}")
                except Exception as e:
                    logger.error(f"Error cleaning up session for {server_name}: {str(e)}")

        # Only close the exit stack if we're removing all servers
        # (the removed entries are already gone from self.sessions at this point)
        if server_names_to_keep is None or not self.sessions:
            try:
                await self._exit_stack.aclose()
            except Exception as e:
                logger.error(f"Error closing exit stack: {str(e)}")
            finally:
                self._exit_stack = AsyncExitStack()

    def reset_agent(self):
        """Reset the agent to None"""
        logger.info("Resetting agent")
        self.agent = None

    async def close(self):
        logger.info("Closing all MCP server connections")
        try:
            await self._exit_stack.aclose()
        except Exception as e:
            logger.error(f"Error during cleanup: {str(e)}")

async def non_stream_chat_completion(user_message: str, request: ChatCompletionRequest):
    """Handle non-streaming chat completion"""
    try:
        logger.info(f"Processing non-stream query: {user_message}")
        response = await client.run_interaction(user_message)
        logger.info(f"LLM response: {response}")

        # Create proper OpenAI-compatible response
        completion_response = ChatCompletionResponse(
            id=f"chatcmpl-{uuid.uuid4().hex[:8]}",
            created=int(time.time()),
            model=request.model,
            choices=[
                ChatCompletionChoice(
                    index=0,
                    message=ChatMessage(role="assistant", content=response),
                    finish_reason="stop"
                )
            ],
            usage=ChatCompletionUsage(
                prompt_tokens=len(user_message.split()),
                completion_tokens=len(response.split()),
                total_tokens=len(user_message.split()) + len(response.split())
            )
        )
        logger.info(f"Sending response: {completion_response}")
        return completion_response
    except Exception as e:
        logger.error(f"Error in non_stream_chat_completion: {str(e)}")
        raise HTTPException(status_code=500, detail=str(e))

async def stream_chat_completion(user_message: str, request: ChatCompletionRequest) -> AsyncGenerator[str, None]:
    """Continuously stream fake data while LLM is working, then stream real response"""
    logger.info(f"[stream_chat_completion] Streaming with LLM fallback: {user_message}")
    completion_id = f"chatcmpl-{uuid.uuid4().hex[:8]}"
    created_time = int(time.time())

    # 1. Send initial role chunk
    initial_chunk = {
        "id": completion_id,
        "object": "chat.completion.chunk",
        "created": created_time,
        "model": request.model,
        "choices": [{
            "index": 0,
            "delta": {"role": "assistant"},
            "finish_reason": None
        }]
    }
    yield f"data: {json.dumps(initial_chunk)}\n\n"
    await asyncio.sleep(0.01)

    # 2. Start background task to get real response
    response_task = asyncio.create_task(client.run_interaction(user_message))

    # 3. While waiting, periodically send filler tokens to keep the stream alive
    dot_count = 0
    while not response_task.done():
        filler_char = "\u200b"
        filler_chunk = {
            "id": completion_id,
            "object": "chat.completion.chunk",
            "created": created_time,
            "model": request.model,
            "choices": [{
                "index": 0,
                "delta": {"content": filler_char},
                "finish_reason": None
            }]
        }
        yield f"data: {json.dumps(filler_chunk)}\n\n"
        dot_count += 1
        await asyncio.sleep(3)  # adjustable filler rate

    # 4. Get the real response
    try:
        response = await response_task
        logger.info(f"[stream_chat_completion] Got LLM response: {response}")
    except Exception as e:
        logger.error(f"[stream_chat_completion] LLM error: {str(e)}")
        response = f"❌ LLM error: {str(e)}"

    # 5. Stream actual content
    for char in response:
        content_chunk = {
            "id": completion_id,
            "object": "chat.completion.chunk",
            "created": created_time,
            "model": request.model,
            "choices": [{
                "index": 0,
                "delta": {"content": char},
                "finish_reason": None
            }]
        }
        yield f"data: {json.dumps(content_chunk)}\n\n"
        await asyncio.sleep(0.005)

    # 6. Final chunk
    final_chunk = {
        "id": completion_id,
        "object": "chat.completion.chunk",
        "created": created_time,
        "model": request.model,
        "choices": [{
            "index": 0,
            "delta": {},
            "finish_reason": "stop"
        }]
    }
    yield f"data: {json.dumps(final_chunk)}\n\n"
    yield "data: [DONE]\n\n"

async def auto_initialize_system(model_name: str):
    """Auto-initialize system with default settings"""
    try:
        logger.info(f"Auto-initializing system with model: {model_name}")

        # Initialize LLM
        llm_success = await client.initialize_llm(model_name)
        if not llm_success:
            raise Exception("Failed to initialize LLM")

        # Initialize all available servers
        available_servers = list(client.config.get("mcpServers", {}).keys())
        logger.info(f"Available servers: {available_servers}")
        if available_servers:
            servers_success = await client.initialize_servers(available_servers)
            if not servers_success:
                logger.warning("No servers were successfully initialized")

        # Create agent
        agent_success = await client.create_agent()
        if not agent_success:
            raise Exception("Failed to create agent")

        logger.info(f"System auto-initialized successfully with model {model_name}")
    except Exception as e:
        logger.error(f"Auto-initialization failed: {str(e)}")
        raise HTTPException(status_code=500, detail=f"System initialization failed: {str(e)}")

# Routes
@app.get("/", response_class=HTMLResponse)
async def get_homepage():
    """Serve the main HTML file"""
    try:
        with open("static/index.html", "r", encoding="utf-8") as f:
            return HTMLResponse(content=f.read())
    except FileNotFoundError:
        raise HTTPException(status_code=404, detail="index.html not found")

@app.get("/v1/models")
async def get_openai_models():
    """OpenAI-compatible models endpoint"""
    models = await client.get_ollama_models()
    return {
        "object": "list",
        "data": [
            OpenAIModel(
                id=model,
                created=int(time.time()),
                owned_by="ollama"
            ).model_dump() for model in models
        ]
    }
@app.get("/test-stream")
async def test_stream():
"""Test endpoint to verify streaming format"""
def generate():
test_message = "Hello, this is a test streaming response!"
completion_id = f"chatcmpl-{uuid.uuid4().hex[:8]}"
created_time = int(time.time())
# Initial chunk with role
initial_chunk = {
"id": completion_id,
"object": "chat.completion.chunk",
"created": created_time,
"model": "test-model",
"choices": [{
"index": 0,
"delta": {"role": "assistant"},
"finish_reason": None
}]
}
yield f"data: {json.dumps(initial_chunk)}\n\n"
# Content chunks
for char in test_message:
chunk_data = {
"id": completion_id,
"object": "chat.completion.chunk",
"created": created_time,
"model": "test-model",
"choices": [{
"index": 0,
"delta": {"content": char},
"finish_reason": None
}]
}
yield f"data: {json.dumps(chunk_data)}\n\n"
# Final chunk
final_chunk = {
"id": completion_id,
"object": "chat.completion.chunk",
"created": created_time,
"model": "test-model",
"choices": [{
"index": 0,
"delta": {},
"finish_reason": "stop"
}]
}
yield f"data: {json.dumps(final_chunk)}\n\n"
yield "data: [DONE]\n\n"
return StreamingResponse(
generate(),
media_type="text/event-stream",
headers={
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"Access-Control-Allow-Origin": "*",
"X-Accel-Buffering": "no"
}
)
@app.post("/v1/chat/completions_test")
async def chat_completions():
def generate():
completion_id = f"chatcmpl-{uuid.uuid4().hex[:8]}"
created = int(time.time())
model = "mistral"
# 1. Initial chunk (role only)
yield f"data: {json.dumps({\
'id': completion_id,\
'object': 'chat.completion.chunk',\
'created': created,\
'model': model,\
'choices': [{\
'index': 0,\
'delta': {'role': 'assistant'},\
'finish_reason': None\
}]\
})}\n\n"
time.sleep(0.05)
# 2. Stream content, character by character
message = "This is a working test response from mock server."
for char in message:
yield f"data: {json.dumps({\
'id': completion_id,\
'object': 'chat.completion.chunk',\
'created': created,\
'model': model,\
'choices': [{\
'index': 0,\
'delta': {'content': char},\
'finish_reason': None\
}]\
})}\n\n"
time.sleep(0.01)
# 3. Final chunk
yield f"data: {json.dumps({\
'id': completion_id,\
'object': 'chat.completion.chunk',\
'created': created,\
'model': model,\
'choices': [{\
'index': 0,\
'delta': {},\
'finish_reason': 'stop'\
}]\
})}\n\n"
# 4. End marker
yield "data: [DONE]\n\n"
return StreamingResponse(generate(), media_type="text/event-stream")
@app.post("/v1/chat/completions")
async def create_chat_completion(request: ChatCompletionRequest):
"""OpenAI-compatible chat completions endpoint with exact format for OpenWebUI"""
logger.info(f"Received chat completion request: model={request.model}, stream={request.stream}")
# Initialize system if not already done
if not client.agent:
logger.info("Agent not initialized, starting auto-initialization")
await auto_initialize_system(request.model)
# Extract user message from the most recent user message
user_message = None
for msg in reversed(request.messages):
if msg.role == "user":
user_message = msg.content
break
if not user_message:
raise HTTPException(status_code=400, detail="No user message found")
logger.info(f"Processing user message: {user_message}")
# Check if streaming is requested
if request.stream:
logger.info("Returning streaming response")
return StreamingResponse(
stream_chat_completion(user_message, request),
media_type="text/event-stream",
headers={
"Cache-Control": "no-cache",
"Connection": "keep-alive",
"Access-Control-Allow-Origin": "*",
"Access-Control-Allow-Headers": "*",
"Access-Control-Expose-Headers": "*",
"X-Accel-Buffering": "no",
"Transfer-Encoding": "chunked"
}
)
else:
logger.info("Returning non-streaming response")
response = await non_stream_chat_completion(user_message, request)
return JSONResponse(content=response.model_dump())
@app.post("/api/query")
async def process_query(request: QueryRequest):
response = await client.run_interaction(request.query)
return {"response": response}

if __name__ == "__main__":
    uvicorn.run(app, host="0.0.0.0", port=8000)
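
# Example usage once the server is running on port 8000 (illustrative; the model
# name below is a placeholder for whatever Ollama model is installed locally):
#
#     curl http://localhost:8000/v1/models
#
#     curl http://localhost:8000/v1/chat/completions \
#       -H "Content-Type: application/json" \
#       -d '{"model": "llama3", "messages": [{"role": "user", "content": "Hello"}], "stream": false}'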