Skip to content

Commit

Permalink
Refactor project
Browse files Browse the repository at this point in the history
Signed-off-by: kerthcet <[email protected]>
  • Loading branch information
kerthcet committed Nov 18, 2024
1 parent a5b8379 commit 68755e1
Show file tree
Hide file tree
Showing 21 changed files with 1,370 additions and 2,599 deletions.
10 changes: 10 additions & 0 deletions .github/workflow/kube-workflow-init.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
# Manually-triggered bootstrap workflow.
# Delegates all work to the reusable "init" workflow from
# kerthcet/github-workflow-as-kube, pinned at v0.2.1.
# NOTE(review): the file lives under .github/workflow/ — GitHub only picks up
# workflows from .github/workflows/ (plural); confirm the directory name.
name: Initialization Workflow

on:
  # Runs only when triggered manually from the Actions tab.
  workflow_dispatch:

jobs:
  init:
    uses: kerthcet/github-workflow-as-kube/.github/workflows/[email protected]
    secrets:
      # AGENT_TOKEN must be configured as a repository secret.
      AGENT_TOKEN: ${{ secrets.AGENT_TOKEN }}
21 changes: 21 additions & 0 deletions .github/workflow/kube-workflow.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
# Event-driven workflow: forwards issue / comment / PR events to the reusable
# event-handler workflow from kerthcet/github-workflow-as-kube (pinned v0.2.1).
# NOTE(review): pull_request_target runs with base-repo secrets against fork
# PRs — the reusable workflow must treat PR content as untrusted.
# NOTE(review): the file lives under .github/workflow/ — GitHub only picks up
# workflows from .github/workflows/ (plural); confirm the directory name.
name: Event Workflow

on:
  issues:
    types:
      - opened
  issue_comment:
    types:
      - created
  pull_request_target:
    types:
      - opened
      - synchronize
      - labeled
      - unlabeled

jobs:
  event-handler:
    uses: kerthcet/github-workflow-as-kube/.github/workflows/[email protected]
    secrets:
      # AGENT_TOKEN must be configured as a repository secret.
      AGENT_TOKEN: ${{ secrets.AGENT_TOKEN }}
Binary file removed images/webui.jpg
Binary file not shown.
Empty file removed llmaz/finetune/__init__.py
Empty file.
File renamed without changes.
12 changes: 12 additions & 0 deletions llmaz/libs/consts.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,12 @@
# Custom-resource coordinates (group/version + plural) used when listing
# llmaz objects through the Kubernetes custom-objects API (see libs/models.py).
group = "llmaz.io"
version = "v1alpha1"
openmodel_plural = "openmodels"

# Label keys stamped on llmaz-managed objects to tie a resource back to the
# model (and model family) it serves.
label_key_family_name = "llmaz.io/model-family-name"
label_key_model_name = "llmaz.io/model-name"

# Supported model hub identifiers.
huggingface = "Huggingface"
modelscope = "ModelScope"
9 changes: 9 additions & 0 deletions llmaz/libs/inference_service.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
from dataclasses import dataclass


@dataclass(frozen=True)
class InferenceService:
    """Immutable identifier for a deployed inference service.

    frozen=True makes instances hashable, so they can be used as dict keys
    or set members.
    """

    # Service name and the Kubernetes namespace it lives in.
    name: str
    namespace: str
    # Model identity — presumably mirrors the llmaz.io/model-family-name and
    # llmaz.io/model-name labels defined in libs/consts.py; verify at call site.
    model_family_name: str
    model_name: str
21 changes: 21 additions & 0 deletions llmaz/libs/models.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
from kubernetes import client
from kubernetes.client import CustomObjectsApi

from llmaz.libs.consts import group, version, openmodel_plural


def get_openmodel_objects(api: CustomObjectsApi, namespace: "str | None" = None):
    """List OpenModel custom objects from the cluster.

    Args:
        api: Kubernetes custom-objects API client.
        namespace: Must be falsy for now; namespaced listing is not
            implemented yet. (Fixed: the original annotated this parameter
            as ``None`` — the literal type — and gave it no default.)

    Returns:
        A list of OpenModel objects (possibly empty). An empty list is also
        returned on API errors — best-effort semantics, callers are not
        expected to handle cluster failures.

    Raises:
        NotImplementedError: If a namespace is given.
    """
    if namespace:
        # TODO: we do not support namespaced models yet.
        raise NotImplementedError("do not support namespaced models yet")
    # Keep the try body minimal: only the API call can raise ApiException.
    try:
        response = api.list_cluster_custom_object(
            group=group,
            version=version,
            plural=openmodel_plural,
        )
    except client.exceptions.ApiException as e:
        # Degrade gracefully instead of crashing the UI.
        # TODO(review): switch to the logging module once the project adopts it.
        print(f"Error fetching OpenModel objects: {e}")
        return []
    return response.get("items", [])
Empty file removed llmaz/models/__init__.py
Empty file.
Empty file removed llmaz/serves/__init__.py
Empty file.
24 changes: 0 additions & 24 deletions llmaz/serves/chatbot.py

This file was deleted.

32 changes: 0 additions & 32 deletions llmaz/serves/serve.py

This file was deleted.

23 changes: 7 additions & 16 deletions llmaz/webui/engine.py
Original file line number Diff line number Diff line change
@@ -1,21 +1,12 @@
from llmaz.serves.serve import Serve
from kubernetes import config
from llmaz.libs.models import get_openmodel_objects


class Engine:
    """Backend facade for the web UI.

    Reconstructed post-commit version: the diff shows the old Serve-based
    methods (preload_model/offload_model) were deleted in this commit,
    leaving a skeleton to be wired to the Kubernetes backend.
    """

    def __init__(self) -> None:
        # Cluster config loading is deferred until the backend is wired up.
        # config.load_incluster_config()
        pass

    def get_models(self):
        """Return available models. Not implemented yet (returns None)."""
        # objs = get_openmodel_objects()
        pass
18 changes: 6 additions & 12 deletions llmaz/webui/webui.py
Original file line number Diff line number Diff line change
@@ -1,23 +1,17 @@
# TODO: Using logging once ready
# import logging

import gradio as gr

from llmaz.webui.engine import Engine
from llmaz.webui.webui_serving import create_serving_webui
from llmaz.webui.webui_finetune import create_finetune_webui
from llmaz.webui.webui_prompt import create_prompt_webui
from llmaz.webui.webui_chat import create_chat_webui
from llmaz.webui.webui_market import create_market_webui


def launch_webui() -> gr.Blocks:
    """Assemble and return the top-level Gradio Blocks app.

    Reconstructed post-commit version: the diff shows the serving/prompt/
    finetune tabs were deleted in this commit, leaving only the chat tab
    (market tab still TODO).
    """
    engine = Engine()

    with gr.Blocks(title="llmboard") as blocks:
        # TODO: Model Market Tab, list and create serving services.
        # create_market_webui(engine)
        create_chat_webui(engine)

    return blocks
63 changes: 63 additions & 0 deletions llmaz/webui/webui_chat.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,63 @@
import random

import gradio as gr

from llmaz.webui.engine import Engine

loaded = False


def create_chat_webui(engine: Engine) -> gr.Tab:
    """Build the "Chat" tab: family/model/service dropdowns plus a chat box.

    NOTE(review): `data` and `services` below are hard-coded placeholders;
    wire them to `engine` once the backend is implemented.
    """

    def predict(message, history):
        # Placeholder bot — answers randomly until a real backend exists.
        print(f"message: {message}, history: {history}")
        return random.choice(["Yes", "No"])

    # Placeholder catalog: model family -> model names.
    data = {
        "family1": ["model1", "model2", "model3"],
        "family2": ["model4", "model5"],
        "family3": ["model6", "model7", "model8", "model9"],
        "llama": ["model6", "model7", "model8", "llama2"],
    }

    # Placeholder mapping: model name -> inference services.
    services = {
        "llama2": ["service1", "service2", "service3", "service4"],
    }

    def get_models(family):
        models = data.get(family, [])
        return gr.update(choices=models, value=models[0] if models else None)

    def get_services(model):
        svcs = services.get(model, [])
        # BUG FIX: the original guarded on `svcs[0]`, which raises IndexError
        # for any model with no services; guard on the list itself.
        return gr.update(choices=svcs, value=svcs[0] if svcs else None)

    with gr.Tab("Chat"):
        with gr.Row():
            family_dropdown = gr.Dropdown(
                label="Select Model Family", choices=list(data.keys()), value=None
            )
            model_dropdown = gr.Dropdown(
                label="Select Model", choices=[], value=None, interactive=True
            )
            service_dropdown = gr.Dropdown(
                label="Select Service", choices=[], value=None, interactive=True
            )

            # Cascade the selections: family -> models -> services.
            family_dropdown.change(
                get_models, inputs=family_dropdown, outputs=model_dropdown
            )
            model_dropdown.change(
                get_services, inputs=model_dropdown, outputs=service_dropdown
            )

        with gr.Accordion("click for more parameters...", open=False):
            temperature = gr.Slider(0, 100, step=5, label="temperature")
            stream = gr.Checkbox(label="stream")

        gr.ChatInterface(
            fn=predict,
            additional_inputs=[
                gr.Textbox(placeholder="this is optional", label="System Prompt"),
            ],
            type="messages",
        )
8 changes: 0 additions & 8 deletions llmaz/webui/webui_finetune.py

This file was deleted.

35 changes: 35 additions & 0 deletions llmaz/webui/webui_market.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
import gradio as gr
import requests

from llmaz.webui.engine import Engine


def fetch_readme(url, timeout=10):
    """Fetch a README document from *url*.

    Args:
        url: Location of the raw Markdown document.
        timeout: Seconds to wait for the response. New parameter (defaults
            to 10) so a dead host can no longer hang the UI indefinitely —
            the original call had no timeout at all.

    Returns:
        The Markdown text on HTTP 200, otherwise a generic error message.
    """
    response = requests.get(url, timeout=timeout)
    if response.status_code == 200:
        return response.text  # raw Markdown body
    # Original used a pointless f-string prefix; plain literal, same bytes.
    return "Load document error"


def create_market_webui(engine: Engine) -> gr.Tab:
    """Build the "Model Market" tab: two rows of three placeholder cards."""
    card_rows = (
        ("Model Card1", "Model Card2", "Model Card3"),
        ("Model Card4", "Model Card5", "Model Card6"),
    )
    with gr.Tab("Model Market"):
        # Lay the placeholder cards out row by row, three groups per row.
        for row in card_rows:
            with gr.Row():
                for title in row:
                    with gr.Group():
                        gr.Markdown(f"## {title}")
8 changes: 0 additions & 8 deletions llmaz/webui/webui_prompt.py

This file was deleted.

84 changes: 0 additions & 84 deletions llmaz/webui/webui_serving.py

This file was deleted.

Loading

0 comments on commit 68755e1

Please sign in to comment.