# app.py
import gradio as gr
# from ontochat.functions import set_openai_api_key, user_story_generator, cq_generator, load_example_user_story, clustering_generator, ontology_testing, load_example
from ontochat.functions import set_openai_api_key, user_story_generator, load_example
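
# The three handlers above live in ontochat/functions.py. Their implementations are
# not shown in this file; judging from the event wiring below, they are presumably
# shaped roughly like this (a sketch inferred from usage, not the actual signatures):
#   set_openai_api_key(api_key: str) -> str             # returns a status/echo string shown in the key textbox
#   user_story_generator(message: str, history: list) -> tuple[list, str]
#                                                        # returns (updated chat history, cleared textbox value)
#   load_example(index: int) -> str                      # maps a Dataset row index to the template text
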
user_story_template = """**Persona:**\n\n- Name: -\n- Age: -\n- Occupation: -\n- Skills: -\n- Interests: -\n\n**Goal:**\n\n- Description: -\n- Keywords: -\n\n**Scenario:**\n\n- Before: -\n- During: -\n- After: -\n\n**Example Data:**\n\n- Category: -\n- Data: -\n\n**Resources:**\n\n- Resource Name: -\n- Link: -"""

with gr.Blocks() as set_api_key:
    gr.Markdown(
        """
# Welcome to OntoChat! 👋
### Introduction
Hi there! I'm OntoChat, your conversational assistant 🤖 here to support you in generating ontology user stories 🎉. I simplify the process by combining human input with GenAI capabilities ✨. Whether you're experienced with prompt engineering or not, I'm here to guide you step by step by (1) asking questions ❓ for elicitation, (2) offering example answers 💡 for guidance, (3) providing predefined templates 📋 to help craft effective prompts, and (4) refining 🛠️ your input into a formal user story. Let's make ontology requirements collection easier and more efficient together 🚀! For more details, visit 🌐 [OntoChat on GitHub](https://github.com/King-s-Knowledge-Graph-Lab/OntoChat).
### Citations
[1] Zhang B, Carriero VA, Schreiberhuber K, Tsaneva S, González LS, Kim J, de Berardinis J. OntoChat: a Framework for Conversational Ontology Engineering using Language Models. arXiv preprint arXiv:2403.05921. 2024 Mar 9.
[2] Zhao Y, Zhang B, Hu X, Ouyang S, Kim J, Jain N, de Berardinis J, Meroño-Peñuela A, Simperl E. Improving Ontology Requirements Engineering with OntoChat and Participatory Prompting. In: Proceedings of the AAAI Symposium Series 2024 Nov 8 (Vol. 4, No. 1, pp. 253-257).
"""
    )
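
    # The entered key is handed to set_openai_api_key, which presumably stores it
    # for the OpenAI client used by the other handlers; the same textbox is reused
    # to display the returned confirmation/status message.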
    with gr.Group():
        api_key = gr.Textbox(
            label="OpenAI API Key",
            info="Please input your OpenAI API Key if you don't have it set up on your own machine. Please note that "
                 "the key will only be used for this demo and will not be uploaded or used anywhere else."
        )
        api_key_btn = gr.Button(value="Set API Key")
        api_key_btn.click(fn=set_openai_api_key, inputs=api_key, outputs=api_key)
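
# User story generation tab: a chat panel plus a bank of prompt templates.
# The user picks a template (e.g. "Create Domain"), fills in its placeholders,
# and submits; user_story_generator drives the conversation, presumably toward
# a user story in the format sketched by user_story_template above.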
with gr.Blocks() as user_story_interface:
    with gr.Row():
        with gr.Column(scale=1):
            user_story_chatbot = gr.Chatbot(
                value=[
                    {"role": "assistant", "content": (
                        "Hello! I'm OntoChat 😊. I'll help you create an ontology user story! \n\n**1.** Don't worry about prompting—**find the template 📄 and edit the placeholders 📝** to craft a high-quality response 👍. \n\n**2.** Placeholders **\*\*[]\*\*** are **mandatory**; placeholders **\*[]\*** are **optional**. \n\n**3.** Feel free to ask OntoChat any questions if needed. \n\nLet's get started! **Which domain is this ontology for?** For example, 'Healthcare, Wine, Music, etc.'. Use template **[Create Domain]** to answer."
                    )}
                ],
                height="472px",
                type="messages"
            )
            user_story_input = gr.Textbox(
                label="Message OntoChat",
                placeholder="Please type your message here and press Enter to interact with the chatbot:",
                max_lines=20,
                lines=1
            )
            elicitation_questions_dataset = gr.Dataset(
                components=[user_story_input],
                label="Prompt Templates",
                type="index",
                samples=[
                    ["Create Domain"],
                    ["Create Persona"],
                    ["Create User Goal"],
                    ["Create Actions"],
                    ["Create Keywords"],
                    ["Create Current Methods"],
                    ["Create Challenges"],
                    ["Create New Methods"],
                    ["Create Outcomes"]
                ],
                samples_per_page=10
            )
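
            # Pressing Enter sends the message plus the current chat history to
            # user_story_generator; the handler presumably appends the new exchange
            # to the Chatbot and returns an empty string to clear the input box.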
            user_story_input.submit(
                fn=user_story_generator,
                inputs=[user_story_input, user_story_chatbot],
                outputs=[user_story_chatbot, user_story_input]
            )
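
            # Because the Dataset uses type="index", clicking a template row passes
            # its row index to load_example, which presumably returns the full
            # template text to pre-fill the message box.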
            elicitation_questions_dataset.click(
                fn=load_example,
                inputs=[elicitation_questions_dataset],
                outputs=[user_story_input]
            )
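
# The remaining pipeline stages (competency question extraction, CQ clustering,
# and ontology testing) are currently disabled. Their code is kept below,
# commented out, presumably so the corresponding tabs can be re-enabled later.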
# with gr.Blocks() as cq_interface:
#     with gr.Row():
#         with gr.Column():
#             cq_chatbot = gr.Chatbot(
#                 value=[
#                     {
#                         "role": "assistant",
#                         "content": (
#                             "I am OntoChat, your conversational ontology engineering assistant. Here is the second step of "
#                             "the system. Please give me your user story and tell me how many competency questions you want "
#                             "me to generate from the user story."
#                         )
#                     }
#                 ],
#                 type="messages"
#             )
#             cq_input = gr.Textbox(
#                 label="Chatbot input",
#                 placeholder="Please type your message here and press Enter to interact with the chatbot:"
#             )
#             gr.Markdown(
#                 """
#                 ### User story examples
#                 Click the button below to use an example user story from
#                 [Linka](https://github.com/polifonia-project/stories/tree/main/Linka_Computer_Scientist) in Polifonia.
#                 """
#             )
#             example_btn = gr.Button(value="Use example user story")
#             example_btn.click(
#                 fn=load_example_user_story,
#                 inputs=[],
#                 outputs=[cq_input]
#             )
#         cq_output = gr.TextArea(
#             label="Competency questions",
#             interactive=True
#         )
#         cq_input.submit(
#             fn=cq_generator,
#             inputs=[
#                 cq_input, cq_chatbot
#             ],
#             outputs=[
#                 cq_output, cq_chatbot, cq_input
#             ]
#         )

# clustering_interface = gr.Interface(
#     fn=clustering_generator,
#     inputs=[
#         gr.TextArea(
#             label="Competency questions",
#             info="Please copy the previously generated competency questions and paste them here. You can also modify "
#                  "the questions before submitting them."
#         ),
#         gr.Dropdown(
#             value="LLM clustering",
#             choices=["LLM clustering", "Agglomerative clustering"],
#             label="Clustering method",
#             info="Please select the clustering method."
#         ),
#         gr.Textbox(
#             label="Number of clusters (optional for LLM clustering)",
#             info="Please input the number of clusters you want to generate. Please do not input a number that "
#                  "exceeds the total number of competency questions."
#         )
#     ],
#     outputs=[
#         gr.Image(label="Visualization"),
#         gr.Code(
#             language='json',
#             label="Competency Question clusters"
#         )
#     ],
#     title="OntoChat",
#     description="This is the third step of OntoChat. Please copy the generated competency questions from the previous "
#                 "step and run the clustering algorithm to group the competency questions based on their topics. From "
#                 "our experience, LLM clustering has the best performance.",
#     flagging_mode="never"
# )

# with gr.Blocks() as testing_interface:
#     gr.Markdown(
#         """
#         # OntoChat
#         This is the final part of OntoChat which performs ontology testing based on the input ontology file and CQs.
#         """
#     )
#     with gr.Group():
#         api_key = gr.Textbox(
#             label="OpenAI API Key",
#             placeholder="If you have set the key in other tabs, you don't have to set it again.",
#             info="Please input your OpenAI API Key if you don't have it set up on your own machine. Please note that "
#                  "the key will only be used for this demo and will not be uploaded or used anywhere else."
#         )
#         api_key_btn = gr.Button(value="Set API Key")
#         api_key_btn.click(fn=set_openai_api_key, inputs=api_key, outputs=api_key)
#     ontology_file = gr.File(label="Ontology file")
#     ontology_desc = gr.Textbox(
#         label="Ontology description",
#         placeholder="Please provide a description of the ontology uploaded to provide basic information and "
#                     "additional context."
#     )
#     cq_testing_input = gr.Textbox(
#         label="Competency questions",
#         placeholder="Please provide the competency questions that you want to test with."
#     )
#     testing_btn = gr.Button(value="Test")
#     testing_output = gr.TextArea(label="Ontology testing output")
#     testing_btn.click(
#         fn=ontology_testing,
#         inputs=[
#             ontology_file, ontology_desc, cq_testing_input
#         ],
#         outputs=[
#             testing_output
#         ]
#     )
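
# Assemble the app. Only the first two tabs are active; the full five-tab
# pipeline is preserved in the commented-out lines for when the later stages
# are re-enabled.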
demo = gr.TabbedInterface(
    # [set_api_key, user_story_interface, cq_interface, clustering_interface, testing_interface],
    [set_api_key, user_story_interface],
    # ["Set API Key", "User Story Generation", "Competency Question Extraction", "Competency Question Analysis", "Ontology Testing"],
    ["Set API Key", "User Story Generation"]
)
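
# share=True asks Gradio to expose the app through a temporary public
# gradio.live link in addition to the local server.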
if __name__ == "__main__":
    demo.launch(share=True)