forked from mfekadu/nimbus-transformer
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathmain.py
executable file
·62 lines (54 loc) · 1.59 KB
/
main.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
#!/usr/bin/env python3
from os import listdir
from pathlib import Path
from typing import Tuple, cast

import pandas as pd

from ntfp.ntfp import get_context, transformer
from ntfp.ntfp_types import (
    Answer,
    Context,
    End,
    ExtraDataDict,
    Query,
    Question,
    Score,
    Start,
    WebPage,
)
if __name__ == "__main__":
    # Interactive QA pipeline: prompt for a question, fetch a web context
    # for it, run the transformer QA model, and append the result as one
    # row of data.csv for later analysis.
    print("\n")
    user_input: str = input("question: ")
    question: Question = Question(user_input)
    # get_context returns the search query used, the web page fetched,
    # and the extracted text context for that page.
    google_data: Tuple[Query, WebPage, Context] = get_context(question)
    query, page, context = google_data
    print("len(context): ", len(context))
    # Run the QA transformer over the (question, context) pair.
    trfrmr_data: Tuple[Answer, ExtraDataDict] = transformer(question, context)
    answer, extra_data = trfrmr_data
    print("\n\n\nanswer: ", answer)
    CSV_FILENAME = "data.csv"
    score: Score = cast(Score, extra_data["score"])
    start: Start = cast(Start, extra_data["start"])
    end: End = cast(End, extra_data["end"])
    tokenizer: str = extra_data["tokenizer"]
    model: str = extra_data["model"]
    # fmt:off
    data = {
        "question": [question],
        "query": [query],
        "answer": [answer],
        "score": [score],
        "start": [start],
        "end": [end],
        "tokenizer": [tokenizer],
        "model": [model],
        "context": [context],
        "page": [page],
    }
    # fmt:on
    df: pd.DataFrame = pd.DataFrame(data)
    # Path.is_file() is cheaper and more robust than scanning listdir("."):
    # it also works if CSV_FILENAME is ever a path rather than a bare name.
    if Path(CSV_FILENAME).is_file():
        # index=False: the default pandas index restarts at 0 on every
        # append, which would fill the first CSV column with zeros.
        df.to_csv(CSV_FILENAME, mode="a", header=False, index=False)
        print(f"appended new row to {CSV_FILENAME}")
    else:
        df.to_csv(CSV_FILENAME, index=False)
        print(f"created {CSV_FILENAME} and appended a row.")