Releases/rc trulens eval 0.18.3 (#652)
* bump version

* version bump
joshreini1 authored Dec 7, 2023
1 parent fd3c94f commit 21d3dcf
Showing 20 changed files with 60 additions and 133 deletions.
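The diff below mostly bumps the pinned package versions in the example notebooks and quickstart scripts to the new release. A representative updated install cell, assembled from pins that appear in this commit (the set of extra packages varies per notebook), would look roughly like:

# ! pip install trulens_eval==0.18.3 openai==1.3.7 langchain==0.0.347 litellm==1.11.1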
@@ -18,7 +18,7 @@
"metadata": {},
"outputs": [],
"source": [
"# ! pip install anthropic trulens_eval==0.17.0 langchain==0.0.323"
"# ! pip install anthropic trulens_eval==0.18.3 langchain==0.0.347"
]
},
{
@@ -29,7 +29,7 @@
"metadata": {},
"outputs": [],
"source": [
"#! pip install trulens-eval==0.18.2 llama_index==0.9.13 langchain==0.0.346 html2text==2020.1.16"
"#! pip install trulens-eval==0.18.3 llama_index==0.9.13 langchain==0.0.346 html2text==2020.1.16"
]
},
{
2 changes: 1 addition & 1 deletion trulens_eval/examples/expositional/models/bedrock.ipynb
@@ -35,7 +35,7 @@
"metadata": {},
"outputs": [],
"source": [
"# ! pip install trulens_eval==0.18.2 langchain==0.0.305 boto3==1.28.59"
"# ! pip install trulens_eval==0.18.3 langchain==0.0.305 boto3==1.28.59"
]
},
{
@@ -55,7 +55,7 @@
},
"outputs": [],
"source": [
"# ! pip install --upgrade sagemaker datasets boto3 trulens_eval"
"# ! pip install trulens_eval==0.18.3 sagemaker datasets boto3 "
]
},
{
@@ -18,7 +18,7 @@
"metadata": {},
"outputs": [],
"source": [
"#! pip install google-cloud-aiplatform==1.36.3 litellm==0.14.1 trulens_eval==0.17.0 langchain==0.0.323"
"#! pip install google-cloud-aiplatform==1.36.3 litellm==1.11.1 trulens_eval==0.18.3 langchain==0.0.347"
]
},
{
@@ -20,7 +20,7 @@
"metadata": {},
"outputs": [],
"source": [
"#! pip install openai==0.28.1 litellm==0.14.1 trulens_eval==0.17.0 langchain==0.0.323"
"#! pip install openai==1.3.7 litellm==1.11.1 trulens_eval==0.18.3 langchain==0.0.347"
]
},
{
2 changes: 1 addition & 1 deletion trulens_eval/examples/quickstart/groundtruth_evals.ipynb
@@ -29,7 +29,7 @@
"metadata": {},
"outputs": [],
"source": [
"# ! pip install trulens_eval==0.18.2 openai==1.3.1"
"# ! pip install trulens_eval==0.18.3 openai==1.3.7"
]
},
{
2 changes: 1 addition & 1 deletion trulens_eval/examples/quickstart/human_feedback.ipynb
@@ -17,7 +17,7 @@
"metadata": {},
"outputs": [],
"source": [
"# ! pip install trulens_eval==0.18.2 openai==1.3.1"
"# ! pip install trulens_eval==0.18.3 openai==1.3.7"
]
},
{
@@ -28,7 +28,7 @@
"metadata": {},
"outputs": [],
"source": [
"# ! pip install trulens_eval==0.18.2 openai==1.3.1"
"# ! pip install trulens_eval==0.18.3 openai==1.3.7"
]
},
{
@@ -31,7 +31,7 @@
"metadata": {},
"outputs": [],
"source": [
"# pip install trulens_eval==0.18.2 llama_index>=0.8.69 html2text>=2020.1.16 "
"# pip install trulens_eval==0.18.3 llama_index>=0.8.69 html2text>=2020.1.16 "
]
},
{
2 changes: 1 addition & 1 deletion trulens_eval/examples/quickstart/prototype_evals.ipynb
@@ -28,7 +28,7 @@
"metadata": {},
"outputs": [],
"source": [
"# ! pip install trulens_eval==0.18.2"
"# ! pip install trulens_eval==0.18.3"
]
},
{
76 changes: 19 additions & 57 deletions trulens_eval/examples/quickstart/py_script_quickstarts/all_tools.py
@@ -14,14 +14,13 @@
# In[ ]:


-# ! pip install trulens_eval==0.18.2 openai==1.3.1
+# ! pip install trulens_eval==0.18.3 openai==1.3.7


# In[ ]:


import os

os.environ["OPENAI_API_KEY"] = "..."
os.environ["HUGGINGFACE_API_KEY"] = "..."

@@ -34,22 +33,18 @@
from IPython.display import JSON

# Imports main tools:
-from trulens_eval import Feedback
-from trulens_eval import Huggingface
-from trulens_eval import Tru
-from trulens_eval import TruChain
+from trulens_eval import TruChain, Feedback, Huggingface, Tru
from trulens_eval.schema import FeedbackResult

tru = Tru()

# Imports from langchain to build app. You may need to install langchain first
# with the following:
# ! pip install langchain>=0.0.170
from langchain.chains import LLMChain
from langchain.llms import OpenAI
-from langchain.prompts import ChatPromptTemplate
+from langchain.prompts import ChatPromptTemplate, PromptTemplate
from langchain.prompts import HumanMessagePromptTemplate
-from langchain.prompts import PromptTemplate


# ### Create Simple LLM Application
#
@@ -191,7 +186,7 @@
# In[ ]:


-# pip install trulens_eval==0.18.2 llama_index>=0.8.69 html2text>=2020.1.16
+# pip install trulens_eval==0.18.3 llama_index>=0.8.69 html2text>=2020.1.16


# ### Add API keys
@@ -201,7 +196,6 @@


import os

os.environ["OPENAI_API_KEY"] = "..."


@@ -210,9 +204,7 @@
# In[ ]:


-from trulens_eval import Feedback
-from trulens_eval import Tru
-from trulens_eval import TruLlama
+from trulens_eval import Feedback, Tru, TruLlama
from trulens_eval.feedback import Groundedness
from trulens_eval.feedback.provider.openai import OpenAI

@@ -324,25 +316,16 @@
# In[ ]:


-# ! pip install trulens_eval==0.18.2 chromadb==0.4.18 openai==1.3.1
+# ! pip install trulens_eval==0.18.3 chromadb==0.4.18 openai==1.3.7


# In[ ]:


import os

os.environ["OPENAI_API_KEY"] = "sk-..."


-# In[ ]:
-
-
-from openai import OpenAI
-
-oai_client = OpenAI()
-
-
# ## Get Data
#
# In this case, we'll just initialize some simple text in the notebook.
@@ -366,6 +349,9 @@
# In[ ]:


+from openai import OpenAI
+oai_client = OpenAI()
+
oai_client.embeddings.create(
model="text-embedding-ada-002",
input=university_info
@@ -377,15 +363,12 @@

import chromadb
from chromadb.utils.embedding_functions import OpenAIEmbeddingFunction
-from openai import OpenAI
-
-oai_client = OpenAI()

embedding_function = OpenAIEmbeddingFunction(api_key=os.environ.get('OPENAI_API_KEY'),
model_name="text-embedding-ada-002")


-chroma_client = chromadb.PersistentClient(path="./chromadb")
+chroma_client = chromadb.Client()
vector_store = chroma_client.get_or_create_collection(name="Universities",
embedding_function=embedding_function)

@@ -407,7 +390,6 @@

from trulens_eval import Tru
from trulens_eval.tru_custom_app import instrument

tru = Tru()


@@ -464,13 +446,12 @@ def query(self, query: str) -> str:
# In[ ]:


-import numpy as np
-
-from trulens_eval import Feedback
-from trulens_eval import Select
+from trulens_eval import Feedback, Select
from trulens_eval.feedback import Groundedness
from trulens_eval.feedback.provider.openai import OpenAI as fOpenAI

+import numpy as np

# Initialize provider class
fopenai = fOpenAI()

@@ -507,7 +488,6 @@ def query(self, query: str) -> str:


from trulens_eval import TruCustomApp

tru_rag = TruCustomApp(rag,
app_id = 'RAG v1',
feedbacks = [f_groundedness, f_qa_relevance, f_context_relevance])
@@ -549,7 +529,7 @@ def query(self, query: str) -> str:
# In[ ]:


-# ! pip install trulens_eval==0.18.2
+# ! pip install trulens_eval==0.18.3


# In[ ]:
@@ -569,7 +549,6 @@ def query(self, query: str) -> str:


import os

os.environ["OPENAI_API_KEY"] = "..."


@@ -579,12 +558,10 @@ def query(self, query: str) -> str:


from openai import OpenAI

oai_client = OpenAI()

from trulens_eval.tru_custom_app import instrument


class APP:
@instrument
def completion(self, prompt):
@@ -626,7 +603,6 @@ def completion(self, prompt):

# add trulens as a context manager for llm_app with dummy feedback
from trulens_eval import TruCustomApp

tru_app = TruCustomApp(llm_app,
app_id = 'LLM App v1',
feedbacks = [f_positive_sentiment])
@@ -656,7 +632,7 @@ def completion(self, prompt):
# In[ ]:


-# ! pip install trulens_eval==0.18.2 openai==1.3.1
+# ! pip install trulens_eval==0.18.3 openai==1.3.7


# In[ ]:
@@ -690,12 +666,10 @@ def completion(self, prompt):


from openai import OpenAI

oai_client = OpenAI()

from trulens_eval.tru_custom_app import instrument


class APP:
@instrument
def completion(self, prompt):
@@ -716,7 +690,6 @@ def completion(self, prompt):

# add trulens as a context manager for llm_app
from trulens_eval import TruCustomApp

tru_app = TruCustomApp(llm_app, app_id = 'LLM App v1')


@@ -745,9 +718,7 @@ def completion(self, prompt):
# In[ ]:


-from ipywidgets import Button
-from ipywidgets import HBox
-from ipywidgets import VBox
+from ipywidgets import Button, HBox, VBox

thumbs_up_button = Button(description='👍')
thumbs_down_button = Button(description='👎')
@@ -802,14 +773,13 @@ def on_thumbs_down_button_clicked(b):
# In[ ]:


-# ! pip install trulens_eval==0.18.2 openai==1.3.1
+# ! pip install trulens_eval==0.18.3 openai==1.3.7


# In[2]:


import os

os.environ["OPENAI_API_KEY"] = "..."


@@ -827,12 +797,10 @@ def on_thumbs_down_button_clicked(b):


from openai import OpenAI

oai_client = OpenAI()

from trulens_eval.tru_custom_app import instrument


class APP:
@instrument
def completion(self, prompt):
@@ -875,7 +843,6 @@ def completion(self, prompt):

# add trulens as a context manager for llm_app
from trulens_eval import TruCustomApp

tru_app = TruCustomApp(llm_app, app_id = 'LLM App v1', feedbacks = [f_groundtruth])


@@ -1040,11 +1007,7 @@ def completion(self, prompt):
# In[ ]:


-from trulens_eval import Feedback
-from trulens_eval import Provider
-from trulens_eval import Select
-from trulens_eval import Tru
-
+from trulens_eval import Provider, Feedback, Select, Tru

class StandAlone(Provider):
def custom_feedback(self, my_text_field: str) -> float:
@@ -1105,7 +1068,6 @@ def custom_feedback(self, my_text_field: str) -> float:

# Aggregators will run on the same dict keys.
import numpy as np

multi_output_feedback = Feedback(lambda input_param: {'output_key1': 0.1, 'output_key2': 0.9}, name="multi-agg").on(
input_param=Select.RecordOutput
).aggregate(np.mean)
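The openai==1.3.7 pin used across these examples goes with the 1.x client interface already shown in the script above (from openai import OpenAI). A minimal sketch of that call pattern, with an illustrative model name and prompt that are not taken from the diff:

from openai import OpenAI

oai_client = OpenAI()  # reads OPENAI_API_KEY from the environment

# Chat-completion call in the openai 1.x style used by the quickstarts
response = oai_client.chat.completions.create(
    model="gpt-3.5-turbo",
    messages=[{"role": "user", "content": "What is TruLens?"}],
)
print(response.choices[0].message.content)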