import os
from dotenv import find_dotenv, load_dotenv
from langtrace_python_sdk import langtrace
from langtrace_python_sdk.utils.with_root_span import with_langtrace_root_span
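
# Load environment variables and initialize Langtrace before importing the
# LLM clients below so their calls are picked up by the instrumentation
# (this ordering follows Langtrace's setup guidance).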
_ = load_dotenv(find_dotenv())
langtrace.init()
from cleanlab_tlm import TLM
from openai import OpenAI

openai_client = OpenAI()
# Cleanlab TLM client; the "explanation" log option returns a rationale
# alongside each trustworthiness score.
tlm = TLM(
    api_key=os.getenv("TLM_API_KEY"),
    options={"log": ["explanation"], "model": "gpt-4o-mini"},
)


def inference(prompt: str) -> str:
    """Send the prompt to OpenAI and return the model's text response."""
    response = openai_client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[
            {"role": "user", "content": prompt},
        ],
        stream=False,
    )
    response_text = response.choices[0].message.content
    return response_text


@with_langtrace_root_span("Get Trustworthiness Score")
def inference_get_trustworthiness_score(prompt: str):
    """Generate a response and score its trustworthiness with Cleanlab TLM."""
    response = inference(prompt)
    return tlm.get_trustworthiness_score(prompt, response)


print(inference_get_trustworthiness_score("How many r's are in strawberry?"))
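# Expected to print a dict containing a numeric trustworthiness score and,
# because "explanation" logging is enabled above, a log entry explaining the
# score; exact keys and values depend on the cleanlab_tlm version and the
# model's response.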