Skip to main content

Documentation Index

Fetch the complete documentation index at: https://docs.clarifeye.ai/llms.txt

Use this file to discover all available pages before exploring further.

Feedbacks are user responses to agent interactions; they can be used to improve the system, classify issues, and generate ground truths.

List Feedbacks

# List feedbacks with a specific status
pending = warehouse.list_feedbacks(status="to_review")

# Walk the results and print each feedback's fields
for fb in pending:
    print(f"Feedback ID: {fb.id}")
    print(f"Status: {fb.status}")
    print(f"Feedback: {fb.feedback}")
    print(f"Is Positive: {fb.is_positive}")
    print(f"Conversation: {fb.conversation_title}")
    print(f"Given by: {fb.given_by}")
    print("---")

# An empty status string lists feedbacks of every status
all_feedbacks = warehouse.list_feedbacks(status="")

Update Feedback Status

# Change a feedback's status; "to_review" and "reviewed" are the valid options
result = warehouse.update_feedback(feedback_id="feedback-id", status="reviewed")

# update_feedback returns a dict, so the new status is read by key
print(f"Updated status: {result['status']}")

Classify Feedbacks

Automatically classify feedbacks into categories to understand user concerns:
from openai import OpenAI

# Initialize your LLM client
llm_client = OpenAI(api_key="your-openai-key")

# Fetch the feedbacks that are still awaiting review
to_review = warehouse.list_feedbacks(status="to_review")

# Classify feedbacks into categories:
# - Ground Truth: Factual corrections or validations
# - Tone/Formatting: Presentation or style feedback
# - Content: Substance or data quality feedback
# - Other: Doesn't fit above categories
classifications = warehouse.classify_feedbacks(
    feedback_list=to_review,
    llm_client=llm_client,
    system_prompt="You are a helpful assistant that classifies feedbacks into categories.",
    model="gpt-4o-mini"
)

# Walk the classifications and report each result
for entry in classifications:
    fb = entry.feedback
    details = entry.feedback_classification

    print(f"Feedback: {fb.feedback}")
    print(f"Category: {details.classification}")
    print(f"Reason: {details.reason}")
    print("---")

    # Ground-truth feedbacks are considered handled once classified
    if details.classification == "Ground Truth":
        warehouse.update_feedback(fb.id, status="reviewed")

Create Ground Truths from Feedbacks

Automatically extract validated question-answer pairs from user feedback:
from openai import OpenAI

# Initialize your LLM client
llm_client = OpenAI(api_key="your-openai-key")

# Get feedbacks that contain ground truth information
feedbacks = warehouse.list_feedbacks(status="to_review")

# Optionally, classify first to filter for ground truth feedbacks.
# NOTE: this call performs classification, so its system prompt must
# describe the classification task — not the extraction task used below.
classifications = warehouse.classify_feedbacks(
    feedback_list=feedbacks,
    llm_client=llm_client,
    system_prompt="You are a helpful assistant that classifies feedbacks into categories.",
    model="gpt-4o-mini"
)

# Keep only the feedbacks classified as "Ground Truth"
ground_truth_feedbacks = [
    c.feedback for c in classifications
    if c.feedback_classification.classification == "Ground Truth"
]

# Extract validated question-answer pairs from those feedbacks
ground_truths = warehouse.create_ground_truths_from_feedbacks(
    feedbacks=ground_truth_feedbacks,
    llm_client=llm_client,
    system_prompt="You are a helpful assistant that extracts ground truths from feedbacks.",
    model="gpt-4o-mini"
)

# Persist each extracted ground truth, then close out its source feedback
for gt in ground_truths:
    created_gt = warehouse.create_ground_truth(
        query=gt.query,
        answer=gt.answer,
        additional_notes=gt.additional_notes,
        source_feedback_id=gt.source_feedback_id
    )
    # create_ground_truth returns a dict keyed by "id"
    print(f"Created ground truth: {created_gt['id']}")

    # Mark the source feedback as reviewed so it is not processed again
    warehouse.update_feedback(gt.source_feedback_id, status="reviewed")