# src/ai_insights.py — AI-powered health insight summarization for Jeeves
# This module exposes a single public function, generate_health_insights(),
# which accepts a consolidated health snapshot dict and returns a structured,
# plain-language interpretation.
import os
from openai import OpenAI  # pip install "openai>=1.0.0"
# Initialize the client once at module level.
# Supports OpenAI API or any compatible endpoint (ollama, vLLM, etc.)
# by overriding OPENAI_API_BASE in your environment.
client = OpenAI(
    api_key=os.getenv("OPENAI_API_KEY", "sk-placeholder"),
    base_url=os.getenv("OPENAI_API_BASE", "https://api.openai.com/v1"),
)
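# For unattended hourly runs it can help to bound request time. The v1 client
# also accepts timeout and retry settings; an illustrative (not prescriptive)
# configuration:
#
#     client = OpenAI(
#         api_key=os.getenv("OPENAI_API_KEY", "sk-placeholder"),
#         base_url=os.getenv("OPENAI_API_BASE", "https://api.openai.com/v1"),
#         timeout=30.0,    # seconds; fail fast rather than stall a cron cycle
#         max_retries=2,   # the library's default; raise if rate limits bite
#     )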
# We use a compact model (gpt-4o-mini) to keep token costs low on hourly runs.
# Switch to a local model name like "llama3" if routing through ollama.
MODEL = os.getenv("JEEVES_LLM_MODEL", "gpt-4o-mini")
# System prompt: establishes health-domain expertise and output structure.
# Kept intentionally concise — every token counts at hourly frequency.
SYSTEM_PROMPT = """You are a personal health analyst. Given a user's biometric
scores and any active environmental safety alerts, respond with EXACTLY three
sections separated by blank lines:
1. SUMMARY — One to two sentences interpreting the overall health snapshot in
plain language. Reference specific scores.
2. CORRELATIONS — Note any meaningful connections between biometric readings
and active safety/environmental alerts. If none exist, say so briefly.
3. RECOMMENDATIONS — Two to three concise, actionable suggestions for the day
based on the data.
Be direct. No disclaimers. No markdown headers. Keep total response under 180
words to fit a Telegram message comfortably."""
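# For reference, a response that follows this contract looks roughly like the
# block below (all values invented purely for illustration):
#
#     Readiness of 82 and sleep score of 75 point to a solid recovery day,
#     though resting heart rate is slightly elevated.
#
#     The moderate air-quality alert may explain the slightly lower SpO2
#     reading; no other correlations stand out.
#
#     Keep outdoor exertion light while the alert is active, hydrate early,
#     and target an earlier bedtime to consolidate the sleep gains.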
def _build_user_prompt(snapshot: dict) -> str:
    """Convert the consolidated snapshot dict into a token-efficient user prompt.

    Only the fields the LLM needs are serialized; raw API payloads are omitted.
    Expected snapshot shape:
        oura: {readiness, sleep_score, activity_score, stress, resilience,
               heart_rate_avg, heart_rate_resting, spo2}
        sirenwise_alerts: [{title, severity, description}]
    """
    oura = snapshot.get("oura", {})
    alerts = snapshot.get("sirenwise_alerts", [])

    # Build a compact text block instead of dumping raw JSON.
    lines = [
        f"Readiness: {oura.get('readiness', 'N/A')}",
        f"Sleep score: {oura.get('sleep_score', 'N/A')}",
        f"Activity score: {oura.get('activity_score', 'N/A')}",
        f"Stress level: {oura.get('stress', 'N/A')}",
        f"Resilience: {oura.get('resilience', 'N/A')}",
        f"Avg heart rate: {oura.get('heart_rate_avg', 'N/A')} bpm",
        f"Resting heart rate: {oura.get('heart_rate_resting', 'N/A')} bpm",
        f"SpO2: {oura.get('spo2', 'N/A')}%",
    ]
    if alerts:
        lines.append("\nActive environmental alerts:")
        for alert in alerts:
            lines.append(
                f"- [{alert.get('severity', 'unknown')}] "
                f"{alert.get('title', 'Alert')}: {alert.get('description', '')}"
            )
    else:
        lines.append("\nNo active environmental alerts.")
    return "\n".join(lines)
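# For example, a partial snapshot such as
#     {"oura": {"readiness": 82, "sleep_score": 75}, "sirenwise_alerts": []}
# yields a prompt in which the absent fields degrade to "N/A":
#     Readiness: 82
#     Sleep score: 75
#     Activity score: N/A
#     (...remaining fields likewise...)
#
#     No active environmental alerts.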
def generate_health_insights(snapshot: dict) -> str:
    """Generate an AI-powered health narrative from a consolidated snapshot.

    This is the public API. Call it from monitor.py after data aggregation
    but before the delivery step (Telegram / CLI).

    Returns a plain-text string ready to append to the user message. On
    failure (network, rate limit, bad key), returns a graceful fallback so
    the raw data delivery is never blocked.
    """
    user_prompt = _build_user_prompt(snapshot)
    try:
        response = client.chat.completions.create(
            model=MODEL,
            messages=[
                {"role": "system", "content": SYSTEM_PROMPT},
                {"role": "user", "content": user_prompt},
            ],
            temperature=0.3,  # Low creativity: we want a consistent, factual tone
            max_tokens=250,  # Hard ceiling to stay Telegram-friendly
        )
        return response.choices[0].message.content.strip()
    except Exception as exc:
        # Never let the insight layer break the delivery pipeline.
        # Log the error and return a human-readable fallback.
        print(f"[ai_insights] LLM call failed: {exc}")
        return "(AI health summary unavailable this cycle — raw data shown above.)"
# ---------------------------------------------------------------------------
# Wiring into the existing monitor pipeline (src/monitor.py)
# ---------------------------------------------------------------------------
# In your existing delivery function, import and call generate_health_insights:
#
# from ai_insights import generate_health_insights
#
# def deliver_snapshot(snapshot: dict) -> None:
#     """Send consolidated snapshot + AI narrative via Telegram/CLI."""
#     raw_message = format_raw_snapshot(snapshot)  # your existing formatter
#
#     # Generate the AI-powered interpretation
#     insights = generate_health_insights(snapshot)
#
#     # Combine raw data and narrative into a single message
#     full_message = (
#         f"{raw_message}\n\n"
#         f"--- AI Health Insights ---\n"
#         f"{insights}"
#     )
#
#     send_telegram(full_message)  # existing Telegram delivery
#     print(full_message)          # CLI fallback
#
# ---------------------------------------------------------------------------
# Environment variables to configure:
# OPENAI_API_KEY — required for OpenAI; ignored if using local ollama
# OPENAI_API_BASE — set to http://localhost:11434/v1 for ollama
# JEEVES_LLM_MODEL — defaults to gpt-4o-mini; set to llama3 for local
# ---------------------------------------------------------------------------
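# ---------------------------------------------------------------------------
# Minimal smoke test for local runs. The sample values below are illustrative
# only; in production the snapshot comes from monitor.py's aggregation step.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    sample_snapshot = {
        "oura": {
            "readiness": 82,
            "sleep_score": 75,
            "activity_score": 68,
            "stress": "normal",
            "resilience": "solid",
            "heart_rate_avg": 64,
            "heart_rate_resting": 52,
            "spo2": 97,
        },
        "sirenwise_alerts": [
            {
                "title": "Air quality advisory",
                "severity": "moderate",
                "description": "Elevated PM2.5 levels expected until 18:00.",
            }
        ],
    }
    print(_build_user_prompt(sample_snapshot))
    print(generate_health_insights(sample_snapshot))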