# Walkthrough: AI-Powered Plist Analysis in launchd-icon-helper
# This demonstrates the three-layer architecture: IPC handler, LLM call, and React display component.
# ──────────────────────────────────────────────────
# STEP 1: Main Process IPC Handler (Electron main entry file)
# ──────────────────────────────────────────────────
# Register an IPC handler that the renderer can invoke when a user
# clicks 'Explain this service'. It accepts parsed plist data,
# checks the local cache first, then calls the LLM if needed.
# const { ipcMain } = require('electron');
# const Store = require('electron-store');
# const cache = new Store({ name: 'ai-explanations-cache' });
#
# ipcMain.handle('explain-service', async (event, plistData) => {
# // plistData shape: { label, programPath, arguments, schedule }
#
# // Check cache first — keyed by service label to avoid redundant API calls
# const cached = cache.get(plistData.label);
# if (cached) {
# return cached; // Return early with cached structured JSON
# }
#
# // Build the prompt with the raw plist fields
# const prompt = `You are a macOS systems expert. Analyze this launchd service and return JSON with exactly three keys:
# - "description": a plain-English explanation of what this service does (2-3 sentences)
# - "safety_rating": one of "safe", "caution", or "critical" indicating risk of disabling it
# - "schedule_explanation": a human-readable description of when and how often it runs
#
# Service plist data:
# Label: ${plistData.label}
# Program: ${plistData.programPath}
# Arguments: ${JSON.stringify(plistData.arguments)}
# Schedule: ${JSON.stringify(plistData.schedule)}
# `;
#
# // Call the LLM — this example uses OpenAI, but you could swap in Ollama for local inference
# const response = await fetch('https://api.openai.com/v1/chat/completions', {
# method: 'POST',
# headers: {
# 'Content-Type': 'application/json',
# 'Authorization': `Bearer ${process.env.OPENAI_API_KEY}`
# },
# body: JSON.stringify({
# model: 'gpt-4o',
# messages: [{ role: 'user', content: prompt }],
# response_format: { type: 'json_object' } // Enforce structured JSON output
# })
# });
#
# const data = await response.json();
# const explanation = JSON.parse(data.choices[0].message.content);
#
# // Cache the result keyed by service label
# cache.set(plistData.label, explanation);
#
# return explanation;
# });
# ──────────────────────────────────────────────────
# STEP 2: React Component — ServiceExplainer
# ──────────────────────────────────────────────────
# This component lives in the service detail/edit view.
# It renders the 'Explain this service' button and displays
# the AI-generated analysis with a color-coded safety badge.
# // ServiceExplainer.jsx
# import React, { useState } from 'react';
#
# const SAFETY_COLORS = {
# safe: '#22c55e', // green — go ahead and disable
# caution: '#f59e0b', // amber — research before disabling
# critical: '#ef4444' // red — do not disable unless you know what you're doing
# };
#
# export default function ServiceExplainer({ plistData }) {
# const [explanation, setExplanation] = useState(null);
# const [loading, setLoading] = useState(false);
#
# const handleExplain = async () => {
# setLoading(true);
# try {
# // Invoke the IPC handler registered in the main process
# const result = await window.electronAPI.explainService(plistData);
# setExplanation(result);
# } catch (err) {
# console.error('Failed to explain service:', err);
# } finally {
# setLoading(false);
# }
# };
#
# return (
# <div className="service-explainer">
# <button onClick={handleExplain} disabled={loading}>
# {loading ? 'Analyzing…' : '🤖 Explain this service'}
# </button>
#
# {explanation && (
# <div className="explanation-card">
# <p className="description">{explanation.description}</p>
#
# {/* Color-coded safety badge */}
# <span
# className="safety-badge"
# style={{ backgroundColor: SAFETY_COLORS[explanation.safety_rating] }}
# >
# {explanation.safety_rating.toUpperCase()}
# </span>
#
# <p className="schedule">
# <strong>Schedule:</strong> {explanation.schedule_explanation}
# </p>
# </div>
# )}
# </div>
# );
# }
# ──────────────────────────────────────────────────
# STEP 3: Preload Bridge (expose IPC to renderer securely)
# ──────────────────────────────────────────────────
# In your preload script, expose the explain-service channel
# so the React renderer can call it without direct node access.
# // preload.js
# const { contextBridge, ipcRenderer } = require('electron');
#
# contextBridge.exposeInMainWorld('electronAPI', {
# explainService: (plistData) => ipcRenderer.invoke('explain-service', plistData)
# });
# ──────────────────────────────────────────────────
# OPTIONAL: Swap OpenAI for local Ollama inference
# ──────────────────────────────────────────────────
# If you want fully offline, privacy-preserving analysis:
#
# const response = await fetch('http://localhost:11434/api/generate', {
# method: 'POST',
# headers: { 'Content-Type': 'application/json' },
# body: JSON.stringify({
# model: 'llama3',
# prompt: prompt,
# format: 'json',
# stream: false
# })
# });
# const data = await response.json();
# const explanation = JSON.parse(data.response);