import OpenAI from "openai";
import type {
AIProvider,
AIAnalysisRequest,
AIProviderResponse,
AIAnalysisResult,
} from "../types";
import { buildAnalysisPrompt } from "../prompts";
import { calculateCost } from "../costCalculator";
/**
 * AIProvider implementation backed by the OpenAI Chat Completions API.
 *
 * The underlying client is created only when an API key is supplied, so the
 * provider can always be constructed and probed via `isAvailable()` before
 * any network call is attempted.
 */
export class OpenAIProvider implements AIProvider {
  // null whenever no API key was configured; analyze() guards on this.
  private readonly client: OpenAI | null = null;

  /**
   * @param apiKey    OpenAI API key; omit to create an unavailable provider.
   * @param modelName Chat model identifier (defaults to "gpt-4o").
   * @param maxTokens Completion-token cap per request. Defaults to 500,
   *                  matching the previously hard-coded limit.
   */
  constructor(
    apiKey?: string,
    private readonly modelName: string = "gpt-4o",
    private readonly maxTokens: number = 500,
  ) {
    if (apiKey) {
      this.client = new OpenAI({ apiKey });
    }
  }

  /** Returns the configured model name (used as this provider's display name). */
  getName(): string {
    return this.modelName;
  }

  /** Stable provider identifier. */
  getProvider(): string {
    return "openai";
  }

  /** True when an API key was supplied at construction time. */
  isAvailable(): boolean {
    return this.client !== null;
  }

  /**
   * Sends the built analysis prompt to OpenAI and parses the JSON reply.
   *
   * @param request Check-in data to analyze.
   * @returns Parsed analysis result plus token/latency/cost metrics.
   * @throws Error when the provider is unconfigured, the response is empty,
   *         not valid JSON, or missing required fields, or when the API call
   *         itself fails — the message includes the elapsed time in ms.
   */
  async analyze(request: AIAnalysisRequest): Promise<AIProviderResponse> {
    if (!this.client) {
      throw new Error("OpenAI API key not configured");
    }

    const prompt = buildAnalysisPrompt(request);
    const startTime = Date.now();

    try {
      const response = await this.client.chat.completions.create({
        model: this.modelName,
        messages: [
          {
            role: "system",
            content:
              "You are a helpful AI coach analyzing progress check-ins. Respond with valid JSON only.",
          },
          {
            role: "user",
            content: prompt,
          },
        ],
        // Ask the API to guarantee a JSON object so JSON.parse below is safe.
        response_format: { type: "json_object" },
        max_tokens: this.maxTokens,
      });
      const latencyMs = Date.now() - startTime;

      const content = response.choices[0]?.message?.content;
      if (!content) {
        throw new Error("No content in response");
      }

      // Parse to a Partial first and validate before committing to the full
      // result type — the model may legally return JSON missing these fields.
      const parsed = JSON.parse(content) as Partial<AIAnalysisResult>;
      if (!parsed.insight || !parsed.sentiment) {
        throw new Error("Missing required fields in response");
      }
      const result = parsed as AIAnalysisResult;

      // Token counts may be absent on the usage object; treat missing as 0.
      // `??` (not `||`) so only null/undefined fall back, per intent.
      const promptTokens = response.usage?.prompt_tokens ?? 0;
      const completionTokens = response.usage?.completion_tokens ?? 0;
      const totalTokens = promptTokens + completionTokens;
      const estimatedCost = calculateCost(
        this.modelName,
        promptTokens,
        completionTokens,
      );

      return {
        result,
        metrics: {
          promptTokens,
          completionTokens,
          totalTokens,
          latencyMs,
          estimatedCost,
        },
        modelName: this.modelName,
        provider: "openai",
        endpoint: "/v1/chat/completions",
      };
    } catch (error) {
      // Re-wrap with timing context; preserve the original message text.
      const latencyMs = Date.now() - startTime;
      throw new Error(
        `OpenAI API error after ${latencyMs}ms: ${error instanceof Error ? error.message : String(error)}`,
      );
    }
  }
}