import OpenAI from "openai";
import { ProvenanceKit } from "@provenancekit/sdk";
import { aiExtension } from "@provenancekit/extensions";
import { createHash } from "crypto";
const openai = new OpenAI();

// Fail fast with a clear error instead of hiding a missing key behind a
// non-null assertion (`!`), which would pass `undefined` to the SDK at runtime.
const pkApiKey = process.env.PK_API_KEY;
if (!pkApiKey) {
  throw new Error("PK_API_KEY environment variable is not set");
}
const pk = new ProvenanceKit({ apiKey: pkApiKey });
/**
 * Runs a chat completion for `prompt` and records a provenance event linking
 * the (hashed) prompt to the generated output via ProvenanceKit.
 *
 * @param prompt    User prompt sent verbatim to the model.
 * @param sessionId Provenance session the record is attached to.
 * @returns The model output text and its sha256-based content id.
 * @throws Error if the completion response contains no choices.
 */
async function generateWithProvenance(
  prompt: string,
  sessionId: string,
): Promise<{ output: string; outputCid: string }> {
  // TODO(review): these entities are re-registered on every call despite the
  // original intent to "register once and cache" — hoist and cache the ids at
  // module level if registration is not idempotent/cheap.
  // The two registrations are independent, so run them concurrently.
  const [humanId, aiId] = await Promise.all([
    pk.entity({ role: "human", name: "User" }),
    pk.entity({
      role: "ai",
      name: "gpt-4o",
      aiAgent: { model: { provider: "openai", model: "gpt-4o" } },
    }),
  ]);

  // Run the model.
  const completion = await openai.chat.completions.create({
    model: "gpt-4o",
    messages: [{ role: "user", content: prompt }],
  });

  // The response type allows an empty `choices` array; guard once and reuse
  // rather than indexing `choices[0]` unchecked in two places.
  const choice = completion.choices[0];
  if (!choice) {
    throw new Error("OpenAI returned no completion choices");
  }
  const output = choice.message.content ?? "";

  // Content-address both sides of the inference for the provenance record.
  const sha256 = (text: string): string =>
    "sha256:" + createHash("sha256").update(text).digest("hex");
  const promptHash = sha256(prompt);
  const outputCid = sha256(output);

  // Record the inference as a provenance event (prompt -> output).
  await pk.file({
    type: "model.infer",
    performedBy: aiId,
    cid: outputCid,
    inputs: [{ cid: promptHash }],
    sessionId,
    extensions: {
      "ext:ai@1.0.0": aiExtension.parse({
        provider: "openai",
        model: "gpt-4o",
        promptHash,
        tokensUsed: completion.usage?.total_tokens,
        promptTokens: completion.usage?.prompt_tokens,
        completionTokens: completion.usage?.completion_tokens,
        finishReason: choice.finish_reason,
      }),
    },
    attributions: [
      { entityId: humanId, role: "prompter", confidence: 1.0 },
    ],
  });

  return { output, outputCid };
}