// Service Worker for Smart Survey Filler
// This acts as the bridge between the content script and the Backend API
/**
 * Fallback API base URL used when no `backendUrl` has been saved
 * to chrome.storage.local (see getConfig below).
 */
const DEFAULT_BACKEND_URL = 'http://localhost:3000/api';
/** One-time install hook: just logs so the install is visible in the SW console. */
function handleInstalled() {
  console.log('Smart Survey Filler AI Extension Installed');
}

chrome.runtime.onInstalled.addListener(handleInstalled);
/**
 * Read the extension configuration from chrome.storage.local.
 * Falls back to DEFAULT_BACKEND_URL when no URL has been stored.
 *
 * @returns {Promise<{backendUrl: string}>} resolved configuration
 */
async function getConfig() {
  const { backendUrl } = await chrome.storage.local.get(['backendUrl']);
  // `||` (not `??`) is intentional: an empty stored string also falls back.
  return { backendUrl: backendUrl || DEFAULT_BACKEND_URL };
}
// Helper to fetch data from backend (No Auth Required)

/**
 * Perform a JSON request against the configured backend.
 *
 * @param {string} endpoint - path starting with '/', appended to the base URL
 * @param {string} [method='GET'] - HTTP method
 * @param {*} [body=null] - payload to JSON-encode; omitted when null/undefined
 * @returns {Promise<*>} parsed JSON response body
 * @throws {Error} `API Error (<status>): <text>` on non-2xx responses, or the
 *   underlying network error (logged before being rethrown)
 */
async function fetchFromBackend(endpoint, method = 'GET', body = null) {
  const { backendUrl } = await getConfig();

  const headers = {
    'Content-Type': 'application/json',
  };

  const options = { method, headers };
  // Nullish check (was `if (body)`): falsy-but-valid JSON payloads such as
  // `false`, `0`, or `''` must still be serialized and sent.
  if (body !== null && body !== undefined) options.body = JSON.stringify(body);

  // Remove trailing slash if present in backendUrl
  const baseUrl = backendUrl.replace(/\/$/, '');

  try {
    const response = await fetch(`${baseUrl}${endpoint}`, options);

    if (!response.ok) {
      const errText = await response.text();
      throw new Error(`API Error (${response.status}): ${errText}`);
    }

    return response.json();
  } catch (err) {
    // Log here so failures surface in the service-worker console, then let
    // the caller decide how to report the error to the UI.
    console.error('Fetch error:', err);
    throw err;
  }
}
// Handle messages from content script or popup
//
// Message protocol:
//   { type: 'FETCH_PERSONAS' }
//     -> { success: true, data: rows } | { success: false, error }
//   { type: 'START_FILLING', surveyData, personaId }
//     -> { success: true, answers } | { success: false, error }
chrome.runtime.onMessage.addListener((request, sender, sendResponse) => {
  // Return true from a branch to keep the message channel open while we
  // respond asynchronously.

  if (request.type === 'FETCH_PERSONAS') {
    fetchFromBackend('/profiles')
      .then(data => sendResponse({ success: true, data: data.rows }))
      .catch(error => sendResponse({ success: false, error: error.message }));
    return true;
  }

  if (request.type === 'START_FILLING') {
    const { surveyData, personaId } = request;

    // 1. Get Persona Data
    fetchFromBackend(`/profiles/${personaId}`)
      .then(persona => {
        if (!persona) throw new Error('Persona not found');

        // 2. Prepare AI Prompt
        // NOTE: the closing sentence was truncated in the previous version
        // ("Do not include markdown formatting like"); restored so the model
        // is explicitly told not to wrap the answer in ```json fences.
        const prompt = `
You are acting as the following person:
Name: ${persona.name}
Details: ${JSON.stringify(persona.persona_data)}

Here is a list of survey questions found on the page:
${JSON.stringify(surveyData)}

Please provide the best answers for each question in a valid JSON object format where keys are the field IDs and values are the answers.
Example format: { "field_0": "John Doe", "field_1": "Engineer" }
Do not include markdown formatting like \`\`\`json code fences.
Just the raw JSON.
`;

        // 3. Call AI Proxy
        // We use the existing /openai/response endpoint which wraps the AI call
        return fetchFromBackend('/openai/response', 'POST', {
          input: [
            { role: 'system', content: 'You are an expert survey filler helper. Return ONLY valid JSON.' },
            { role: 'user', content: prompt }
          ]
        });
      })
      .then(aiResponse => {
        // AI Response from backend might be a complex object or just text depending on the backend wrapper
        // The backend wrapper usually returns { success: true, data: ... } or similar.
        // Assuming the standard LocalAIApi response structure which the content script tries to parse.
        sendResponse({ success: true, answers: aiResponse });
      })
      .catch(error => {
        console.error('Filling error:', error);
        sendResponse({ success: false, error: error.message });
      });

    return true;
  }
});