30 lines
1.2 KiB
PHP
30 lines
1.2 KiB
PHP
<?php
|
|
require_once __DIR__ . '/../ai/LocalAIApi.php';
|
|
|
|
/**
 * Runs AI-based moderation on a piece of user-submitted text.
 *
 * Result shape:
 *  - 'is_safe' (bool)   — whether the content passed moderation
 *  - 'reason'  (string) — optional explanation when content is flagged
 *
 * Fails open: if the AI backend errors out or returns an unparseable
 * payload, the content is treated as safe so infrastructure failures
 * never block users.
 *
 * @param string $content Raw user-submitted text.
 * @return array{is_safe: bool, reason?: string}
 */
function moderateContent($content) {
    // Whitespace-only input has nothing to moderate. Note: an explicit
    // comparison is used instead of empty(), which would also treat the
    // legitimate message "0" as empty and silently skip moderation.
    if (trim($content) === '') return ['is_safe' => true];

    // Bypass moderation for video platforms as they are handled by their own safety measures
    // and often trigger false positives in AI moderation due to "lack of context".
    // NOTE(review): this skips moderation for the WHOLE message whenever it merely
    // contains a video link, so hostile text can ride along with a pasted URL.
    // Consider moderating the surrounding text separately — confirm with product.
    if (preg_match('/(?:https?:\/\/)?(?:www\.)?(?:youtube\.com|youtu\.be|dailymotion\.com|dai\.ly|vimeo\.com)\//i', $content)) {
        return ['is_safe' => true];
    }

    $resp = LocalAIApi::createResponse([
        'input' => [
            ['role' => 'system', 'content' => 'You are a content moderator. Analyze the message and return a JSON object with "is_safe" (boolean) and "reason" (string, optional). Safe means no hate speech, extreme violence, or explicit sexual content. Do not flag URLs as unsafe simply because you cannot see the content behind them.'],
            ['role' => 'user', 'content' => $content],
        ],
    ]);

    if (!empty($resp['success'])) {
        $result = LocalAIApi::decodeJsonFromResponse($resp);
        // isset() (not array_key_exists) is deliberate: a JSON null for
        // is_safe falls through to the fail-open default below.
        if (is_array($result) && isset($result['is_safe'])) {
            // Normalise: the model may emit "true"/"false" strings or 0/1;
            // the raw string "false" is truthy in PHP and would otherwise
            // make flagged content look safe to a truthiness check.
            $result['is_safe'] = filter_var($result['is_safe'], FILTER_VALIDATE_BOOLEAN);
            return $result;
        }
    }

    // Default to safe if AI fails, to avoid blocking users
    return ['is_safe' => true];
}
|