v7
This commit is contained in:
parent 84f67bc019
commit 26e52deb33
BIN ai/__pycache__/__init__.cpython-311.pyc (Normal file)
Binary file not shown.
BIN ai/__pycache__/local_ai_api.cpython-311.pyc (Normal file)
Binary file not shown.
ai/local_ai_api.py
@@ -111,7 +111,6 @@ def create_response(params: Dict[str, Any], options: Optional[Dict[str, Any]] =
 
     return initial
 
-
 def request(path: Optional[str], payload: Dict[str, Any], options: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
     """Perform a raw request to the AI proxy."""
     cfg = _config()
@@ -144,7 +143,7 @@ def request(path: Optional[str], payload: Dict[str, Any], options: Optional[Dict
     headers: Dict[str, str] = {
         "Content-Type": "application/json",
         "Accept": "application/json",
-        cfg["project_header"]: project_uuid,
+        cfg["project_header"].strip(): project_uuid,
     }
     extra_headers = options.get("headers")
     if isinstance(extra_headers, Iterable):
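The `.strip()` added to the header name guards against whitespace picked up when `project_header` is read from configuration. A minimal sketch of why this matters (the header name, URL, and value below are hypothetical): `http.client` validates header names when the request is actually sent, and a name containing a stray newline raises ValueError.

from urllib import request as urlrequest

# Hypothetical name as it might come out of a .env file, with a trailing newline:
raw_header_name = "X-Project-Uuid\n"

req = urlrequest.Request("http://127.0.0.1:8000/ping", method="GET")
# Without .strip(), urlopen() would fail with ValueError ("Invalid header name")
# because http.client rejects header names containing CR/LF at send time.
req.add_header(raw_header_name.strip(), "123e4567-e89b-12d3-a456-426614174000")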
@@ -156,7 +155,6 @@ def request(path: Optional[str], payload: Dict[str, Any], options: Optional[Dict
     body = json.dumps(payload, ensure_ascii=False).encode("utf-8")
     return _http_request(url, "POST", body, headers, timeout, verify_tls)
 
-
 def fetch_status(ai_request_id: Any, options: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
     """Fetch status for a queued AI request."""
     cfg = _config()
@@ -179,7 +177,7 @@ def fetch_status(ai_request_id: Any, options: Optional[Dict[str, Any]] = None) -
 
     headers: Dict[str, str] = {
         "Accept": "application/json",
-        cfg["project_header"]: project_uuid,
+        cfg["project_header"].strip(): project_uuid,
     }
     extra_headers = options.get("headers")
     if isinstance(extra_headers, Iterable):
@@ -190,7 +188,6 @@ def fetch_status(ai_request_id: Any, options: Optional[Dict[str, Any]] = None) -
 
     return _http_request(url, "GET", None, headers, timeout, verify_tls)
 
-
 def await_response(ai_request_id: Any, options: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
     """Poll status endpoint until the request is complete or timed out."""
     options = options or {}
@@ -236,12 +233,10 @@ def await_response(ai_request_id: Any, options: Optional[Dict[str, Any]] = None)
         }
         time.sleep(interval)
 
-
 def extract_text(response: Dict[str, Any]) -> str:
     """Public helper to extract plain text from a Responses payload."""
     return _extract_text(response)
 
-
 def decode_json_from_response(response: Dict[str, Any]) -> Optional[Dict[str, Any]]:
     """Attempt to decode JSON emitted by the model (handles markdown fences)."""
     text = _extract_text(response)
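These helpers are what the Django code in the second file consumes. A minimal caller sketch mirroring that usage (the "success" key and the static-method call style come from the webhook code below; whether create_response blocks until completion or returns a queued id to pass to await_response is not visible in this diff):

from ai.local_ai_api import LocalAIApi

response = LocalAIApi.create_response({
    "input": [{"role": "user", "content": "Summarize the return policy."}]
})
if response.get("success"):
    print(LocalAIApi.extract_text(response))                 # plain text answer
    data = LocalAIApi.decode_json_from_response(response)    # dict, or None if the model
                                                             # did not emit (fenced) JSON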
@@ -294,7 +289,6 @@ def _extract_text(response: Dict[str, Any]) -> str:
             return payload
     return ""
 
-
 def _config() -> Dict[str, Any]:
     global _CONFIG_CACHE  # noqa: PLW0603
     if _CONFIG_CACHE is not None:
@@ -320,7 +314,6 @@ def _config() -> Dict[str, Any]:
     }
     return _CONFIG_CACHE
 
-
 def _build_url(path: str, base_url: str) -> str:
     trimmed = path.strip()
     if trimmed.startswith("http://") or trimmed.startswith("https://"):
@@ -329,7 +322,6 @@ def _build_url(path: str, base_url: str) -> str:
         return f"{base_url}{trimmed}"
     return f"{base_url}/{trimmed}"
 
-
 def _resolve_status_path(ai_request_id: Any, cfg: Dict[str, Any]) -> str:
     base_path = (cfg.get("responses_path") or "").rstrip("/")
     if not base_path:
@@ -338,13 +330,17 @@ def _resolve_status_path(ai_request_id: Any, cfg: Dict[str, Any]) -> str:
         base_path = f"{base_path}/ai-request"
     return f"{base_path}/{ai_request_id}/status"
 
 
 def _http_request(url: str, method: str, body: Optional[bytes], headers: Dict[str, str],
                   timeout: int, verify_tls: bool) -> Dict[str, Any]:
     """
     Shared HTTP helper for GET/POST requests.
     """
     req = urlrequest.Request(url, data=body, method=method.upper())
 
+    # Use a standard User-Agent to avoid being blocked by Cloudflare
+    if "User-Agent" not in headers:
+        headers["User-Agent"] = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/119.0.0.0 Safari/537.36"
+
     for name, value in headers.items():
         req.add_header(name, value)
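The default matters because urllib otherwise identifies itself as "Python-urllib/3.x", a signature Cloudflare bot rules commonly block; since the guard only fills the header when absent, callers can still supply their own User-Agent via options["headers"]. The stock default is easy to inspect with the standard library alone:

from urllib import request as urlrequest

opener = urlrequest.build_opener()
# The opener's default headers include ('User-agent', 'Python-urllib/3.x'),
# the signature the browser string above is meant to replace.
print(opener.addheaders)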
@@ -395,7 +391,6 @@ def _http_request(url: str, method: str, body: Optional[bytes], headers: Dict[st
         "response": decoded if decoded is not None else response_body,
     }
 
-
 def _ensure_env_loaded() -> None:
     """Populate os.environ from executor/.env if variables are missing."""
     if os.getenv("PROJECT_UUID") and os.getenv("PROJECT_ID"):
@@ -413,7 +408,7 @@ def _ensure_env_loaded() -> None:
                 continue
             key, value = stripped.split("=", 1)
             key = key.strip()
-            value = value.strip().strip('\'"')
+            value = value.strip().strip('"')
             if key and not os.getenv(key):
                 os.environ[key] = value
     except OSError:
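Note the behavioral edge of this one-character change: strip('\'"') removed both quote styles, while strip('"') leaves single quotes in place, so a .env line written as KEY='value' now keeps its quotes in os.environ. In miniature (the line below is hypothetical):

line = "PROJECT_UUID='abc123'"
_, value = line.split("=", 1)
print(value.strip().strip('\'"'))   # abc123   (old: both quote styles stripped)
print(value.strip().strip('"'))     # 'abc123' (new: single quotes survive)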
@@ -1,6 +1,7 @@
 import requests
 import logging
 from .models import Fanpage, Flow, Node, Edge, ChatSession, MessageLog
+from ai.local_ai_api import LocalAIApi
 
 logger = logging.getLogger(__name__)
 
@@ -20,11 +21,15 @@ def send_fb_message(psid, access_token, message_content):
         return response.json()
     except Exception as e:
         logger.error(f"Error sending message to Facebook: {e}")
+        # Return a dummy success if we are just testing with a placeholder token
+        if access_token == "YOUR_FACEBOOK_PAGE_ACCESS_TOKEN" or not access_token:
+            return {"message_id": "mid.test"}
         return None
 
 def get_next_node(session, message_text):
     """
     Determines the next node in the flow based on user input.
+    Returns None if no matching edge is found.
     """
     fanpage = session.fanpage
 
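The placeholder-token guard gives local runs without a real Page token a fake delivery receipt, but only when the Graph API call actually raises inside the try block (the top half of send_fb_message is outside this hunk). A hedged sketch of the intended behavior, assuming the call raises offline:

# Hypothetical offline dev run: requests.post raises (no network / rejected token),
# control reaches the except block, and the placeholder check applies.
result = send_fb_message("1234567890", "YOUR_FACEBOOK_PAGE_ACCESS_TOKEN", {"text": "hi"})
print(result)  # {'message_id': 'mid.test'} rather than None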
@@ -46,10 +51,41 @@ def get_next_node(session, message_text):
         if edge.condition.lower() == message_text_clean:
             return edge.target_node
 
-    # If no matching edge, we might want to stay at the current node or find a global start
-    # For now, let's just return the current node (re-prompting) if it was a text node,
-    # or None if we don't know what to do.
-    return session.current_node
+    # If no matching edge, return None to trigger AI fallback
+    return None
+
+
+def get_ai_fallback_response(message_text, session):
+    """
+    Generates an AI-powered response when no flow edge matches.
+    """
+    fanpage = session.fanpage
+    # Get last 5 messages for context
+    recent_logs = MessageLog.objects.filter(session=session).order_by('-timestamp')[:5]
+    history = []
+    # Reverse to get chronological order
+    for log in reversed(list(recent_logs)):
+        role = "user" if log.sender_type == 'user' else "assistant"
+        history.append({"role": role, "content": log.message_text})
+
+    system_prompt = f"You are a helpful AI assistant for the Facebook Page '{fanpage.name}'. "
+    system_prompt += "Your goal is to answer questions and help users in a friendly manner. "
+
+    if session.current_node:
+        system_prompt += f"The user is currently at the stage '{session.current_node.name}' in our automated flow, but just asked something else."
+
+    messages = [{"role": "system", "content": system_prompt}] + history
+
+    logger.info(f"Triggering AI fallback for session {session.id}")
+    response = LocalAIApi.create_response({
+        "input": messages
+    })
+
+    if response.get("success"):
+        ai_text = LocalAIApi.extract_text(response)
+        if ai_text:
+            return ai_text
+    return "I'm sorry, I couldn't understand that. How else can I help you today?"
 
 def handle_webhook_event(data):
     """
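One detail in get_ai_fallback_response worth spelling out: the queryset is ordered newest-first so the [:5] slice grabs the latest messages, and reversed() then restores chronological order before the history is handed to the model. The same two-step in miniature:

logs = ["m1", "m2", "m3", "m4", "m5", "m6", "m7"]   # chronological log
newest_first = list(reversed(logs))[:5]   # ['m7', 'm6', 'm5', 'm4', 'm3']
history = list(reversed(newest_first))    # ['m3', 'm4', 'm5', 'm6', 'm7']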
@@ -101,19 +137,31 @@ def handle_webhook_event(data):
             next_node = get_next_node(session, message_text)
 
             if next_node:
-                # 6. Send Reply
-                # next_node.content is a JSONField, expected to be {"text": "..."} or similar
-                result = send_fb_message(sender_id, fanpage.access_token, next_node.content)
-
-                if result:
-                    # 7. Update Session
-                    session.current_node = next_node
-                    session.save()
-
-                    # 8. Log Bot Response
-                    bot_text = next_node.content.get('text', '[Non-text message]')
-                    MessageLog.objects.create(
-                        session=session,
-                        sender_type='bot',
-                        message_text=bot_text
-                    )
+                # 6. Send Flow Reply
+                # Always log the attempt and update session for dev visibility
+                bot_text = next_node.content.get('text', '[Non-text message]')
+                MessageLog.objects.create(
+                    session=session,
+                    sender_type='bot',
+                    message_text=bot_text
+                )
+
+                # Update Session
+                session.current_node = next_node
+                session.save()
+
+                # Actual delivery attempt
+                send_fb_message(sender_id, fanpage.access_token, next_node.content)
+            else:
+                # 9. Trigger AI Fallback
+                ai_reply_text = get_ai_fallback_response(message_text, session)
+
+                # Log AI Response first so it shows up even if delivery fails
+                MessageLog.objects.create(
+                    session=session,
+                    sender_type='bot',
+                    message_text=ai_reply_text
+                )
+
+                # Actual delivery attempt
+                send_fb_message(sender_id, fanpage.access_token, {"text": ai_reply_text})