import json

from django.db.models import Q
from django.http import JsonResponse
from django.shortcuts import render

from .models import InventoryItem


def home(request):
    """Render the landing screen with the conversational AI shell."""
    featured_items = InventoryItem.objects.all()[:3]
    context = {
        "project_name": "AI Inventory Assistant",
        "featured_items": featured_items,
    }
    return render(request, "core/index.html", context)


def chat_api(request):
    """Simple keyword-based search to simulate an AI conversational agent."""
    if request.method == "POST":
        try:
            data = json.loads(request.body)
            query = data.get("message", "").lower()

            if not query:
                return JsonResponse({"reply": "I'm listening! What are you looking for?"})

            # Simulate "AI" logic by filtering the inventory with keyword lookups.
            # In a real scenario, this would call an LLM with the inventory as context.
            results = InventoryItem.objects.filter(
                Q(name__icontains=query)
                | Q(description__icontains=query)
                | Q(features__icontains=query)
                | Q(store_name__icontains=query)
                | Q(store_location__icontains=query)
            ).distinct()

            if results.exists():
                count = results.count()
                reply = f"I found {count} options that match your request. Here are the best ones:"
                items = []
                for item in results[:5]:
                    items.append({
                        "name": item.name,
                        "cost": str(item.cost),
                        "store": item.store_name,
                        "location": item.store_location,
                        "installments": item.installments_options,
                        "lead_time": item.lead_time,
                    })
                return JsonResponse({"reply": reply, "items": items})

            return JsonResponse({
                "reply": "I couldn't find any items matching those exact details. "
                         "Try asking about cost, location, or specific features!"
            })
        except Exception as e:
            return JsonResponse({"error": str(e)}, status=400)

    return JsonResponse({"error": "Invalid request"}, status=405)
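

# ---------------------------------------------------------------------------
# Reference sketch (not part of this module's logic): the InventoryItem fields
# the views above rely on. chat_api filters on name, description, features,
# store_name, and store_location, and serializes cost, installments_options,
# and lead_time. A minimal core/models.py satisfying those lookups might look
# like the commented sketch below; the concrete field types and max_length
# values are assumptions, not taken from the real model definition.
#
#     from django.db import models
#
#     class InventoryItem(models.Model):
#         name = models.CharField(max_length=200)
#         description = models.TextField(blank=True)
#         features = models.TextField(blank=True)
#         store_name = models.CharField(max_length=200)
#         store_location = models.CharField(max_length=200)
#         cost = models.DecimalField(max_digits=10, decimal_places=2)
#         installments_options = models.CharField(max_length=100, blank=True)
#         lead_time = models.CharField(max_length=100, blank=True)
#
#         def __str__(self):
#             return self.name
#
# Example request against chat_api (assuming the view is routed at /api/chat/
# in urls.py and is CSRF-exempted or supplied with a CSRF token; both the URL
# and that setup are assumptions, not taken from this project):
#
#     curl -X POST http://localhost:8000/api/chat/ \
#          -H "Content-Type: application/json" \
#          -d '{"message": "laptop"}'
# ---------------------------------------------------------------------------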