From 4c1df17a4696d56887e17af25ef52afc6014577d Mon Sep 17 00:00:00 2001
From: Flatlogic Bot
Date: Thu, 20 Nov 2025 19:22:48 +0000
Subject: [PATCH] async AI job

---
 core/__pycache__/models.cpython-311.pyc |  Bin 4089 -> 4089 bytes
 core/__pycache__/urls.cpython-311.pyc   |  Bin 1176 -> 1176 bytes
 core/__pycache__/views.cpython-311.pyc  |  Bin 18996 -> 21642 bytes
 core/templates/core/chat.html           |   57 ++++++
 core/views.py                           |  254 ++++++++++++++----------
 5 files changed, 204 insertions(+), 107 deletions(-)

diff --git a/core/__pycache__/models.cpython-311.pyc b/core/__pycache__/models.cpython-311.pyc
index 8072a303a8507e71db2cb0f28b17ef15a08c0788..c6ae9db56a250a75ec9adf5b8fd67e319a2ea5e2 100644
GIT binary patch
delta 20
acmew<|5KiOIWI340}u$s$#3L-#}5EK(*;ie

delta 20
acmew<|5KiOIWI340}vE_lH17rjvoL+<_1Cl

diff --git a/core/__pycache__/urls.cpython-311.pyc b/core/__pycache__/urls.cpython-311.pyc
index 18d56872decaf9d252d236d729d7e42844664bf9..0a046db79f7a97fa9a38d3c364c517d7882c8501 100644
GIT binary patch
delta 20
acmbQiIfIjXIWI340}u$s$#3NDWdQ&yhy(Qi

delta 20
acmbQiIfIjXIWI340}$-_B)5^fmjwVbp#?Gk

diff --git a/core/__pycache__/views.cpython-311.pyc b/core/__pycache__/views.cpython-311.pyc
index 8f27c3c0a4f42a9cf3bed96e020b4a3494b8e639..e1d23ecb2793b46e9aa2de9c18a371bbfbb12ed8 100644
Binary files a/core/__pycache__/views.cpython-311.pyc and b/core/__pycache__/views.cpython-311.pyc differ
diff --git a/core/templates/core/chat.html b/core/templates/core/chat.html
--- a/core/templates/core/chat.html
+++ b/core/templates/core/chat.html
+        pollingInterval = setInterval(() => {
+            fetch(`/get_conversation_messages/${conversationId}/`)
+                .then(response => response.json())
+                .then(data => {
+                    const messagesContainer = document.getElementById('chat-messages');
+                    messagesContainer.innerHTML = ''; // Clear existing messages
+                    data.messages.forEach(message => {
+                        const messageEl = document.createElement('div');
+                        messageEl.classList.add('message', message.sender);
+
+                        const messageContentEl = document.createElement('div');
+                        messageContentEl.classList.add('message-content');
+
+                        const authorEl = document.createElement('div');
+                        authorEl.classList.add('message-author');
+                        if (message.sender === 'user') {
+                            authorEl.innerText = 'You';
+                        } else if (message.sender === 'ai') {
+                            authorEl.innerText = 'AI';
+                        } else if (message.sender === 'system') {
+                            authorEl.innerText = 'System';
+                        } else if (message.sender === 'ai_command') {
+                            authorEl.innerText = 'AI Command';
+                        }
+                        messageContentEl.appendChild(authorEl);
+
+                        if (message.sender === 'ai_command') {
+                            const preEl = document.createElement('pre');
+                            const codeEl = document.createElement('code');
+                            codeEl.innerHTML = message.content.replace(/\n/g, '<br>');
+                            preEl.appendChild(codeEl);
+                            messageContentEl.appendChild(preEl);
+                        } else {
+                            const pEl = document.createElement('p');
+                            pEl.innerHTML = message.content.replace(/\n/g, '<br>');
+                            messageContentEl.appendChild(pEl);
+                        }
+
+                        messageEl.appendChild(messageContentEl);
+                        messagesContainer.appendChild(messageEl);
+                    });
+                    messagesContainer.scrollTop = messagesContainer.scrollHeight;
+
+                    if (!data.is_generating) {
+                        clearInterval(pollingInterval);
+                        if (loaderOverlay) {
+                            loaderOverlay.style.display = 'none';
+                        }
+                    }
+                });
+        }, 2000);
+    }
 });
diff --git a/core/views.py b/core/views.py
index 29c919f..a6ee8e2 100644
--- a/core/views.py
+++ b/core/views.py
@@ -3,6 +3,7 @@ from django.http import JsonResponse
 from django.views.decorators.http import require_POST
 import json
 import logging
+import threading
 from .models import Article, TodoItem, Conversation, Message, Setting
 from .forms import TodoItemForm
 import time
@@ -11,6 +12,7 @@ from ai.local_ai_api import LocalAIApi
 # Get an instance of a logger
 logger = logging.getLogger(__name__)
 
+
 def index(request):
     if request.method == 'POST':
         form = TodoItemForm(request.POST)
@@ -31,6 +33,7 @@ def index(request):
     }
     return render(request, "core/index.html", context)
 
+
 def kanban_board(request):
     tasks = TodoItem.objects.all().order_by('created_at')
     tasks_by_status = {
@@ -45,10 +48,12 @@ def kanban_board(request):
     }
     return render(request, "core/kanban.html", context)
 
+
 def article_detail(request, article_id):
     article = Article.objects.get(pk=article_id)
     return render(request, "core/article_detail.html", {"article": article})
 
+
 @require_POST
 def update_task_status(request):
     try:
@@ -76,11 +81,13 @@ def delete_task(request, task_id):
         return redirect(referer)
     return redirect('core:index')
 
+
 @require_POST
 def cleanup_tasks(request):
     TodoItem.objects.all().delete()
     return redirect('core:index')
 
+
 def execute_command(command_data):
     command_name = command_data.get('name')
     args = command_data.get('args', {})
@@ -145,40 +152,33 @@ def execute_command(command_data):
         logger.error(f"Error executing command '{command_name}': {e}", exc_info=True)
         return f"[SYSTEM] Error executing command '{command_name}': {e}"
-def chat_view(request, conversation_id=None):
-    if request.method == 'POST':
-        if 'title' in request.POST:
-            title = request.POST.get('title', 'New Conversation')
-            conversation = Conversation.objects.create(title=title)
-            return redirect('core:chat_detail', conversation_id=conversation.id)
-
-        elif 'text' in request.POST and conversation_id:
-            text = request.POST.get('text')
-            selected_conversation = get_object_or_404(Conversation, id=conversation_id)
-
-            if text:
-                command_name = None # Initialize command_name
-                Message.objects.create(conversation=selected_conversation, content=text, sender='user')
-
-                history = []
-                for msg in selected_conversation.messages.order_by('created_at'):
-                    role = msg.sender
-                    if role == 'ai':
-                        role = 'assistant'
-                    elif role == 'system':
-                        role = 'user'
-                    history.append({"role": role, "content": msg.content})
-
-                try:
-                    custom_instructions, created = Setting.objects.get_or_create(
-                        key='custom_instructions',
-                        defaults={'value': ''}
-                    )
-                    custom_instructions_text = custom_instructions.value + '\n\n' if custom_instructions.value else ''
-                    system_message = {
-                        "role": "system",
-                        "content": custom_instructions_text + '''You are a project management assistant. To communicate with the user, you MUST use the `send_message` command.
+def run_ai_process_in_background(conversation_id):
+    """This function runs in a separate thread."""
+    try:
+        conversation = get_object_or_404(Conversation, id=conversation_id)
+        conversation.is_generating = True
+        conversation.save()
+
+        history = []
+        for msg in conversation.messages.order_by('created_at'):
+            role = msg.sender
+            if role == 'ai':
+                role = 'assistant'
+            # User messages are already 'user', system messages are 'user' for the model
+            elif role == 'system':
+                role = 'user'
+            history.append({"role": role, "content": msg.content})
+
+        custom_instructions, _ = Setting.objects.get_or_create(
+            key='custom_instructions',
+            defaults={'value': ''}
+        )
+        custom_instructions_text = custom_instructions.value + '\n\n' if custom_instructions.value else ''
+
+        system_message = {
+            "role": "system",
+            "content": custom_instructions_text + '''You are a project management assistant. To communicate with the user, you MUST use the `send_message` command.
 
 **Commands must be in a specific JSON format.**
 
 Your response must be a JSON object with the following structure:
 ```
@@ -253,86 +253,116 @@
 ```
 
 **IMPORTANT:** Do not wrap the JSON command in markdown backticks or any other text. The entire response must be the JSON object.'''
-                    }
+            }
 
-                    tasks = TodoItem.objects.all().order_by('created_at')
-                    task_list_str = "\n".join([
-                        f"- ID {task.id}: {task.title} (Status: {task.get_status_display()}, Tags: {task.tags or 'None'})" for task in tasks
-                    ])
+        tasks = TodoItem.objects.all().order_by('created_at')
+        task_list_str = "\n".join([
+            f"- ID {task.id}: {task.title} (Status: {task.get_status_display()}, Tags: {task.tags or 'None'})" for task in tasks
+        ])
+
+        tasks_context = {
+            "role": "system",
+            "content": f"Here is the current list of tasks:\n{task_list_str}"
+        }
+
+        logger.info("Starting AI processing loop...")
+
+        for i in range(7): # Loop up to 7 times
+            logger.info(f"AI loop iteration {i+1}")
+
+            response = LocalAIApi.create_response({
+                "input": [
+                    system_message,
+                    tasks_context,
+                ] + history,
+                "text": {"format": {"type": "json_object"}},
+            })
+
+            if not response.get("success"):
+                logger.error(f"AI API request failed. Full error: {response.get('error')}")
+                ai_text = "I couldn't process that. Please try again."
+                Message.objects.create(conversation=conversation, content=ai_text, sender='ai')
+                break
+
+            ai_text = LocalAIApi.extract_text(response)
+            if not ai_text:
+                logger.warning("AI response was empty.")
+                ai_text = "I couldn't process that. Please try again."
+                Message.objects.create(conversation=conversation, content=ai_text, sender='ai')
+                break
+
+            try:
+                command_json = json.loads(ai_text)
+                if 'command' in command_json:
+                    command_name = command_json.get('command', {}).get('name')
+                    command_result = execute_command(command_json['command'])
-                    tasks_context = {
-                        "role": "system",
-                        "content": f"Here is the current list of tasks:\n{task_list_str}"
-                    }
-
-                    logger.info("Starting AI processing loop...")
+                    sender = 'ai' if command_name == 'send_message' else 'system'
+                    Message.objects.create(conversation=conversation, content=command_result, sender=sender)
-                    for i in range(7): # Loop up to 7 times
-                        logger.info(f"AI loop iteration {i+1}")
-
-                        response = LocalAIApi.create_response({
-                            "input": [
-                                system_message,
-                                tasks_context,
-                            ] + history,
-                            "text": {"format": {"type": "json_object"}},
-                        })
+                    if command_name == 'send_message':
+                        break
+
+                    history.append({"role": "user", "content": command_result})
+                else:
+                    Message.objects.create(conversation=conversation, content=ai_text, sender='ai')
+                    break
+            except (json.JSONDecodeError, TypeError):
+                Message.objects.create(conversation=conversation, content=ai_text, sender='ai')
+                break
+        else:
+            logger.warning("AI loop finished after 7 iterations without sending a message.")
+            final_message = "I seem to be stuck in a loop. Could you clarify what you'd like me to do?"
+            Message.objects.create(conversation=conversation, content=final_message, sender='ai')
-                        if not response.get("success"):
-                            logger.error(f"AI API request failed with status {response.get('status')}. Full error: {response.get('response')}")
-                            ai_text = "I couldn't process that. Please try again."
-                            Message.objects.create(conversation=selected_conversation, content=ai_text, sender='ai')
-                            break
-
-                        logger.info(f"AI raw response: {response}")
-                        ai_text = LocalAIApi.extract_text(response)
-                        logger.info(f"Extracted AI text: {ai_text}")
+    except Exception as e:
+        logger.error(f"An unexpected error occurred in background AI process: {e}", exc_info=True)
+        try:
+            # Try to inform the user about the error
+            Message.objects.create(conversation_id=conversation_id, content=f"An internal error occurred: {str(e)}", sender='ai')
+        except Exception as e2:
+            logger.error(f"Could not even save the error message to the conversation: {e2}", exc_info=True)
-                        if not ai_text:
-                            logger.warning("AI response was empty.")
-                            ai_text = "I couldn't process that. Please try again."
-                            Message.objects.create(conversation=selected_conversation, content=ai_text, sender='ai')
-                            break
-
-                        try:
-                            command_json = json.loads(ai_text)
-                            if 'command' in command_json:
-                                command_name = command_json.get('command', {}).get('name')
-                                command_result = execute_command(command_json['command'])
-
-                                sender = 'ai' if command_name == 'send_message' else 'system'
-                                Message.objects.create(conversation=selected_conversation, content=command_result, sender=sender)
-
-                                if command_name == 'send_message':
-                                    break # Exit loop if send_message is called
-
-                                # Add system message with command result to history for next iteration
-                                history.append({"role": "user", "content": command_result})
+    finally:
+        # Ensure is_generating is always set to False
+        try:
+            conversation = Conversation.objects.get(id=conversation_id)
+            conversation.is_generating = False
+            conversation.save()
+        except Conversation.DoesNotExist:
+            logger.error(f"Conversation with ID {conversation_id} does not exist when trying to finalize background process.")
+        except Exception as e:
+            logger.error(f"Could not finalize background process for conversation {conversation_id}: {e}", exc_info=True)
-                            else:
-                                # If it's a JSON but not a command, save it as a message and break
-                                Message.objects.create(conversation=selected_conversation, content=ai_text, sender='ai')
-                                break
-                        except (json.JSONDecodeError, TypeError):
-                            # Not a JSON command, treat as a raw message and break
-                            Message.objects.create(conversation=selected_conversation, content=ai_text, sender='ai')
-                            break
-                        else:
-                            # This block executes if the loop completes without a 'break'
-                            logger.warning("AI loop finished after 7 iterations without sending a message.")
-                            final_message = "I seem to be stuck in a loop. Could you clarify what you'd like me to do?"
-                            Message.objects.create(conversation=selected_conversation, content=final_message, sender='ai')
-
-                except Exception as e:
-                    logger.error(f"An unexpected error occurred: {e}", exc_info=True)
-                    ai_text = f"An error occurred: {str(e)}"
-                    Message.objects.create(conversation=selected_conversation, content=ai_text, sender='ai')
-
-            return redirect('core:chat_detail', conversation_id=conversation_id)
+def chat_view(request, conversation_id=None):
+    if request.method == 'POST':
+        # Create a new conversation
+        if 'title' in request.POST:
+            title = request.POST.get('title', 'New Conversation').strip()
+            if not title:
+                title = 'New Conversation'
+            conversation = Conversation.objects.create(title=title)
+            return redirect('core:chat_detail', conversation_id=conversation.id)
+
+        # Send a message in an existing conversation
+        elif 'text' in request.POST and conversation_id:
+            text = request.POST.get('text').strip()
+            if text:
+                conversation = get_object_or_404(Conversation, id=conversation_id)
+                Message.objects.create(conversation=conversation, content=text, sender='user')
+
+                # Start AI processing in a background thread
+                thread = threading.Thread(target=run_ai_process_in_background, args=(conversation_id,))
+                thread.daemon = True
+                thread.start()
+
+            return redirect('core:chat_detail', conversation_id=conversation_id)
 
     conversations = Conversation.objects.order_by('-created_at')
-    selected_conversation = get_object_or_404(Conversation, id=conversation_id) if conversation_id else None
+    selected_conversation = None
+    if conversation_id:
+        selected_conversation = get_object_or_404(Conversation, id=conversation_id)
 
     return render(request, 'core/chat.html', {
         'conversation_list': conversations,
@@ -340,13 +370,23 @@ def chat_view(request, conversation_id=None):
         'timestamp': int(time.time()),
     })
 
+
 def conversation_list(request):
     conversations = Conversation.objects.order_by('-created_at')
     return render(request, 'core/conversation_list.html', {'conversation_list': conversations})
 
+
+def get_conversation_messages(request, conversation_id):
+    conversation = get_object_or_404(Conversation, id=conversation_id)
+    messages = conversation.messages.order_by('created_at').values('sender', 'content', 'created_at')
+    return JsonResponse({
+        'messages': list(messages),
+        'is_generating': conversation.is_generating
+    })
+
+
 def settings_view(request):
-    # Get or create the custom_instructions setting
-    custom_instructions, created = Setting.objects.get_or_create(
+    custom_instructions, _ = Setting.objects.get_or_create(
         key='custom_instructions',
         defaults={'value': ''}
     )
@@ -358,4 +398,4 @@ def settings_view(request):
 
     return render(request, 'core/settings.html', {
         'custom_instructions': custom_instructions
-    })
\ No newline at end of file
+    })