LLM Agent POC — Browser Multi-Tool Reasoning

Advanced / debug
JS Execution Sandbox (for js_exec tool)
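The iframe document (sandboxDoc, whose closing backtick appears at the top of the script below) is what actually runs the model's JavaScript. Its body is not shown in this section; a minimal sketch of such a document, assuming only the message protocol the host code relies on (receive {code}, post back {type:'js_exec_result', ok, out, err}), could look like this:

<!doctype html>
<html><body><script>
// Evaluate code sent by the host page and report the result back.
window.addEventListener('message', (ev) => {
  try {
    const out = String(eval(ev.data.code));
    parent.postMessage({ type: 'js_exec_result', ok: true, out }, '*');
  } catch (err) {
    parent.postMessage({ type: 'js_exec_result', ok: false, err: String(err) }, '*');
  }
});
</script></body></html>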
`;
const blob = new Blob([sandboxDoc], { type: 'text/html' });
sandbox.src = URL.createObjectURL(blob);

// Receive results posted back by the sandbox iframe.
let jsExecResolve = null;
window.addEventListener('message', (ev) => {
  if (ev.data && ev.data.type === 'js_exec_result') {
    if (ev.data.ok) appendConvo('tool:js_exec', ev.data.out || '(no output)');
    else alertShow('danger', 'JS sandbox error: ' + escapeHtml(ev.data.err));
    // Resolve the pending js_exec call, if any, so the real output reaches the agent loop.
    if (jsExecResolve) {
      jsExecResolve(ev.data.ok ? (ev.data.out || '(no output)') : 'ERROR: ' + String(ev.data.err));
      jsExecResolve = null;
    }
  }
});

// Tool implementations
async function tool_search(query) {
  const endpoint = document.getElementById('search-endpoint').value || '/api/search';
  try {
    const res = await fetch(endpoint + '?q=' + encodeURIComponent(query));
    if (!res.ok) throw new Error('Search API returned ' + res.status);
    const j = await res.json(); // expect {snippets: [{title, snippet, link}, ...]}
    return JSON.stringify(j, null, 2);
  } catch (e) {
    throw new Error('Search failed: ' + e.message);
  }
}

async function tool_aipipe(payload) {
  const endpoint = document.getElementById('aipipe-endpoint').value || '/api/aipipe';
  try {
    const res = await fetch(endpoint, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify(payload)
    });
    if (!res.ok) throw new Error('AI Pipe returned ' + res.status);
    const j = await res.json();
    return JSON.stringify(j, null, 2);
  } catch (e) {
    throw new Error('AIPipe failed: ' + e.message);
  }
}

async function tool_js_exec(code) {
  // Post the code to the sandbox iframe and wait for its result message,
  // so the actual output (not a placeholder) is fed back to the LLM.
  return new Promise((resolve) => {
    jsExecResolve = resolve;
    sandbox.contentWindow.postMessage({ code }, '*');
    // Fall back after 10s so a silent sandbox cannot stall the agent loop.
    setTimeout(() => {
      if (jsExecResolve) {
        jsExecResolve('(no result from sandbox within 10s)');
        jsExecResolve = null;
      }
    }, 10000);
  });
}

// LLM call — expects OpenAI-style function-calling output
async function callLLM(messages) {
  const apiEndpoint = document.getElementById('api-endpoint').value.trim();
  const apiKey = document.getElementById('api-key').value.trim();
  if (!apiEndpoint) {
    alertShow('warning', 'Please set an API endpoint in the Model section (or use a proxy)');
    throw new Error('No API endpoint configured');
  }
  // Build a request in a minimal OpenAI Chat Completions v1 style.
  const body = {
    model: document.getElementById('model').value,
    messages: messages,
    temperature: 0.2,
    // Allow function calling — the LLM can return a function_call.
    functions: [
      { name: 'search', description: 'Search the web and return snippets',
        parameters: { type: 'object', properties: { q: { type: 'string' } }, required: ['q'] } },
      { name: 'aipipe', description: 'Call an AI Pipe workflow',
        parameters: { type: 'object', properties: { payload: { type: 'object' } } } },
      { name: 'js_exec', description: 'Execute JS in a sandbox',
        parameters: { type: 'object', properties: { code: { type: 'string' } } } }
    ],
    function_call: 'auto'
  };
  const headers = { 'Content-Type': 'application/json' };
  if (apiKey) headers['Authorization'] = 'Bearer ' + apiKey;
  const res = await fetch(apiEndpoint, { method: 'POST', headers, body: JSON.stringify(body) });
  if (!res.ok) throw new Error('LLM proxy error ' + res.status);
  const j = await res.json();
  // Support multiple response shapes: Chat Completions ({choices:[{message}]}) or a bare message.
  return (j.choices && j.choices[0] && j.choices[0].message) ? j.choices[0].message : j;
}
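// For reference, the Chat Completions response shape callLLM consumes looks
// like this (illustrative values, not real output):
//
// {
//   "choices": [{
//     "message": {
//       "role": "assistant",
//       "content": null,
//       "function_call": { "name": "search", "arguments": "{\"q\":\"latest WebGPU news\"}" }
//     }
//   }]
// }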
// Agent loop state
let convo = []; // OpenAI-style messages
let running = false;

async function handleToolCall(funcName, args) {
  try {
    if (funcName === 'search') {
      appendConvo('tool:call', `search(${args.q})`);
      const r = await tool_search(args.q);
      appendConvo('tool:search', r);
      // The legacy functions API expects tool output as a role:'function' message.
      return { role: 'function', name: 'search', content: r };
    }
    if (funcName === 'aipipe') {
      appendConvo('tool:call', `aipipe(${JSON.stringify(args).slice(0, 200)})`);
      const r = await tool_aipipe(args.payload || {});
      appendConvo('tool:aipipe', r);
      return { role: 'function', name: 'aipipe', content: r };
    }
    if (funcName === 'js_exec') {
      appendConvo('tool:call', `js_exec(code...)`);
      // The message listener above handles displaying the sandbox output.
      const r = await tool_js_exec(args.code || '');
      return { role: 'function', name: 'js_exec', content: r };
    }
    throw new Error('Unknown tool ' + funcName);
  } catch (e) {
    alertShow('danger', 'Tool error: ' + escapeHtml(e.message));
    return { role: 'function', name: funcName, content: 'ERROR: ' + String(e) };
  }
}

async function agentStep() {
  if (running) return;
  running = true;
  try {
    // Loop: call the LLM, run any requested tool, feed the result back,
    // and repeat until the model answers without a function_call.
    for (;;) {
      const llmMsg = await callLLM(convo);
      // Print the assistant text part, if present.
      if (llmMsg.content) appendConvo('assistant', llmMsg.content);
      // Record the assistant turn, including its function_call, which the
      // API needs to see before the following tool result.
      convo.push({ role: 'assistant', content: llmMsg.content || null, function_call: llmMsg.function_call });
      if (!llmMsg.function_call) break; // no tool call — await the user's next message
      const fc = llmMsg.function_call;
      let fnArgs = {};
      try {
        // arguments may arrive as a JSON string or as an object
        fnArgs = typeof fc.arguments === 'string' ? JSON.parse(fc.arguments) : (fc.arguments || {});
      } catch (e) {
        fnArgs = {};
      }
      const toolResult = await handleToolCall(fc.name, fnArgs);
      convo.push(toolResult);
    }
  } catch (e) {
    alertShow('danger', 'Agent error: ' + escapeHtml(String(e)));
  } finally {
    running = false;
  }
}

// UI handlers
document.getElementById('send').addEventListener('click', async () => {
  const input = document.getElementById('user-input').value.trim();
  if (!input) return;
  appendConvo('user', input);
  convo.push({ role: 'user', content: input });
  document.getElementById('user-input').value = '';
  // Kick the agent loop.
  await agentStep();
});

// Enter sends; Shift+Enter inserts a newline.
document.getElementById('user-input').addEventListener('keydown', (e) => {
  if (e.key === 'Enter' && !e.shiftKey) {
    e.preventDefault();
    document.getElementById('send').click();
  }
});
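// Optional: seed the conversation with a system prompt before the first send.
// The POC starts with an empty history; the prompt text here is only an
// illustration, not part of the original code.
convo.push({
  role: 'system',
  content: 'You are a browser-based agent. Use search, aipipe, or js_exec when a tool would help; otherwise answer directly.'
});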