Parse Response

The backend uses the Vercel AI SDK for streaming responses. The easiest way to consume these responses is using the same SDK on the frontend.

RECOMMENDED

Using Vercel AI SDK

Install the AI SDK packages:

npm install @ai-sdk/react ai

React / Next.js

Use the useChat hook for automatic stream parsing and state management:

ChatComponent.tsx
// NOTE: in AI SDK v5, DefaultChatTransport is exported from 'ai',
// not '@ai-sdk/react'.
import { useChat } from '@ai-sdk/react';
import { DefaultChatTransport } from 'ai';
import { useMemo } from 'react';

const BOX_URL = 'https://your-box.intelligencebox.it';
const API_KEY = 'YOUR_API_KEY';

export function ChatComponent({ chatId }: { chatId: string }) {
  // Memoize the transport so it isn't recreated on every render
  // (BOX_URL / API_KEY are module constants, so chatId is the only dep).
  const transport = useMemo(() => new DefaultChatTransport({
    api: `${BOX_URL}/api/ai/chat`,
    headers: async () => ({
      'x-api-key': API_KEY,
    }),
    body: {
      id: chatId,
      boxAddress: BOX_URL,
    },
  }), [chatId]);

  const { messages, sendMessage, status, stop } = useChat({
    id: chatId,
    transport,
    onFinish: () => console.log('Stream completed'),
    onError: (error) => console.error('Error:', error),
  });

  const handleSubmit = async (text: string) => {
    await sendMessage({ text });
  };

  return (
    <div>
      {/* Messages — v5 UIMessage has no `content` string; text lives in `parts` */}
      {messages.map((message) => (
        <div key={message.id} className={message.role}>
          <strong>{message.role}:</strong>
          {message.parts.map((part, index) =>
            part.type === 'text' ? <span key={index}>{part.text}</span> : null
          )}
        </div>
      ))}

      {/* Input */}
      <form onSubmit={(e) => {
        e.preventDefault();
        const input = e.currentTarget.querySelector('input');
        if (input?.value) {
          handleSubmit(input.value);
          input.value = '';
        }
      }}>
        <input type="text" placeholder="Type a message..." />
        {/* Disable while the request is in flight ('submitted') or streaming */}
        <button type="submit" disabled={status === 'submitted' || status === 'streaming'}>
          Send
        </button>
      </form>

      {/* Stop button */}
      {status === 'streaming' && (
        <button onClick={stop}>Stop Generation</button>
      )}
    </div>
  );
}

Manual Stream Parsing (Any Framework)

If you're not using React, you can parse the response stream directly — the endpoint emits standard Server-Sent Events (`data: {json}` lines), so no SDK is required:

manual-parsing.ts
import { parseUIMessageStream } from 'ai';

const BOX_URL = 'https://your-box.intelligencebox.it';

async function streamChat(message: string) {
  const response = await fetch(`${BOX_URL}/api/ai/chat`, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'x-api-key': 'YOUR_API_KEY',
    },
    body: JSON.stringify({
      id: 'chat-' + Date.now(),
      messages: [{ role: 'user', content: message }],
      boxAddress: BOX_URL,
    }),
  });

  let fullText = '';

  // Parse the UI message stream
  for await (const part of parseUIMessageStream(response)) {
    switch (part.type) {
      case 'text':
        // Incremental text chunk
        fullText += part.text;
        console.log('Chunk:', part.text);
        break;

      case 'tool-call':
        // AI is calling a tool (web search, etc.)
        console.log('Tool call:', part.toolName, part.args);
        break;

      case 'tool-result':
        // Tool execution result
        console.log('Tool result:', part.toolName, part.result);
        break;

      case 'finish':
        // Stream completed
        console.log('Finished:', part.finishReason);
        break;

      case 'error':
        // Error occurred
        console.error('Error:', part.error);
        break;
    }
  }

  return fullText;
}

// Usage
const answer = await streamChat('What is AI?');

Stream Event Types

| Type | Properties | Description |
|------|------------|-------------|
| `text` | `text: string` | Incremental text content |
| `tool-call` | `toolCallId`, `toolName`, `args` | AI is invoking a tool |
| `tool-result` | `toolCallId`, `result` | Result from tool execution |
| `finish` | `finishReason`, `usage` | Stream completed |
| `error` | `error: string` | An error occurred |

Vue.js

useChat.ts
import { ref, onUnmounted } from 'vue';

const BOX_URL = 'https://your-box.intelligencebox.it';

/**
 * Minimal chat composable: streams the assistant reply into the last
 * message by parsing the endpoint's SSE stream directly (no SDK needed).
 */
export function useChat() {
  const messages = ref<Array<{ role: string; content: string }>>([]);
  const isStreaming = ref(false);
  let abortController: AbortController | null = null;

  async function sendMessage(content: string) {
    isStreaming.value = true;
    abortController = new AbortController();

    // Add user message
    messages.value.push({ role: 'user', content });
    // Add empty assistant message that fills in as chunks arrive
    messages.value.push({ role: 'assistant', content: '' });
    const assistantIndex = messages.value.length - 1;

    try {
      const response = await fetch(`${BOX_URL}/api/ai/chat`, {
        method: 'POST',
        headers: {
          'Content-Type': 'application/json',
          'x-api-key': 'YOUR_API_KEY',
        },
        body: JSON.stringify({
          id: 'chat-' + Date.now(),
          // Exclude the empty assistant placeholder from the request
          messages: messages.value.slice(0, -1),
          boxAddress: BOX_URL,
        }),
        signal: abortController.signal,
      });

      // Surface auth/validation failures instead of streaming nothing.
      if (!response.ok || !response.body) {
        throw new Error(`Chat request failed: ${response.status}`);
      }

      const reader = response.body.getReader();
      const decoder = new TextDecoder();
      let buffer = '';

      while (true) {
        const { done, value } = await reader.read();
        if (done) break;

        // SSE events are newline-delimited `data: {json}` lines.
        buffer += decoder.decode(value, { stream: true });
        const lines = buffer.split('\n');
        buffer = lines.pop() ?? '';

        for (const line of lines) {
          if (!line.startsWith('data: ')) continue;
          try {
            const part = JSON.parse(line.slice(6)) as { type?: string; text?: string };
            if (part.type === 'text') {
              messages.value[assistantIndex].content += part.text ?? '';
            }
          } catch {
            // ignore keep-alive / malformed lines
          }
        }
      }
    } catch (error) {
      if ((error as Error).name !== 'AbortError') {
        console.error('Error:', error);
      }
    } finally {
      isStreaming.value = false;
    }
  }

  function stop() {
    abortController?.abort();
  }

  onUnmounted(() => stop());

  return { messages, isStreaming, sendMessage, stop };
}

Python

For Python, parse the raw SSE stream:

chat.py
import requests
import json
import time

BOX_URL = 'https://your-box.intelligencebox.it'

def chat(message, assistant_id=None, vector_ids=None):
    """Send one user message and stream the assistant reply.

    Prints chunks as they arrive and returns the accumulated text.

    Args:
        message: The user's message text.
        assistant_id: Optional assistant id to route the chat to.
        vector_ids: Optional list of vector/folder ids for retrieval.

    Returns:
        The full assistant response as a single string.

    Raises:
        requests.HTTPError: If the server rejects the request
            (e.g. bad API key), instead of silently returning ''.
    """
    payload = {
        'id': f'chat-{int(time.time())}',
        'messages': [{'role': 'user', 'content': message}],
        'boxAddress': BOX_URL
    }

    if assistant_id:
        payload['assistantId'] = assistant_id
    if vector_ids:
        payload['vector'] = vector_ids

    response = requests.post(
        f'{BOX_URL}/api/ai/chat',
        headers={
            'Content-Type': 'application/json',
            'x-api-key': 'YOUR_API_KEY'
        },
        json=payload,
        stream=True
    )
    # Fail fast on auth/validation errors rather than returning an empty string.
    response.raise_for_status()

    full_response = ''

    # decode_unicode=True yields str lines directly (SSE payload is UTF-8).
    for line in response.iter_lines(decode_unicode=True):
        if not line or not line.startswith('data: '):
            continue
        try:
            data = json.loads(line[6:])
        except json.JSONDecodeError:
            continue  # keep-alive / malformed line

        event_type = data.get('type')
        if event_type == 'text':
            chunk = data.get('text', '')
            full_response += chunk
            print(chunk, end='', flush=True)
        elif event_type == 'tool-call':
            print(f"\n[Tool: {data.get('toolName')}]")
        elif event_type == 'error':
            print(f"\nError: {data.get('error')}")
        elif event_type == 'finish':
            print('\n[Done]')

    return full_response

# Usage
answer = chat('What is AI?')
answer = chat('Summarize my docs', vector_ids=['folder-id'])
answer = chat('Help me', assistant_id='assistant-id')

cURL

Use -N flag to disable buffering:

curl -N -X POST BOX_URL/api/ai/chat \
  -H "x-api-key: YOUR_API_KEY" \
  -H "Content-Type: application/json" \
  -d '{"id":"chat-1","messages":[{"role":"user","content":"Hello"}],"boxAddress":"BOX_URL"}'

Output:

data: {"type":"text","text":"Hello"}
data: {"type":"text","text":"! How"}
data: {"type":"text","text":" can I help you today?"}
data: {"type":"finish","finishReason":"stop"}

Resources