Compare commits

d4fe82e9a4169ef10edb6289d2d450baa6773728..fbf5ad5561d6d70eb060a668f71888fc5a7fc559

No commits in common. "d4fe82e9a4169ef10edb6289d2d450baa6773728" and "fbf5ad5561d6d70eb060a668f71888fc5a7fc559" have entirely different histories.

5 changed files with 154 additions and 129 deletions

View File

@@ -1,4 +1,4 @@
-# Build stage
+# ---------- Build stage (Keep this the same) ----------
 FROM node:20-alpine AS build
 WORKDIR /app
 COPY package.json package-lock.json ./
@@ -6,11 +6,18 @@ RUN npm ci
 COPY . .
 RUN npm run build
 
-# Production stage
-FROM node:20-alpine
-WORKDIR /app
-COPY --from=build /app/dist ./dist
-COPY server.js .
-RUN npm install express@4 google-auth-library
-EXPOSE 8080
-CMD ["node", "server.js"]
+# ---------- Diagnostic Production stage ----------
+FROM python:3.11-slim
+
+# Set the working directory to where the React files live
+WORKDIR /usr/share/nginx/html
+
+# Copy build output from the build stage
+COPY --from=build /app/dist .
+
+# Expose port 80 (Cloud Run expects this)
+EXPOSE 80
+
+# Run Python's built-in simple HTTP server
+# This server is very "dumb" and will ignore/accept large IAP headers
+CMD ["python", "-m", "http.server", "80"]

View File

@@ -1,60 +0,0 @@
-const express = require('express');
-const path = require('path');
-const { GoogleAuth } = require('google-auth-library');
-
-const app = express();
-app.use(express.json({ limit: '1mb' }));
-
-const BACKEND_URL = process.env.BACKEND_URL;
-if (!BACKEND_URL) {
-  console.error('FATAL: BACKEND_URL env var is required');
-  process.exit(1);
-}
-
-const auth = new GoogleAuth();
-
-app.get('/health', (_req, res) => {
-  res.status(200).json({ status: 'ok' });
-});
-
-app.post('/api/chat', async (req, res) => {
-  console.log('🔥🔥🔥 /api/chat ROUTE HIT 🔥🔥🔥');
-  try {
-    // Create an ID-token authenticated client for the backend (audience = BACKEND_URL)
-    const idTokenClient = await auth.getIdTokenClient(BACKEND_URL);
-
-    // Forward request to backend
-    const backendResp = await idTokenClient.request({
-      url: `${BACKEND_URL}/chat`,
-      method: 'POST',
-      data: req.body,
-    });
-
-    res.status(backendResp.status).json(backendResp.data);
-  } catch (err) {
-    console.error('Proxy error FULL:', {
-      message: err.message,
-      responseStatus: err.response?.status,
-      responseData: err.response?.data,
-      stack: err.stack,
-    });
-    const status = err.response?.status || 500;
-    const message = err.response?.data || { error: err.message };
-    res.status(err.response?.status || 500).json({
-      error: err.response?.data || err.message || 'Unknown proxy error',
-    });
-  }
-});
-
-// Serve static assets
-app.use(express.static(path.join(__dirname, 'dist')));
-
-// SPA fallback
-app.get('*', (_req, res) => {
-  res.sendFile(path.join(__dirname, 'dist', 'index.html'));
-});
-
-const port = parseInt(process.env.PORT || '8080', 10);
-app.listen(port, '0.0.0.0', () => {
-  console.log(`Frontend proxy listening on port ${port}`);
-});

View File

@@ -24,21 +24,47 @@ export const useChat = (mode: ChatMode) => {
     setIsLoading(true)
 
     try {
-      // Call the chat API and get the response
-      const response = await apiClient.chat(question, mode)
-
-      // Add assistant message with the response
-      const assistantMessage: Message = {
-        id: (Date.now() + 1).toString(),
-        role: 'assistant',
-        content: response.response,
-      }
-
-      setMessages((prev) => [...prev, assistantMessage])
+      // Create placeholder assistant message
+      const assistantMessageId = (Date.now() + 1).toString()
+      let assistantContent = ''
+
+      setMessages((prev) => [
+        ...prev,
+        {
+          id: assistantMessageId,
+          role: 'assistant',
+          content: '',
+        },
+      ])
+
+      // Process SSE stream
+      const streamGenerator = apiClient.chatStream(question, mode)
+
+      for await (const chunk of streamGenerator) {
+        if (chunk.type === 'chunk') {
+          // Append chunk content
+          assistantContent += chunk.data
+
+          // Update assistant message
+          setMessages((prev) =>
+            prev.map((msg) =>
+              msg.id === assistantMessageId
+                ? { ...msg, content: assistantContent }
+                : msg
+            )
+          )
+        } else if (chunk.type === 'error') {
+          toast.error(chunk.data)
+          console.error('Stream error:', chunk.data)
+        } else if (chunk.type === 'done') {
+          setIsLoading(false)
+        }
+      }
+
+      setIsLoading(false)
     } catch (error) {
       console.error('Error sending message:', error)
-      toast.error(error instanceof Error ? error.message : 'Failed to send message. Please try again.')
-    } finally {
+      toast.error('Failed to send message. Please try again.')
       setIsLoading(false)
     }
   }
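
The hook above depends only on the StreamEvent values yielded by apiClient.chatStream(), so the accumulate-and-replace pattern can be exercised against any async generator. A minimal, self-contained sketch of that pattern (fakeStream and collect are hypothetical names used for illustration, not part of this change):

// Sketch: fold 'chunk' events from any async generator into one string,
// the same contract useChat relies on. Hypothetical example only.
type StreamEvent =
  | { type: 'chunk'; data: string }
  | { type: 'done'; data: null }
  | { type: 'error'; data: string }

async function* fakeStream(): AsyncGenerator<StreamEvent> {
  for (const word of ['Hello', ' ', 'world']) {
    yield { type: 'chunk', data: word }
  }
  yield { type: 'done', data: null }
}

async function collect(stream: AsyncGenerator<StreamEvent>): Promise<string> {
  let content = ''
  for await (const event of stream) {
    if (event.type === 'chunk') content += event.data // accumulate, like assistantContent
    else if (event.type === 'error') throw new Error(event.data)
  }
  return content
}

collect(fakeStream()).then((text) => console.log(text)) // "Hello world"

Folding chunks into a single growing string and replacing the placeholder message immutably is what lets React re-render the assistant reply as each chunk arrives.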

View File

@@ -1,8 +1,7 @@
 import type { ChatMode } from '@/types/chat'
 
-// In production, use relative URL to route through the proxy server
-const API_BASE_URL = '/api'
-const USE_MOCK_DATA = false // Set to true to use mock data for testing
+const API_BASE_URL = import.meta.env.VITE_API_URL || 'http://localhost:5000'
+const USE_MOCK_DATA = true // Set to false when backend is ready
 
 // Session management
 export const getChatSessionId = (): string | null => {
@@ -33,34 +32,39 @@ const MOCK_RESPONSES = {
   ],
 }
 
-// Chat response type from backend
-export interface ChatResponse {
-  conversation_id: string
-  response: string
-  mode: string
-  sources: string[]
-}
-
-// Mock chat function
-async function mockChat(
+// Mock streaming function
+async function* mockChatStream(
   _question: string,
   mode: ChatMode
-): Promise<ChatResponse> {
+): AsyncGenerator<StreamEvent> {
+  const sessionId = crypto.randomUUID()
+  yield { type: 'session_id', data: sessionId }
+
   // Select a random response based on mode
   const responses = MOCK_RESPONSES[mode]
   const response = responses[Math.floor(Math.random() * responses.length)]
 
-  // Simulate network delay
-  await new Promise(resolve => setTimeout(resolve, 500))
-
-  return {
-    conversation_id: crypto.randomUUID(),
-    response,
-    mode,
-    sources: [],
-  }
+  // Simulate streaming by yielding words with delays
+  const words = response.split(' ')
+  for (let i = 0; i < words.length; i++) {
+    // Add space before word (except first word)
+    const chunk = i === 0 ? words[i] : ' ' + words[i]
+    yield { type: 'chunk', data: chunk }
+
+    // Random delay between 30-80ms to simulate typing
+    await new Promise(resolve => setTimeout(resolve, Math.random() * 50 + 30))
+  }
+
+  yield { type: 'done', data: null }
 }
 
+// SSE stream event types
+export type StreamEvent =
+  | { type: 'chunk'; data: string }
+  | { type: 'done'; data: null }
+  | { type: 'error'; data: string }
+  | { type: 'session_id'; data: string }
+
 class ApiClient {
   private sessionId: string | null = null
@@ -78,40 +82,95 @@ class ApiClient {
     clearChatSessionId()
   }
 
-  // Simple JSON chat endpoint
-  async chat(
+  // Streaming chat endpoint (SSE)
+  async *chatStream(
     question: string,
     mode: ChatMode,
     sessionId?: string
-  ): Promise<ChatResponse> {
+  ): AsyncGenerator<StreamEvent> {
     // Use mock data if enabled
     if (USE_MOCK_DATA) {
-      return mockChat(question, mode)
+      yield* mockChatStream(question, mode)
+      return
    }
 
     // Generate or reuse session ID
     const actualSessionId = sessionId || this.getSessionId() || crypto.randomUUID()
     this.setSessionId(actualSessionId)
 
-    const response = await fetch(`${API_BASE_URL}/chat`, {
-      method: 'POST',
-      headers: {
-        'Content-Type': 'application/json',
-      },
-      body: JSON.stringify({
-        message: question,
-        conversation_id: actualSessionId,
-      }),
-    })
-
-    if (!response.ok) {
-      const errorData = await response.json().catch(() => ({}))
-      throw new Error(errorData.detail || `HTTP error! status: ${response.status}`)
-    }
-
-    const data: ChatResponse = await response.json()
-    this.setSessionId(data.conversation_id)
-
-    return data
+    // Yield session ID first
+    yield { type: 'session_id', data: actualSessionId }
+
+    try {
+      const response = await fetch(`${API_BASE_URL}/api/chat/stream`, {
+        method: 'POST',
+        headers: {
+          'Content-Type': 'application/json',
+        },
+        body: JSON.stringify({
+          question,
+          mode,
+          sessionId: actualSessionId,
+        }),
+      })
+
+      if (!response.ok) {
+        yield { type: 'error', data: `HTTP error! status: ${response.status}` }
+        return
+      }
+
+      if (!response.body) {
+        yield { type: 'error', data: 'Response body is null' }
+        return
+      }
+
+      const reader = response.body.getReader()
+      const decoder = new TextDecoder()
+      let buffer = ''
+
+      while (true) {
+        const { done, value } = await reader.read()
+        if (done) break
+
+        buffer += decoder.decode(value, { stream: true })
+        const lines = buffer.split('\n')
+        buffer = lines.pop() || ''
+
+        for (const line of lines) {
+          if (line.startsWith('data: ')) {
+            const data = line.slice(6)
+
+            if (data === '[DONE]') {
+              yield { type: 'done', data: null }
+              return
+            }
+
+            try {
+              const parsed = JSON.parse(data)
+              if (parsed.type === 'error') {
+                yield { type: 'error', data: parsed.message }
+              } else if (parsed.content) {
+                yield { type: 'chunk', data: parsed.content }
+              } else {
+                // Raw string chunk
+                yield { type: 'chunk', data: data }
+              }
+            } catch {
+              // Raw string chunk (not JSON)
+              yield { type: 'chunk', data: data }
+            }
+          }
+        }
+      }
+
+      yield { type: 'done', data: null }
+    } catch (error) {
+      yield {
+        type: 'error',
+        data: error instanceof Error ? error.message : 'Network error',
+      }
+    }
   }
 }
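
chatStream() assumes the backend frames its reply as server-sent events: each payload arrives on a `data: ` line and is either JSON (a `content` field for text chunks, or `type: "error"` with a `message`) or a raw string, with a literal `[DONE]` sentinel ending the stream. A standalone sketch of that framing logic as a pure function, with a hypothetical sample payload since the backend contract is not shown in this compare (the session_id event is yielded client-side, so it is omitted here):

// Sketch of the SSE line framing chatStream() expects. Sample data is hypothetical.
type StreamEvent =
  | { type: 'chunk'; data: string }
  | { type: 'done'; data: null }
  | { type: 'error'; data: string }

function parseSseLines(raw: string): StreamEvent[] {
  const events: StreamEvent[] = []
  for (const line of raw.split('\n')) {
    if (!line.startsWith('data: ')) continue // skip blank lines and non-data fields
    const data = line.slice(6)
    if (data === '[DONE]') {
      events.push({ type: 'done', data: null })
      continue
    }
    try {
      const parsed = JSON.parse(data)
      if (parsed.type === 'error') {
        events.push({ type: 'error', data: parsed.message })
      } else if (parsed.content) {
        events.push({ type: 'chunk', data: parsed.content })
      } else {
        events.push({ type: 'chunk', data }) // JSON without a content field: treat as raw text
      }
    } catch {
      events.push({ type: 'chunk', data }) // not JSON: treat as raw text
    }
  }
  return events
}

// Hypothetical wire sample:
const sample = 'data: {"content":"Hel"}\n\ndata: {"content":"lo"}\n\ndata: [DONE]\n'
console.log(parseSseLines(sample))
// [ { type: 'chunk', data: 'Hel' }, { type: 'chunk', data: 'lo' }, { type: 'done', data: null } ]

The client-side buffering in chatStream() (splitting on '\n' and keeping the trailing partial line in buffer) matters because a fetch read() can end mid-line; this sketch only handles lines that are already complete.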

View File

@@ -12,14 +12,7 @@ export default defineConfig({
   },
   server: {
     port: 3000,
-    host: true,
-    proxy: {
-      '/api': {
-        target: 'http://localhost:8000',
-        changeOrigin: true,
-        rewrite: (path) => path.replace(/^\/api/, ''),
-      }
-    }
+    host: true
   },
   build: {
     outDir: 'dist',