Compare commits
No commits in common. "d4fe82e9a4169ef10edb6289d2d450baa6773728" and "fbf5ad5561d6d70eb060a668f71888fc5a7fc559" have entirely different histories.
d4fe82e9a4 ... fbf5ad5561
(lines prefixed with - belong to d4fe82e9a4, lines prefixed with + to fbf5ad5561)

Dockerfile (25 changed lines)
@@ -1,4 +1,4 @@
-# Build stage
+# ---------- Build stage (Keep this the same) ----------
 FROM node:20-alpine AS build
 WORKDIR /app
 COPY package.json package-lock.json ./
@@ -6,11 +6,18 @@ RUN npm ci
 COPY . .
 RUN npm run build
 
-# Production stage
-FROM node:20-alpine
-WORKDIR /app
-COPY --from=build /app/dist ./dist
-COPY server.js .
-RUN npm install express@4 google-auth-library
-EXPOSE 8080
-CMD ["node", "server.js"]
+# ---------- Diagnostic Production stage ----------
+FROM python:3.11-slim
+
+# Set the working directory to where the React files live
+WORKDIR /usr/share/nginx/html
+
+# Copy build output from the build stage
+COPY --from=build /app/dist .
+
+# Expose port 80 (Cloud Run expects this)
+EXPOSE 80
+
+# Run Python's built-in simple HTTP server
+# This server is very "dumb" and will ignore/accept large IAP headers
+CMD ["python", "-m", "http.server", "80"]
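The replacement stage serves the built SPA purely as static files: python -m http.server does no /api proxying and no SPA fallback, so its only job here is checking whether oversized IAP-style request headers reach the app at all. A minimal local smoke-test sketch, assuming the image is run with docker run -p 8080:80 (the port mapping and the x-diagnostic-padding header name are placeholders, not part of this change):

// Smoke-test sketch (assumptions: container published as localhost:8080 -> 80;
// the header name is made up). A ~20 KB header imitates large IAP/JWT headers,
// which exceed Node's default 16 KB header limit but pass Python's http.server.
async function checkLargeHeaders(): Promise<void> {
  const padding = 'a'.repeat(20_000)
  const resp = await fetch('http://localhost:8080/', {
    headers: { 'x-diagnostic-padding': padding },
  })
  console.log(resp.status, resp.headers.get('content-type'))
}

checkLargeHeaders().catch(console.error)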
server.js (60 changed lines)
@@ -1,60 +0,0 @@
-const express = require('express');
-const path = require('path');
-const { GoogleAuth } = require('google-auth-library');
-
-const app = express();
-app.use(express.json({ limit: '1mb' }));
-
-const BACKEND_URL = process.env.BACKEND_URL;
-if (!BACKEND_URL) {
-  console.error('FATAL: BACKEND_URL env var is required');
-  process.exit(1);
-}
-
-const auth = new GoogleAuth();
-
-app.get('/health', (_req, res) => {
-  res.status(200).json({ status: 'ok' });
-});
-
-app.post('/api/chat', async (req, res) => {
-  console.log('🔥🔥🔥 /api/chat ROUTE HIT 🔥🔥🔥');
-  try {
-    // Create an ID-token authenticated client for the backend (audience = BACKEND_URL)
-    const idTokenClient = await auth.getIdTokenClient(BACKEND_URL);
-
-    // Forward request to backend
-    const backendResp = await idTokenClient.request({
-      url: `${BACKEND_URL}/chat`,
-      method: 'POST',
-      data: req.body,
-    });
-
-    res.status(backendResp.status).json(backendResp.data);
-  } catch (err) {
-    console.error('Proxy error FULL:', {
-      message: err.message,
-      responseStatus: err.response?.status,
-      responseData: err.response?.data,
-      stack: err.stack,
-    });
-    const status = err.response?.status || 500;
-    const message = err.response?.data || { error: err.message };
-    res.status(err.response?.status || 500).json({
-      error: err.response?.data || err.message || 'Unknown proxy error',
-    });
-  }
-});
-
-// Serve static assets
-app.use(express.static(path.join(__dirname, 'dist')));
-
-// SPA fallback
-app.get('*', (_req, res) => {
-  res.sendFile(path.join(__dirname, 'dist', 'index.html'));
-});
-
-const port = parseInt(process.env.PORT || '8080', 10);
-app.listen(port, '0.0.0.0', () => {
-  console.log(`Frontend proxy listening on port ${port}`);
-});
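For reference, the core of the deleted proxy is the ID-token flow from google-auth-library: getIdTokenClient(audience) returns a client whose requests carry an Authorization: Bearer ID token minted for that audience, which is what a private (authenticated-only) Cloud Run backend expects. A stripped-down sketch of that flow; the backend URL shown is a placeholder, since the real value came from the BACKEND_URL env var:

import { GoogleAuth } from 'google-auth-library'

// Placeholder audience; server.js read this from process.env.BACKEND_URL.
const BACKEND_URL = 'https://backend-xxxxx.a.run.app'

async function forwardChat(body: unknown) {
  const auth = new GoogleAuth()
  // The ID token's audience must be the URL of the receiving service.
  const client = await auth.getIdTokenClient(BACKEND_URL)
  // request() attaches the Authorization: Bearer <ID token> header automatically.
  const resp = await client.request({ url: `${BACKEND_URL}/chat`, method: 'POST', data: body })
  return resp.data
}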
useChat hook (file name not captured in this view)

@@ -24,21 +24,47 @@ export const useChat = (mode: ChatMode) => {
     setIsLoading(true)
 
     try {
-      // Call the chat API and get the response
-      const response = await apiClient.chat(question, mode)
+      // Create placeholder assistant message
+      const assistantMessageId = (Date.now() + 1).toString()
+      let assistantContent = ''
 
-      // Add assistant message with the response
-      const assistantMessage: Message = {
-        id: (Date.now() + 1).toString(),
-        role: 'assistant',
-        content: response.response,
-      }
-      setMessages((prev) => [...prev, assistantMessage])
-      setIsLoading(false)
+      setMessages((prev) => [
+        ...prev,
+        {
+          id: assistantMessageId,
+          role: 'assistant',
+          content: '',
+        },
+      ])
+
+      // Process SSE stream
+      const streamGenerator = apiClient.chatStream(question, mode)
+
+      for await (const chunk of streamGenerator) {
+        if (chunk.type === 'chunk') {
+          // Append chunk content
+          assistantContent += chunk.data
+
+          // Update assistant message
+          setMessages((prev) =>
+            prev.map((msg) =>
+              msg.id === assistantMessageId
+                ? { ...msg, content: assistantContent }
+                : msg
+            )
+          )
+        } else if (chunk.type === 'error') {
+          toast.error(chunk.data)
+          console.error('Stream error:', chunk.data)
+        } else if (chunk.type === 'done') {
+          setIsLoading(false)
+        }
+      }
+
     } catch (error) {
       console.error('Error sending message:', error)
+      toast.error(error instanceof Error ? error.message : 'Failed to send message. Please try again.')
     } finally {
-      toast.error('Failed to send message. Please try again.')
       setIsLoading(false)
     }
   }
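The streaming variant first appends an empty assistant message, then rewrites only that message as chunks arrive, using a functional setState update keyed by the message id. A pared-down sketch of just that update step, with the Message shape reduced to the fields the diff shows:

// Reduced Message shape for illustration; the real type lives in the app's types module.
type Message = { id: string; role: 'user' | 'assistant'; content: string }

// Replace the content of the message with the matching id, leaving the others untouched.
const withStreamedContent = (messages: Message[], id: string, content: string): Message[] =>
  messages.map((msg) => (msg.id === id ? { ...msg, content } : msg))

// After a couple of chunks, the empty placeholder holds the accumulated text.
console.log(withStreamedContent([{ id: '1', role: 'assistant', content: '' }], '1', 'Hello world'))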
src/lib/api.ts (143 changed lines)
@@ -1,8 +1,7 @@
 import type { ChatMode } from '@/types/chat'
 
-// In production, use relative URL to route through the proxy server
-const API_BASE_URL = '/api'
-const USE_MOCK_DATA = false // Set to true to use mock data for testing
+const API_BASE_URL = import.meta.env.VITE_API_URL || 'http://localhost:5000'
+const USE_MOCK_DATA = true // Set to false when backend is ready
 
 // Session management
 export const getChatSessionId = (): string | null => {
@@ -33,34 +32,39 @@ const MOCK_RESPONSES = {
   ],
 }
 
-// Chat response type from backend
-export interface ChatResponse {
-  conversation_id: string
-  response: string
-  mode: string
-  sources: string[]
-}
-
-// Mock chat function
-async function mockChat(
+// Mock streaming function
+async function* mockChatStream(
   _question: string,
   mode: ChatMode
-): Promise<ChatResponse> {
+): AsyncGenerator<StreamEvent> {
+  const sessionId = crypto.randomUUID()
+  yield { type: 'session_id', data: sessionId }
+
   // Select a random response based on mode
   const responses = MOCK_RESPONSES[mode]
   const response = responses[Math.floor(Math.random() * responses.length)]
 
-  // Simulate network delay
-  await new Promise(resolve => setTimeout(resolve, 500))
-
-  return {
-    conversation_id: crypto.randomUUID(),
-    response,
-    mode,
-    sources: [],
+  // Simulate streaming by yielding words with delays
+  const words = response.split(' ')
+  for (let i = 0; i < words.length; i++) {
+    // Add space before word (except first word)
+    const chunk = i === 0 ? words[i] : ' ' + words[i]
+    yield { type: 'chunk', data: chunk }
+
+    // Random delay between 30-80ms to simulate typing
+    await new Promise(resolve => setTimeout(resolve, Math.random() * 50 + 30))
   }
+
+  yield { type: 'done', data: null }
 }
 
+// SSE stream event types
+export type StreamEvent =
+  | { type: 'chunk'; data: string }
+  | { type: 'done'; data: null }
+  | { type: 'error'; data: string }
+  | { type: 'session_id'; data: string }
+
 class ApiClient {
   private sessionId: string | null = null
 
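Because the mockChatStream added above is an async generator, it can be exercised on its own with for await, exactly like the real stream. A small consumption sketch; it assumes the generator were exported for testing and that 'general' is one of the ChatMode values (both assumptions, not shown in the diff):

// Consumption sketch for the mock stream; see assumptions noted above.
async function printMockStream(): Promise<void> {
  let text = ''
  for await (const event of mockChatStream('What does this repo do?', 'general')) {
    if (event.type === 'chunk') text += event.data            // words arrive incrementally
    else if (event.type === 'session_id') console.log('session:', event.data)
    else if (event.type === 'done') console.log('final text:', text)
  }
}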
@@ -78,40 +82,95 @@ class ApiClient {
     clearChatSessionId()
   }
 
-  // Simple JSON chat endpoint
-  async chat(
+  // Streaming chat endpoint (SSE)
+  async *chatStream(
     question: string,
     mode: ChatMode,
     sessionId?: string
-  ): Promise<ChatResponse> {
+  ): AsyncGenerator<StreamEvent> {
     // Use mock data if enabled
     if (USE_MOCK_DATA) {
-      return mockChat(question, mode)
+      yield* mockChatStream(question, mode)
+      return
     }
 
     // Generate or reuse session ID
     const actualSessionId = sessionId || this.getSessionId() || crypto.randomUUID()
     this.setSessionId(actualSessionId)
 
-    const response = await fetch(`${API_BASE_URL}/chat`, {
-      method: 'POST',
-      headers: {
-        'Content-Type': 'application/json',
-      },
-      body: JSON.stringify({
-        message: question,
-        conversation_id: actualSessionId,
-      }),
-    })
+    // Yield session ID first
+    yield { type: 'session_id', data: actualSessionId }
 
-    if (!response.ok) {
-      const errorData = await response.json().catch(() => ({}))
-      throw new Error(errorData.detail || `HTTP error! status: ${response.status}`)
-    }
+    try {
+      const response = await fetch(`${API_BASE_URL}/api/chat/stream`, {
+        method: 'POST',
+        headers: {
+          'Content-Type': 'application/json',
+        },
+        body: JSON.stringify({
+          question,
+          mode,
+          sessionId: actualSessionId,
+        }),
+      })
+
+      if (!response.ok) {
+        yield { type: 'error', data: `HTTP error! status: ${response.status}` }
+        return
+      }
+
+      if (!response.body) {
+        yield { type: 'error', data: 'Response body is null' }
+        return
+      }
+
+      const reader = response.body.getReader()
+      const decoder = new TextDecoder()
+      let buffer = ''
+
+      while (true) {
+        const { done, value } = await reader.read()
+
+        if (done) break
+
+        buffer += decoder.decode(value, { stream: true })
+        const lines = buffer.split('\n')
+        buffer = lines.pop() || ''
+
+        for (const line of lines) {
+          if (line.startsWith('data: ')) {
+            const data = line.slice(6)
+
+            if (data === '[DONE]') {
+              yield { type: 'done', data: null }
+              return
+            }
+
+            try {
+              const parsed = JSON.parse(data)
+              if (parsed.type === 'error') {
+                yield { type: 'error', data: parsed.message }
+              } else if (parsed.content) {
+                yield { type: 'chunk', data: parsed.content }
+              } else {
+                // Raw string chunk
+                yield { type: 'chunk', data: data }
+              }
+            } catch {
+              // Raw string chunk (not JSON)
+              yield { type: 'chunk', data: data }
+            }
+          }
+        }
+      }
+
+      yield { type: 'done', data: null }
+    } catch (error) {
+      yield {
+        type: 'error',
+        data: error instanceof Error ? error.message : 'Network error',
+      }
+    }
 
-    const data: ChatResponse = await response.json()
-    this.setSessionId(data.conversation_id)
-    return data
   }
 }
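The reader loop above assumes the backend emits SSE-style frames, one data: line per chunk with a data: [DONE] terminator, and it keeps the last (possibly partial) line in buffer because a network read can end mid-line. A self-contained sketch of just that buffering rule, with made-up payloads:

// Standalone sketch of the buffering rule used above: a read may end mid-line,
// so the trailing partial line is carried over into the next read.
function* extractDataLines(reads: string[]): Generator<string> {
  let buffer = ''
  for (const read of reads) {
    buffer += read
    const lines = buffer.split('\n')
    buffer = lines.pop() || ''       // keep the (possibly partial) last line
    for (const line of lines) {
      if (line.startsWith('data: ')) yield line.slice(6)
    }
  }
}

// "data: hel" + "lo\n" arrives across two reads but is parsed as one event.
console.log([...extractDataLines(['data: hel', 'lo\ndata: [DONE]\n'])])
// -> [ 'hello', '[DONE]' ]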
Vite config (file name not captured in this view)

@@ -12,14 +12,7 @@ export default defineConfig({
   },
   server: {
     port: 3000,
-    host: true,
-    proxy: {
-      '/api': {
-        target: 'http://localhost:8000',
-        changeOrigin: true,
-        rewrite: (path) => path.replace(/^\/api/, ''),
-      }
-    }
+    host: true
   },
   build: {
     outDir: 'dist',
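The removed dev-server proxy is what made the old API_BASE_URL = '/api' work during local development: requests to /api/* were forwarded to http://localhost:8000 with the /api prefix stripped. The rewrite's effect, in isolation:

// Effect of the removed rewrite rule: '/api/chat' was forwarded as
// 'http://localhost:8000/chat' by the Vite dev server.
const rewrite = (path: string): string => path.replace(/^\/api/, '')
console.log(rewrite('/api/chat'))   // -> '/chat'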