Enable live transcription when creating a meeting. Transcript results stream to the client SDK via WebSocket, or your backend can receive them via webhook.
// npm install v100-sdk
import { V100 } from 'v100-sdk';

const v100 = new V100('v100_live_YOUR_API_KEY');

// Transcription settings: live streaming, automatic language detection,
// speaker diarization, partial results for captions, and per-word timing.
const transcriptionConfig = {
  enabled: true,
  mode: 'live',          // 'live' | 'post' | 'both'
  language: 'auto',      // Auto-detect or specify: 'en', 'es', 'fr', etc.
  diarize: true,         // Speaker identification
  interimResults: true,  // Partial words for live captions
  wordTimestamps: true,  // Per-word start/end times
  customVocabulary: ['V100', 'Dilithium', 'ML-KEM'],
};

// On-screen caption appearance inside the meeting UI.
const captionConfig = {
  overlay: true,         // Show live captions in meeting UI
  position: 'bottom',    // 'bottom' | 'top' | 'custom'
  fontSize: 'medium',    // 'small' | 'medium' | 'large'
};

// Create a meeting with live transcription turned on.
const meeting = await v100.meetings.create({
  name: 'Product Review',
  transcription: transcriptionConfig,
  captions: captionConfig,
});
// Subscribe to real-time transcript events (client SDK).
// Interim events carry partial text while a speaker is still talking:
v100.on('transcription.interim', (data) => {
  // data.text    → "we should consider the"
  // data.speaker → { id: "usr_abc", name: "Alice Chen" }
  // data.isFinal → false
});

// Final events arrive once a segment is complete, with word-level detail:
v100.on('transcription.final', (data) => {
  // data.text     → "we should consider the new pricing model"
  // data.speaker  → { id: "usr_abc", name: "Alice Chen" }
  // data.words    → [{ word: "we", start: 12.34, end: 12.50, confidence: 0.98 }, ...]
  // data.language → "en"
  // data.isFinal  → true
});
// Fetch the complete transcript once the meeting has ended.
const transcript = await v100.transcripts.get(meeting.id);
// transcript.segments → [{ speaker: "Alice", text: "...", start: 0.0, end: 5.2 }, ...]
// transcript.fullText → "Alice: We should consider..."
# Equivalent request against the REST API directly.
curl --request POST https://api.v100.ai/v1/meetings \
  --header "Authorization: Bearer v100_live_YOUR_API_KEY" \
  --header "Content-Type: application/json" \
  --data '{
    "name": "Product Review",
    "transcription": {
      "enabled": true,
      "mode": "live",
      "language": "auto",
      "diarize": true,
      "interimResults": true,
      "wordTimestamps": true
    },
    "captions": { "overlay": true }
  }'