Skip to main content
Sends audio data to the bot to be played in the meeting. This allows the bot to output speech or other audio content to meeting participants.

Method Signature

bot.outputAudio(params: OutputAudioParams): Promise<void>

Parameters

id
string
required
The unique identifier of the bot
kind
string
required
The type of audio data being sent — typically "audio" or a specific format identifier.
b64_data
string
required
Base64-encoded audio data to be played by the bot

Response

Returns void on success. The audio will be queued for playback by the bot.

Example

import { Recall } from '@recall.ai/sdk';
import { readFileSync } from 'fs';

// Initialize the SDK client for the target region.
const client = new Recall({
  apiKey: 'your-api-key',
  region: 'us-west-2'
});

// Load the WAV file from disk and base64-encode it in one step.
const encodedAudio = readFileSync('./announcement.wav').toString('base64');

// Queue the clip for playback in the meeting.
await client.bot.outputAudio({
  id: 'bot_1234567890',
  kind: 'audio',
  b64_data: encodedAudio
});

console.log('Audio sent to bot for playback');

Example: Text-to-Speech Playback

import fetch from 'node-fetch';

/**
 * Converts text to speech via an external TTS service and plays the
 * resulting audio through the bot.
 *
 * @param botId - The unique identifier of the bot
 * @param text  - The text to speak in the meeting
 * @throws If the TTS service responds with a non-2xx status
 */
async function speakText(botId: string, text: string): Promise<void> {
  // Convert text to speech using a TTS service (example)
  const ttsResponse = await fetch('https://api.example.com/tts', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ text, voice: 'en-US-Standard-A' })
  });

  // fetch() does not reject on HTTP error statuses, so check explicitly —
  // otherwise a TTS failure would base64-encode an error page and "play" it.
  if (!ttsResponse.ok) {
    throw new Error(`TTS request failed: ${ttsResponse.status} ${ttsResponse.statusText}`);
  }

  const audioBuffer = await ttsResponse.arrayBuffer();
  const b64Audio = Buffer.from(audioBuffer).toString('base64');

  // Send to bot
  await client.bot.outputAudio({
    id: botId,
    kind: 'audio',
    b64_data: b64Audio
  });

  console.log(`Bot will speak: "${text}"`);
}

await speakText('bot_1234567890', 'Hello everyone, the recording has started.');

Example: Play Multiple Audio Clips

/**
 * Plays a sequence of audio files through the bot, pausing between clips.
 *
 * @param botId      - The unique identifier of the bot
 * @param audioFiles - Paths of audio files to send, in playback order
 */
async function playAudioSequence(botId: string, audioFiles: string[]): Promise<void> {
  for (const [index, filePath] of audioFiles.entries()) {
    console.log(`Playing audio ${index + 1}/${audioFiles.length}`);

    const audioBuffer = readFileSync(filePath);
    const b64Audio = audioBuffer.toString('base64');

    await client.bot.outputAudio({
      id: botId,
      kind: 'audio',
      b64_data: b64Audio
    });

    // Wait between clips only — no reason to sleep after the final clip.
    if (index < audioFiles.length - 1) {
      await new Promise(resolve => setTimeout(resolve, 2000));
    }
  }

  console.log('All audio clips sent');
}

await playAudioSequence('bot_1234567890', [
  './intro.wav',
  './announcement.wav',
  './outro.wav'
]);

Example: Interactive Bot Response

import { readFileSync } from 'fs';

/**
 * Polls the bot's transcript on an interval and responds to specific
 * spoken phrases with audio output.
 *
 * @param botId - The unique identifier of the bot
 * @returns The polling timer, so the caller can stop the bot with
 *          clearInterval() when the meeting ends
 */
async function interactiveBot(botId: string): Promise<NodeJS.Timeout> {
  // Monitor transcript for keywords
  const checkForKeywords = async () => {
    try {
      const transcript = await client.bot.getTranscript({ id: botId });
      const recentWords = transcript.words.slice(-20);
      const recentText = recentWords.map(w => w.text).join(' ').toLowerCase();

      // Respond to specific phrases
      if (recentText.includes('what time is it')) {
        const time = new Date().toLocaleTimeString();
        await speakText(botId, `The current time is ${time}`);
      } else if (recentText.includes('thank you')) {
        const response = readFileSync('./youre-welcome.wav');
        await client.bot.outputAudio({
          id: botId,
          kind: 'audio',
          b64_data: response.toString('base64')
        });
      }
    } catch (error: unknown) {
      // setInterval ignores the promise returned by an async callback, so a
      // rejection here would otherwise surface as an unhandled rejection.
      console.error('Keyword check failed:', error instanceof Error ? error.message : error);
    }
  };

  // Check every 30 seconds; return the handle so polling can be stopped.
  return setInterval(checkForKeywords, 30000);
}

await interactiveBot('bot_1234567890');

Example: Scheduled Announcements

/**
 * Schedules an announcement to be played through the bot after a delay.
 *
 * Note: this is fire-and-forget — the returned promise resolves as soon as
 * the timer is scheduled, not when the announcement actually plays.
 *
 * @param botId        - The unique identifier of the bot
 * @param audioFile    - Path of the audio file to play
 * @param delaySeconds - Delay before playback, in seconds
 */
async function scheduleAnnouncement(
  botId: string,
  audioFile: string,
  delaySeconds: number
): Promise<void> {
  console.log(`Announcement scheduled in ${delaySeconds} seconds`);

  setTimeout(async () => {
    // Errors here occur long after the caller has returned; an async
    // setTimeout callback that rejects becomes an unhandled promise
    // rejection, so catch and log locally.
    try {
      const audioBuffer = readFileSync(audioFile);
      const b64Audio = audioBuffer.toString('base64');

      await client.bot.outputAudio({
        id: botId,
        kind: 'audio',
        b64_data: b64Audio
      });

      console.log('Announcement played');
    } catch (error: unknown) {
      console.error('Failed to play announcement:', error instanceof Error ? error.message : error);
    }
  }, delaySeconds * 1000);
}

// Play announcement after 5 minutes
await scheduleAnnouncement('bot_1234567890', './5-min-warning.wav', 300);

Example: With Error Handling

/**
 * Sends an audio file to the bot, verifying the bot is in a call first.
 *
 * @param botId     - The unique identifier of the bot
 * @param audioFile - Path of the audio file to play
 * @returns true if the audio was sent, false on any failure
 */
async function safeOutputAudio(
  botId: string,
  audioFile: string
): Promise<boolean> {
  try {
    // Check bot status first — audio can only play while the bot is in a call.
    const bot = await client.bot.retrieve({ id: botId });

    if (!['in_call_not_recording', 'in_call_recording'].includes(bot.status)) {
      console.error(`Bot not in call (status: ${bot.status})`);
      return false;
    }

    // Read and send audio
    const audioBuffer = readFileSync(audioFile);
    const b64Audio = audioBuffer.toString('base64');

    await client.bot.outputAudio({
      id: botId,
      kind: 'audio',
      b64_data: b64Audio
    });

    console.log('Audio sent successfully');
    return true;
  } catch (error: unknown) {
    // Catch variables are 'unknown' under strict mode — narrow before
    // reading .message, or this fails to compile.
    console.error('Failed to send audio:', error instanceof Error ? error.message : error);
    return false;
  }
}

const success = await safeOutputAudio('bot_1234567890', './audio.wav');

Notes

  • The bot must be in the call (in_call_not_recording or in_call_recording status) to play audio
  • Audio format should be compatible with the meeting platform (typically WAV or MP3)
  • Base64 encoding increases the data size by roughly 33% — be mindful of payload limits
  • Audio playback is queued and may have a slight delay before it starts
  • Not all meeting platforms support audio output from bots

Build docs developers (and LLMs) love