Skip to main content
Create an STT engine instance for batch/offline transcription. This is ideal for transcribing complete audio files or recorded audio samples. For real-time streaming recognition, use createStreamingSTT() instead.
function createSTT(
  options: STTInitializeOptions | ModelPathConfig
): Promise<SttEngine>

Parameters

options
STTInitializeOptions | ModelPathConfig
required
Initialization options or a model path configuration.

Returns

Promise<SttEngine>
SttEngine
A promise that resolves to an STT engine instance.

Examples

Basic Usage

import { createSTT, assetModelPath } from 'react-native-sherpa-onnx/stt';

// Create STT engine with asset model
const stt = await createSTT({
  modelPath: assetModelPath('models/whisper-tiny-en'),
});

// Transcribe an audio file
const result = await stt.transcribeFile('/path/to/audio.wav');
console.log('Transcription:', result.text);

// Clean up
await stt.destroy();

With Auto-Detection

import { createSTT, autoModelPath } from 'react-native-sherpa-onnx/stt';

const stt = await createSTT({
  modelPath: autoModelPath('models/sherpa-onnx-whisper-tiny'),
  modelType: 'auto', // Automatically detect model type
});

const result = await stt.transcribeFile('recording.wav');
console.log(result.text);
await stt.destroy();

With Downloaded Model

import { createSTT, fileModelPath } from 'react-native-sherpa-onnx/stt';
import { getLocalModelPathByCategory, ModelCategory } from 'react-native-sherpa-onnx/download';

// Get path to downloaded model
const modelPath = await getLocalModelPathByCategory(
  ModelCategory.Stt,
  'sherpa-onnx-whisper-tiny-en'
);

if (modelPath) {
  const stt = await createSTT({
    modelPath: fileModelPath(modelPath),
  });
  
  const result = await stt.transcribeFile('audio.wav');
  console.log(result.text);
  await stt.destroy();
}

With Hotwords (Keyword Boosting)

import { createSTT, assetModelPath } from 'react-native-sherpa-onnx/stt';

const stt = await createSTT({
  modelPath: assetModelPath('models/zipformer-transducer-en'),
  modelType: 'transducer',
  hotwordsFile: '/path/to/hotwords.txt', // One keyword per line
  hotwordsScore: 2.0,
  modelingUnit: 'bpe',
  bpeVocab: '/path/to/bpe.vocab',
});

const result = await stt.transcribeFile('audio.wav');
// Words listed in the hotwords file receive a score boost during decoding
console.log(result.text);
await stt.destroy();

With Whisper Model Options

import { createSTT, assetModelPath } from 'react-native-sherpa-onnx/stt';

const stt = await createSTT({
  modelPath: assetModelPath('models/whisper-base'),
  modelType: 'whisper',
  modelOptions: {
    whisper: {
      language: 'en',
      task: 'transcribe', // or 'translate' to translate the audio into English
      tailPaddings: 1000,
    },
  },
});

const result = await stt.transcribeFile('audio.wav');
console.log(result.text);
await stt.destroy();

Transcribe PCM Samples

import { createSTT, assetModelPath } from 'react-native-sherpa-onnx/stt';

const stt = await createSTT({
  modelPath: assetModelPath('models/whisper-tiny'),
});

// Assume `samples` is a Float32Array or number[] of PCM samples in the range [-1, 1]
const samples: number[] = [...]; // Your audio samples
const sampleRate = 16000;

const result = await stt.transcribeSamples(samples, sampleRate);
console.log('Transcription:', result.text);
console.log('Tokens:', result.tokens);
console.log('Timestamps:', result.timestamps);

await stt.destroy();

With Hardware Acceleration

import { 
  createSTT, 
  assetModelPath 
} from 'react-native-sherpa-onnx/stt';
import { getCoreMlSupport } from 'react-native-sherpa-onnx';

// Check Core ML support on iOS
const coreMLSupport = await getCoreMlSupport();

const stt = await createSTT({
  modelPath: assetModelPath('models/whisper-tiny'),
  provider: coreMLSupport.canInit ? 'coreml' : 'cpu',
  numThreads: 2,
});

const result = await stt.transcribeFile('audio.wav');
console.log(result.text);
await stt.destroy();

detectSttModel()

Detect STT model type without initializing the recognizer.
function detectSttModel(
  modelPath: ModelPathConfig,
  options?: { preferInt8?: boolean; modelType?: STTModelType }
): Promise<{
  success: boolean;
  detectedModels: Array<{ type: string; modelDir: string }>;
  modelType?: string;
}>

Example

import { detectSttModel, assetModelPath } from 'react-native-sherpa-onnx/stt';

const result = await detectSttModel(
  assetModelPath('models/sherpa-onnx-whisper-tiny-en')
);

if (result.success) {
  console.log('Detected type:', result.modelType);
  console.log('Models found:', result.detectedModels);
}

See Also

Build docs developers (and LLMs) love