feat(stt): remove interimResults and lowLatency wss params
apaparazzi0329 committed May 17, 2024
1 parent 8f19640 commit 6c79d93
Showing 5 changed files with 22 additions and 28 deletions.
8 changes: 2 additions & 6 deletions lib/recognize-stream.ts
@@ -1,5 +1,5 @@
/**
* (C) Copyright IBM Corp. 2014, 2020.
* (C) Copyright IBM Corp. 2014, 2024.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -62,7 +62,7 @@ class RecognizeStream extends Duplex {
*
* Uses WebSockets under the hood. For audio with no recognizable speech, no `data` events are emitted.
*
* By default, only finalized text is emitted in the data events, however when `objectMode`/`readableObjectMode` and `interim_results` are enabled, both interim and final results objects are emitted.
* By default, only finalized text is emitted in the data events, however when `objectMode`/`readableObjectMode` is enabled, both interim and final results objects are emitted.
* WriteableElementStream uses this, for example, to live-update the DOM with word-by-word transcriptions.
*
* Note that the WebSocket connection is not established until the first chunk of data is received. This allows for auto-detection of content type (for wav/flac/opus audio).
@@ -86,7 +86,6 @@ class RecognizeStream extends Duplex {
* @param {string} [options.contentType] - The format (MIME type) of the audio
* @param {number} [options.customizationWeight] - Tell the service how much weight to give to words from the custom language model compared to those from the base model for the current request
* @param {number} [options.inactivityTimeout] - The time in seconds after which, if only silence (no speech) is detected in the audio, the connection is closed (default=30)
* @param {boolean} [options.interimResults] - If true, the service returns interim results as a stream of JSON SpeechRecognitionResults objects (default=false)
* @param {string[]} [options.keywords] - An array of keyword strings to spot in the audio
* @param {number} [options.keywordsThreshold] - A confidence value that is the lower bound for spotting a keyword
* @param {number} [options.maxAlternatives] - The maximum number of alternative transcripts that the service is to return (default=1)
@@ -105,7 +104,6 @@
* @param {boolean} [options.splitTranscriptAtPhraseEnd] - If `true`, directs the service to split the transcript into multiple final results based on semantic features of the input
* @param {number} [options.speechDetectorSensitivity] - The sensitivity of speech activity detection that the service is to perform
* @param {number} [options.backgroundAudioSuppression] - The level to which the service is to suppress background audio based on its volume to prevent it from being transcribed as speech
* @param {boolean} [params.lowLatency] - If `true` for next-generation `Multimedia` and `Telephony` models that support low latency, directs the service to produce results even more quickly than it usually does
* @constructor
*/
constructor(options: RecognizeStream.Options) {
@@ -168,7 +166,6 @@ class RecognizeStream extends Duplex {
'timestamps',
'word_confidence',
'content-type',
'interim_results',
'keywords',
'keywords_threshold',
'max_alternatives',
@@ -182,7 +179,6 @@
'split_transcript_at_phrase_end',
'speech_detector_sensitivity',
'background_audio_suppression',
'low_latency',
];
const openingMessage = processUserParameters(options, openingMessageParamsAllowed);
openingMessage.action = 'start';
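With `interim_results` and `low_latency` dropped from the opening-message allow-list, interim output is governed solely by `objectMode`/`readableObjectMode`. A minimal usage sketch of the stream after this change (credentials, service URL, and file name are placeholders, not part of the commit):

```typescript
import SpeechToTextV1 = require('ibm-watson/speech-to-text/v1');
import { IamAuthenticator } from 'ibm-watson/auth';
import * as fs from 'fs';

// Placeholder credentials; substitute real values.
const speechToText = new SpeechToTextV1({
  authenticator: new IamAuthenticator({ apikey: '<apikey>' }),
  serviceUrl: '<service-url>',
});

const recognizeStream = speechToText.recognizeUsingWebSocket({
  contentType: 'audio/flac',
  objectMode: true, // interim and final result objects are emitted on 'data'
  // interimResults: true, // removed by this commit
  // lowLatency: true,     // removed by this commit
});

// Pipe audio in and log each result object as it arrives.
fs.createReadStream('audio.flac').pipe(recognizeStream);
recognizeStream.on('data', (event) => {
  console.log(JSON.stringify(event, null, 2));
});
```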
34 changes: 17 additions & 17 deletions package-lock.json

Some generated files are not rendered by default.

4 changes: 2 additions & 2 deletions speech-to-text/v1-generated.ts
@@ -6602,13 +6602,13 @@ namespace SpeechToTextV1 {
* elements: the word followed by its start and end time in seconds, for example:
* `[["hello",0.0,1.2],["world",1.2,2.5]]`. Timestamps are returned only for the best alternative.
*/
timestamps?: string[];
timestamps?: [string, number, number][];
/** A confidence score for each word of the transcript as a list of lists. Each inner list consists of two
* elements: the word and its confidence score in the range of 0.0 to 1.0, for example:
* `[["hello",0.95],["world",0.86]]`. Confidence scores are returned only for the best alternative and only with
* results marked as final.
*/
word_confidence?: string[];
word_confidence?: [string, number][];
}

/** Component results for a speech recognition request. */
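The corrected tuple types make the per-word shape explicit instead of an opaque `string[]`. A self-contained sketch of consuming them (types restated locally for illustration):

```typescript
// Local mirrors of the corrected shapes from this hunk.
type WordTimestamp = [string, number, number]; // word, start (s), end (s)
type WordConfidence = [string, number];        // word, confidence 0.0 to 1.0

function summarize(timestamps: WordTimestamp[], confidences: WordConfidence[]): void {
  for (const [word, start, end] of timestamps) {
    console.log(`${word}: ${start.toFixed(2)}s - ${end.toFixed(2)}s`);
  }
  for (const [word, score] of confidences) {
    console.log(`${word}: confidence ${score.toFixed(2)}`);
  }
}

// Sample values taken from the doc comments above.
summarize(
  [['hello', 0.0, 1.2], ['world', 1.2, 2.5]],
  [['hello', 0.95], ['world', 0.86]]
);
```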
2 changes: 0 additions & 2 deletions speech-to-text/v1.ts
@@ -266,7 +266,6 @@ namespace SpeechToTextV1 {
contentType?: string;
customizationWeight?: number;
inactivityTimeout?: number;
interimResults?: boolean;
keywords?: string[];
keywordsThreshold?: number;
maxAlternatives?: number;
@@ -286,7 +285,6 @@
splitTranscriptAtPhraseEnd?: boolean;
speechDetectorSensitivity?: number;
backgroundAudioSuppression?: number;
lowLatency?: boolean;
characterInsertionBias?: number;
}
}
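After this hunk, the two removed fields are no longer part of the WebSocket params surface. A hedged compile-time sketch, assuming the interface edited here is the exported `SpeechToTextV1.RecognizeWebSocketParams` consumed by `recognizeUsingWebSocket()`:

```typescript
import SpeechToTextV1 = require('ibm-watson/speech-to-text/v1');

// Field names come from the hunk above; values are illustrative only.
const params: SpeechToTextV1.RecognizeWebSocketParams = {
  contentType: 'audio/flac',
  maxAlternatives: 3,
  backgroundAudioSuppression: 0.5,
  // interimResults: true, // no longer a known property after this commit
  // lowLatency: true,     // no longer a known property after this commit
};
```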
2 changes: 1 addition & 1 deletion test/unit/speech-to-text.v1.test.js
@@ -1,5 +1,5 @@
/**
* (C) Copyright IBM Corp. 2024.
* (C) Copyright IBM Corp. 2018, 2024.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
