IBM Watson TextToSpeech - cannot read property .pipe of undefined

I have the following code, taken directly from the documentation:

var TextToSpeechV1 = require('watson-developer-cloud/text-to-speech/v1');
var fs = require('fs');

var textToSpeech = new TextToSpeechV1({
  iam_apikey: '---myapikey---',
  url: 'https://stream.watsonplatform.net/text-to-speech/api/'
});

var synthesizeParams = {
  text: 'Hello world, you dummy ass',
  accept: 'audio/wav',
  voice: 'en-US_AllisonVoice'
};

// Pipe the synthesized text to a file.
textToSpeech.synthesize(synthesizeParams).on('error', function(error) {
  console.log(error);
}).pipe(fs.createWriteStream('hello_world.wav'));

When I run it, I get the following error:

pi@raspberrypi:~/Desktop/tjbotcz_lite $ sudo node ttstest.js
/home/pi/Desktop/tjbotcz_lite/ttstest.js:16
textToSpeech.synthesize(synthesizeParams).on('error', function(error) {
                                         ^

TypeError: Cannot read property 'on' of undefined
    at Object.<anonymous> (/home/pi/Desktop/tjbotcz_lite/ttstest.js:16:42)
    at Module._compile (internal/modules/cjs/loader.js:654:30)
    at Object.Module._extensions..js (internal/modules/cjs/loader.js:665:10)
    at Module.load (internal/modules/cjs/loader.js:566:32)
    at tryModuleLoad (internal/modules/cjs/loader.js:506:12)
    at Function.Module._load (internal/modules/cjs/loader.js:498:3)
    at Function.Module.runMain (internal/modules/cjs/loader.js:695:10)
    at startup (internal/bootstrap/node.js:201:19)
    at bootstrapNodeJSCore (internal/bootstrap/node.js:516:3)

Any clue? I have the same problem with TJBot, so I tried a simple example from the documentation and, sure enough, got the same error. When I use my old service (with a username and password instead of an API key), it works fine. I have the new version of the watson-developer-cloud library (3.13.1).
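
For comparison, the older username/password style of instantiation that the question says still works looks roughly like this (the credential values are placeholders):

var TextToSpeechV1 = require('watson-developer-cloud/text-to-speech/v1');

// Older Cloud Foundry-style credentials (username/password instead of an API key)
var textToSpeech = new TextToSpeechV1({
  username: '<username>',
  password: '<password>',
  url: 'https://stream.watsonplatform.net/text-to-speech/api/'
});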

Thanks for any hints. Regards, Jan

The code below works for me with a Text to Speech API key and generates an audio.wav file:
var TextToSpeechV1 = require('watson-developer-cloud/text-to-speech/v1');
var fs = require('fs');

var textToSpeech = new TextToSpeechV1({
  iam_apikey: '<API_KEY>',
  url: 'https://stream.watsonplatform.net/text-to-speech/api'
});

var synthesizeParams = {
  text: 'Hello world',
  accept: 'audio/wav',
  voice: 'en-US_AllisonVoice'
};

textToSpeech
  .synthesize(synthesizeParams, function(err, audio) {
    if (err) {
      console.log(err);
      return;
    }
    textToSpeech.repairWavHeader(audio);
    fs.writeFileSync('audio.wav', audio);
    console.log('audio.wav written with a corrected wav header');
  });

Updated the code snippet and it works.

I came here looking for help, and the documentation there is definitely wrong. I was able to work around it by using audio.result.pipe(fs.createWriteStream('audio.wav')).
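
Applied to a minimal synthesize call, that fix looks something like the sketch below, assuming an SDK version where synthesize() returns a promise and the audio stream sits on the .result property of the response:

textToSpeech.synthesize(synthesizeParams)
  .then(response => {
    // The audio stream is on response.result, not on the response object itself
    response.result.pipe(fs.createWriteStream('audio.wav'));
  })
  .catch(err => {
    console.log('error:', err);
  });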

I implemented this with promises so I can wait for the audio file to be fully saved before doing further processing. Also, as others have said, the documentation is wrong: the correct approach is to use audio.result.pipe() instead of audio.pipe().

// These requires are assumed; IamAuthenticator comes from the newer ibm-watson SDK.
// apikey and apiUrl are service credentials defined elsewhere.
const TextToSpeechV1 = require('ibm-watson/text-to-speech/v1');
const { IamAuthenticator } = require('ibm-watson/auth');
const fs = require('fs');

function synthesize_audio(text, mp3Path) {
  return new Promise((resolve, reject) => {
    console.log(`> Synthesizing audio from text: "${text}"`);

    const textToSpeech = new TextToSpeechV1({
      authenticator: new IamAuthenticator({
        apikey: apikey,
      }),
      url: apiUrl,
    });

    const synthesizeParams = {
      text: text,
      accept: 'audio/mp3',
      voice: 'pt-BR_IsabelaV3Voice',
    };

    textToSpeech.synthesize(synthesizeParams)
      .then(audio => {
        audio.result
          .pipe(fs.createWriteStream(mp3Path))
          .on('finish', resolve)
          .on('error', reject);
      })
      .catch(err => {
        console.log('error:', err);
        // Reject as well so the caller's await does not hang on failure
        reject(err);
      });
  });
}

Then you can await it and do whatever you want:

await synthesize_audio(text, outputPath)
  .then(() => {
    // then you can safely do what you want
  });
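
Since await is only valid inside an async function (unless your runtime supports top-level await), a minimal wrapper might look like this; text and outputPath here are placeholder values:

async function main() {
  const text = 'Hello world';
  const outputPath = 'audio.mp3';

  // audio.mp3 is fully written once the promise resolves
  await synthesize_audio(text, outputPath);
}

main().catch(console.error);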