開發(fā)音頻通話功能

2024-02-16 13:57 更新

在音頻通話場景下,音頻輸出(播放對端聲音)和音頻輸入(錄制本端聲音)會(huì)同時(shí)進(jìn)行,應(yīng)用可以通過使用AudioRenderer來實(shí)現(xiàn)音頻輸出,通過使用AudioCapturer來實(shí)現(xiàn)音頻輸入,同時(shí)使用AudioRenderer和AudioCapturer即可實(shí)現(xiàn)音頻通話功能。

在音頻通話開始和結(jié)束時(shí),應(yīng)用可以自行檢查當(dāng)前的音頻場景模式和鈴聲模式,以便采取合適的音頻管理及提示策略。

以下代碼示范了同時(shí)使用AudioRenderer和AudioCapturer實(shí)現(xiàn)音頻通話功能的基本過程,其中未包含音頻通話數(shù)據(jù)的傳輸過程,實(shí)際開發(fā)中,需要將網(wǎng)絡(luò)傳輸來的對端通話數(shù)據(jù)解碼播放,此處僅以讀取音頻文件的數(shù)據(jù)代替;同時(shí)需要將本端錄制的通話數(shù)據(jù)編碼打包,通過網(wǎng)絡(luò)發(fā)送給對端,此處僅以將數(shù)據(jù)寫入音頻文件代替。

使用AudioRenderer播放對端的通話聲音

該過程與使用AudioRenderer開發(fā)音頻播放功能過程相似,關(guān)鍵區(qū)別在于audioRendererInfo參數(shù)和音頻數(shù)據(jù)來源。audioRendererInfo參數(shù)中,音頻內(nèi)容類型需設(shè)置為語音(CONTENT_TYPE_SPEECH),音頻流使用類型需設(shè)置為語音通信(STREAM_USAGE_VOICE_COMMUNICATION)。
  1. import audio from '@ohos.multimedia.audio';
  2. import fs from '@ohos.file.fs';
  3. const TAG = 'VoiceCallDemoForAudioRenderer';
  4. // 與使用AudioRenderer開發(fā)音頻播放功能過程相似,關(guān)鍵區(qū)別在于audioRendererInfo參數(shù)和音頻數(shù)據(jù)來源
  5. export default class VoiceCallDemoForAudioRenderer {
  6. private renderModel = undefined;
  7. private audioStreamInfo = {
  8. samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_48000, // 采樣率
  9. channels: audio.AudioChannel.CHANNEL_2, // 通道
  10. sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE, // 采樣格式
  11. encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW // 編碼格式
  12. }
  13. private audioRendererInfo = {
  14. // 需使用通話場景相應(yīng)的參數(shù)
  15. content: audio.ContentType.CONTENT_TYPE_SPEECH, // 音頻內(nèi)容類型:語音
  16. usage: audio.StreamUsage.STREAM_USAGE_VOICE_COMMUNICATION, // 音頻流使用類型:語音通信
  17. rendererFlags: 0 // 音頻渲染器標(biāo)志:默認(rèn)為0即可
  18. }
  19. private audioRendererOptions = {
  20. streamInfo: this.audioStreamInfo,
  21. rendererInfo: this.audioRendererInfo
  22. }
  23. // 初始化,創(chuàng)建實(shí)例,設(shè)置監(jiān)聽事件
  24. init() {
  25. audio.createAudioRenderer(this.audioRendererOptions, (err, renderer) => { // 創(chuàng)建AudioRenderer實(shí)例
  26. if (!err) {
  27. console.info(`${TAG}: creating AudioRenderer success`);
  28. this.renderModel = renderer;
  29. this.renderModel.on('stateChange', (state) => { // 設(shè)置監(jiān)聽事件,當(dāng)轉(zhuǎn)換到指定的狀態(tài)時(shí)觸發(fā)回調(diào)
  30. if (state == 1) {
  31. console.info('audio renderer state is: STATE_PREPARED');
  32. }
  33. if (state == 2) {
  34. console.info('audio renderer state is: STATE_RUNNING');
  35. }
  36. });
  37. this.renderModel.on('markReach', 1000, (position) => { // 訂閱markReach事件,當(dāng)渲染的幀數(shù)達(dá)到1000幀時(shí)觸發(fā)回調(diào)
  38. if (position == 1000) {
  39. console.info('ON Triggered successfully');
  40. }
  41. });
  42. } else {
  43. console.info(`${TAG}: creating AudioRenderer failed, error: ${err.message}`);
  44. }
  45. });
  46. }
  47. // 開始一次音頻渲染
  48. async start() {
  49. let stateGroup = [audio.AudioState.STATE_PREPARED, audio.AudioState.STATE_PAUSED, audio.AudioState.STATE_STOPPED];
  50. if (stateGroup.indexOf(this.renderModel.state) === -1) { // 當(dāng)且僅當(dāng)狀態(tài)為STATE_PREPARED、STATE_PAUSED和STATE_STOPPED之一時(shí)才能啟動(dòng)渲染
  51. console.error(TAG + 'start failed');
  52. return;
  53. }
  54. await this.renderModel.start(); // 啟動(dòng)渲染
  55. const bufferSize = await this.renderModel.getBufferSize();
  56. // 此處僅以讀取音頻文件的數(shù)據(jù)舉例,實(shí)際音頻通話開發(fā)中,需要讀取的是通話對端傳輸來的音頻數(shù)據(jù)
  57. let context = getContext(this);
  58. let path = context.filesDir;
  59. const filePath = path + '/voice_call_data.wav'; // 沙箱路徑,實(shí)際路徑為/data/storage/el2/base/haps/entry/files/voice_call_data.wav
  60. let file = fs.openSync(filePath, fs.OpenMode.READ_ONLY);
  61. let stat = await fs.stat(filePath);
  62. let buf = new ArrayBuffer(bufferSize);
  63. let len = stat.size % bufferSize === 0 ? Math.floor(stat.size / bufferSize) : Math.floor(stat.size / bufferSize + 1);
  64. for (let i = 0; i < len; i++) {
  65. let options = {
  66. offset: i * bufferSize,
  67. length: bufferSize
  68. };
  69. let readsize = await fs.read(file.fd, buf, options);
  70. // buf是要寫入緩沖區(qū)的音頻數(shù)據(jù),在調(diào)用AudioRenderer.write()方法前可以進(jìn)行音頻數(shù)據(jù)的預(yù)處理,實(shí)現(xiàn)個(gè)性化的音頻播放功能,AudioRenderer會(huì)讀出寫入緩沖區(qū)的音頻數(shù)據(jù)進(jìn)行渲染
  71. let writeSize = await new Promise((resolve, reject) => {
  72. this.renderModel.write(buf, (err, writeSize) => {
  73. if (err) {
  74. reject(err);
  75. } else {
  76. resolve(writeSize);
  77. }
  78. });
  79. });
  80. if (this.renderModel.state === audio.AudioState.STATE_RELEASED) { // 如果渲染器狀態(tài)為STATE_RELEASED,停止渲染
  81. fs.close(file);
  82. await this.renderModel.stop();
  83. }
  84. if (this.renderModel.state === audio.AudioState.STATE_RUNNING) {
  85. if (i === len - 1) { // 如果音頻文件已經(jīng)被讀取完,停止渲染
  86. fs.close(file);
  87. await this.renderModel.stop();
  88. }
  89. }
  90. }
  91. }
  92. // 暫停渲染
  93. async pause() {
  94. // 只有渲染器狀態(tài)為STATE_RUNNING的時(shí)候才能暫停
  95. if (this.renderModel.state !== audio.AudioState.STATE_RUNNING) {
  96. console.info('Renderer is not running');
  97. return;
  98. }
  99. await this.renderModel.pause(); // 暫停渲染
  100. if (this.renderModel.state === audio.AudioState.STATE_PAUSED) {
  101. console.info('Renderer is paused.');
  102. } else {
  103. console.error('Pausing renderer failed.');
  104. }
  105. }
  106. // 停止渲染
  107. async stop() {
  108. // 只有渲染器狀態(tài)為STATE_RUNNING或STATE_PAUSED的時(shí)候才可以停止
  109. if (this.renderModel.state !== audio.AudioState.STATE_RUNNING && this.renderModel.state !== audio.AudioState.STATE_PAUSED) {
  110. console.info('Renderer is not running or paused.');
  111. return;
  112. }
  113. await this.renderModel.stop(); // 停止渲染
  114. if (this.renderModel.state === audio.AudioState.STATE_STOPPED) {
  115. console.info('Renderer stopped.');
  116. } else {
  117. console.error('Stopping renderer failed.');
  118. }
  119. }
  120. // 銷毀實(shí)例,釋放資源
  121. async release() {
  122. // 渲染器狀態(tài)不是STATE_RELEASED狀態(tài),才能release
  123. if (this.renderModel.state === audio.AudioState.STATE_RELEASED) {
  124. console.info('Renderer already released');
  125. return;
  126. }
  127. await this.renderModel.release(); // 釋放資源
  128. if (this.renderModel.state === audio.AudioState.STATE_RELEASED) {
  129. console.info('Renderer released');
  130. } else {
  131. console.error('Renderer release failed.');
  132. }
  133. }
  134. }

使用AudioCapturer錄制本端的通話聲音

該過程與使用AudioCapturer開發(fā)音頻錄制功能過程相似,關(guān)鍵區(qū)別在于audioCapturerInfo參數(shù)和音頻數(shù)據(jù)流向。audioCapturerInfo參數(shù)中音源類型需設(shè)置為語音通話,SOURCE_TYPE_VOICE_COMMUNICATION。
  1. import audio from '@ohos.multimedia.audio';
  2. import fs from '@ohos.file.fs';
  3. const TAG = 'VoiceCallDemoForAudioCapturer';
  4. // 與使用AudioCapturer開發(fā)音頻錄制功能過程相似,關(guān)鍵區(qū)別在于audioCapturerInfo參數(shù)和音頻數(shù)據(jù)流向
  5. export default class VoiceCallDemoForAudioCapturer {
  6. private audioCapturer = undefined;
  7. private audioStreamInfo = {
  8. samplingRate: audio.AudioSamplingRate.SAMPLE_RATE_44100, // 采樣率
  9. channels: audio.AudioChannel.CHANNEL_1, // 通道
  10. sampleFormat: audio.AudioSampleFormat.SAMPLE_FORMAT_S16LE, // 采樣格式
  11. encodingType: audio.AudioEncodingType.ENCODING_TYPE_RAW // 編碼格式
  12. }
  13. private audioCapturerInfo = {
  14. // 需使用通話場景相應(yīng)的參數(shù)
  15. source: audio.SourceType.SOURCE_TYPE_VOICE_COMMUNICATION, // 音源類型:語音通話
  16. capturerFlags: 0 // 音頻采集器標(biāo)志:默認(rèn)為0即可
  17. }
  18. private audioCapturerOptions = {
  19. streamInfo: this.audioStreamInfo,
  20. capturerInfo: this.audioCapturerInfo
  21. }
  22. // 初始化,創(chuàng)建實(shí)例,設(shè)置監(jiān)聽事件
  23. init() {
  24. audio.createAudioCapturer(this.audioCapturerOptions, (err, capturer) => { // 創(chuàng)建AudioCapturer實(shí)例
  25. if (err) {
  26. console.error(`Invoke createAudioCapturer failed, code is ${err.code}, message is ${err.message}`);
  27. return;
  28. }
  29. console.info(`${TAG}: create AudioCapturer success`);
  30. this.audioCapturer = capturer;
  31. this.audioCapturer.on('markReach', 1000, (position) => { // 訂閱markReach事件,當(dāng)采集的幀數(shù)達(dá)到1000時(shí)觸發(fā)回調(diào)
  32. if (position === 1000) {
  33. console.info('ON Triggered successfully');
  34. }
  35. });
  36. this.audioCapturer.on('periodReach', 2000, (position) => { // 訂閱periodReach事件,當(dāng)采集的幀數(shù)達(dá)到2000時(shí)觸發(fā)回調(diào)
  37. if (position === 2000) {
  38. console.info('ON Triggered successfully');
  39. }
  40. });
  41. });
  42. }
  43. // 開始一次音頻采集
  44. async start() {
  45. let stateGroup = [audio.AudioState.STATE_PREPARED, audio.AudioState.STATE_PAUSED, audio.AudioState.STATE_STOPPED];
  46. if (stateGroup.indexOf(this.audioCapturer.state) === -1) { // 當(dāng)且僅當(dāng)狀態(tài)為STATE_PREPARED、STATE_PAUSED和STATE_STOPPED之一時(shí)才能啟動(dòng)采集
  47. console.error(`${TAG}: start failed`);
  48. return;
  49. }
  50. await this.audioCapturer.start(); // 啟動(dòng)采集
  51. // 此處僅以將音頻數(shù)據(jù)寫入文件舉例,實(shí)際音頻通話開發(fā)中,需要將本端采集的音頻數(shù)據(jù)編碼打包,通過網(wǎng)絡(luò)發(fā)送給通話對端
  52. let context = getContext(this);
  53. const path = context.filesDir + '/voice_call_data.wav'; // 采集到的音頻文件存儲路徑
  54. let file = fs.openSync(path, 0o2 | 0o100); // 如果文件不存在則創(chuàng)建文件
  55. let fd = file.fd;
  56. let numBuffersToCapture = 150; // 循環(huán)寫入150次
  57. let count = 0;
  58. while (numBuffersToCapture) {
  59. let bufferSize = await this.audioCapturer.getBufferSize();
  60. let buffer = await this.audioCapturer.read(bufferSize, true);
  61. let options = {
  62. offset: count * bufferSize,
  63. length: bufferSize
  64. };
  65. if (buffer === undefined) {
  66. console.error(`${TAG}: read buffer failed`);
  67. } else {
  68. let number = fs.writeSync(fd, buffer, options);
  69. console.info(`${TAG}: write date: ${number}`);
  70. }
  71. numBuffersToCapture--;
  72. count++;
  73. }
  74. }
  75. // 停止采集
  76. async stop() {
  77. // 只有采集器狀態(tài)為STATE_RUNNING或STATE_PAUSED的時(shí)候才可以停止
  78. if (this.audioCapturer.state !== audio.AudioState.STATE_RUNNING && this.audioCapturer.state !== audio.AudioState.STATE_PAUSED) {
  79. console.info('Capturer is not running or paused');
  80. return;
  81. }
  82. await this.audioCapturer.stop(); // 停止采集
  83. if (this.audioCapturer.state === audio.AudioState.STATE_STOPPED) {
  84. console.info('Capturer stopped');
  85. } else {
  86. console.error('Capturer stop failed');
  87. }
  88. }
  89. // 銷毀實(shí)例,釋放資源
  90. async release() {
  91. // 采集器狀態(tài)不是STATE_RELEASED或STATE_NEW狀態(tài),才能release
  92. if (this.audioCapturer.state === audio.AudioState.STATE_RELEASED || this.audioCapturer.state === audio.AudioState.STATE_NEW) {
  93. console.info('Capturer already released');
  94. return;
  95. }
  96. await this.audioCapturer.release(); // 釋放資源
  97. if (this.audioCapturer.state == audio.AudioState.STATE_RELEASED) {
  98. console.info('Capturer released');
  99. } else {
  100. console.error('Capturer release failed');
  101. }
  102. }
  103. }
以上內(nèi)容是否對您有幫助:
在線筆記
App下載
App下載

掃描二維碼

下載編程獅App

公眾號
微信公眾號

編程獅公眾號