Functional requirements: first, check whether microphone permission has been granted. Then use the audio stream to decide whether there is sound; a volume greater than 0 means sound is present. Finally, an animation shows whether the microphone is currently picking up sound or is silent.
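
In the code below the permission check happens implicitly: navigator.getUserMedia calls its success callback with a stream once access is granted, and its error callback if it is denied. To query the permission state up front without triggering a prompt, the Permissions API can be used. A minimal sketch (the 'microphone' descriptor works in Chromium-based browsers but is not supported everywhere):

// Query the current microphone permission state without prompting.
navigator.permissions.query({ name: 'microphone' }).then(status => {
  console.log(status.state) // 'granted', 'denied', or 'prompt'
})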

Key code:

let audioContext = new AudioContext()
// Feed the microphone stream into the audio graph
let mediaStreamSource = audioContext.createMediaStreamSource(stream)
// Create a processing node with a 4096-sample buffer, mono in and mono out
let scriptProcessor = audioContext.createScriptProcessor(4096, 1, 1)
// Connect the microphone source to the processing node
mediaStreamSource.connect(scriptProcessor)
// No audible effect; connecting to the destination works around a Chrome
// quirk where onaudioprocess may not fire otherwise
scriptProcessor.connect(audioContext.destination)
// Process each audio buffer as it arrives
scriptProcessor.onaudioprocess = function(e) {
  // The input buffer is a Float32Array of 32-bit PCM samples (channel 0)
  let buffer = e.inputBuffer.getChannelData(0)
  // Find the loudest sample in the buffer
  let maxVal = Math.max.apply(Math, buffer)
  // A rounded volume above 0 means the microphone is picking up sound;
  // haveVoice is a reactive data property, so plain assignment is enough
  if (Math.round(maxVal * 100) > 0) {
    _that.haveVoice = true
  } else {
    _that.haveVoice = false
  }
}
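
Note that createScriptProcessor is deprecated in the Web Audio API. The same has-sound check can be built on an AnalyserNode polled once per animation frame. A minimal sketch of that approach (watchMicVolume and the onVolumeChange callback are placeholder names, not part of the component below):

// Report whether the microphone is picking up sound, using AnalyserNode
// instead of the deprecated ScriptProcessorNode.
async function watchMicVolume(onVolumeChange) {
  // Modern capture API: resolves once the user grants microphone access
  const stream = await navigator.mediaDevices.getUserMedia({ audio: true })
  const audioContext = new AudioContext()
  const source = audioContext.createMediaStreamSource(stream)
  // An AnalyserNode exposes the current time-domain samples on demand
  const analyser = audioContext.createAnalyser()
  analyser.fftSize = 2048
  source.connect(analyser)

  const buffer = new Float32Array(analyser.fftSize)
  const poll = () => {
    analyser.getFloatTimeDomainData(buffer)
    // Same rule as above: any positive rounded sample counts as sound
    let maxVal = 0
    for (const v of buffer) {
      if (v > maxVal) maxVal = v
    }
    onVolumeChange(Math.round(maxVal * 100) > 0)
    requestAnimationFrame(poll)
  }
  poll()
}

In the component, this could replace the scriptProcessor block with watchMicVolume(v => { _that.haveVoice = v }).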

Full code:

<template>
  <div class="speechSynthesis-box">
    <!-- Online meeting page -->
    <el-button type="primary" v-show="showAddButton" @click="showAddPrimary=true;showBottomBox=false">新建会议实时转写
    </el-button>
    <div class="top-box" v-show="showAddPrimary">
      <h3>填写新增实时转写信息</h3>
      <el-form ref="form" :model="form" label-width="80px" :rules="rules">
        <!-- <el-form-item label="开始时间">
          <el-date-picker type="date" placeholder="年/月/日" v-model="form.date1" style="width: 100%;"></el-date-picker>
        </el-form-item> -->
        <el-form-item label="会议主题" prop="title">
          <el-input v-model="form.title" style="width: 90%;"></el-input>
        </el-form-item>
        <el-form-item label="主讲人" prop="accout">
          <el-input v-model="form.accout" style="width: 90%;"></el-input>
        </el-form-item>
        <el-form-item>
          <el-button type="primary" @click="beginMeeting" id="taste_button">开始</el-button>
        </el-form-item>
      </el-form>
    </div>
    <div class="bottom-box" v-show="showBottomBox">
      <!-- Speech synthesis is limited to roughly 20,000 characters -->
      <h3>实时会议语音转写</h3>
      <!-- Show the volume state based on the microphone input -->
      <div class="voice-box">
        <!--        <my-wave-surfer :voice-src="voiceSrc"/>-->
        <div class="time-box">
          <!-- No sound detected -->
          <div v-if="!haveVoice">
            <!--            <img src="./src/assets/image/novoice.png" alt="a">-->
            <img :src="noVoiceImg">
            <span>未检测到话筒声音!~~~</span>
          </div>
          <!-- Sound detected -->
          <div v-else>
            <span class="start-taste-line">
              <hr class="hr hr1">
              <hr class="hr hr2">
              <hr class="hr hr3">
              <hr class="hr hr4">
              <hr class="hr hr5">
              <hr class="hr hr6">
              <hr class="hr hr7">
              <hr class="hr hr8">
              <hr class="hr hr9">
              <hr class="hr hr10">
            </span>
          </div>
        </div>
      </div>
      <div class="voice-content" ref="voiceContentHtml">
        <el-input
          v-if="isEditable"
          type="textarea"
          :rows="10"
          maxlength="20000"
          show-word-limit
          placeholder="请输入内容" class="voice-content-input"
          v-model="voiceContent"
        >
        </el-input>
        <p v-else>{{ voiceContent }}</p>
      </div>
      <div class="button-box">
        <!--        <el-button @click="dialogVisible = false">取 消</el-button>-->
        <!-- <div class="btn1">
          <el-button v-if="! isEditable"
                     @click=" isEditable=! isEditable"
                     type="success"
          >编辑
          </el-button>
          <el-button v-else @click="editVoiceWord" type="success">保存</el-button>
        </div> -->
        <div class="btn2">
          <el-button type="danger" class="end-btn" @click="finish">结束</el-button>
          <!--          <a :href="voiceSrc" target="_blank"><el-button type="primary">下载</el-button></a>-->
        </div>
      </div>
    </div>


  </div>
</template>

<script>
import myWaveSurfer from '@/components/MyWaveSurfer/index'
import { addMeeting, finishMeeting } from '@/api/web/onlineAst'
import '@/api/onlineAst/index.css'
import '@/api/onlineAst/audio-data.js'
import { HZRecorder } from '@/utils/HZRecorder.js'
import { Loading } from 'element-ui'
import { getToken } from '@/utils/auth'
let loading

const startLoading = () => {
  loading = Loading.service({
    lock: true,
    text: '正在保存中...',
    background: 'rgba(0,0,0,0.7)'
  })
}

// import Recorder from 'js-audio-recorder'
// const lamejs = require('lamejs')
// const recorder = new Recorder({
//     sampleBits: 16,                 // sample size: 8 or 16, default 16
//     sampleRate: 48000,              // sample rate: 11025/16000/22050/24000/44100/48000; follows the browser default (48000 in my Chrome)
//     numChannels: 1,                 // channels: 1 or 2, default 1
//     // compiling: false, // (works in 0.x, being added in 1.x) convert while recording, default false
// })

// var vm = null;

// // event hook that reports the current recording data
// recorder.onprogress = function(params) {
//     // console.log('--------------START---------------')
//     // console.log('duration (s)', params.duration);
//     // console.log('size (bytes)', params.fileSize);
//     // console.log('volume (%)', params.vol);
//     console.log('params');
//     console.log(params);

//     // console.log('--------------END---------------')
//     vm.sendData(params.data)
// }

export default {
  name: 'onlineConference',
  components: {
    myWaveSurfer
  },
  data() {
    return {
      noVoiceImg: require('@/assets/image/novoice.png'),
      haveVoice: false, // no sound by default
      form: {
        name: '',
        region: '',
        date1: '',
        date2: '',
        delivery: false,
        type: [],
        resource: '',
        desc: '',
        title: '',
        accout: '',
        userName: ''
      },
      notSupportTip: '请试用chrome浏览器且域名为localhost或127.0.0.1测试',
      isRecordEnd: false,
      bizId: '',
      userName: '',
      state: 'ing',
      ws: undefined,
      password: '888888',
      path: 'ws://127.0.0.1:8080/websocket/onlineVoice/realTimeTrans/{userName}/{password}/{bizId}',
      voiceSrc: '',
      // voiceSrc: 'http://192.168.3.174:8081/profile/uploadPath/tempSynthesisVoice/test.mp3',
      voiceContent: '',
      isEditable: false,
      showAddButton: true,
      showAddPrimary: false,
      showBottomBox: false,
      // form validation rules
      rules: {
        title: [
          { required: true, message: '主题不能为空', trigger: 'blur' }
        ],
        accout: [
          { required: true, message: '主讲人不能为空', trigger: 'blur' }
        ]
      }
    }
  },
  // created: function() {
  //   vm = this
  // },
  methods: {
    init: function() {
      try {
        // Check whether the browser can access the microphone
        window.AudioContext = window.AudioContext || window.webkitAudioContext
        navigator.getUserMedia = navigator.getUserMedia || navigator.webkitGetUserMedia
        window.URL = window.URL || window.webkitURL

        // audio_context = new AudioContext;
        console.log('navigator.getUserMedia ' + (navigator.getUserMedia ? 'available.' : 'not present!'))
      } catch (e) {
        alert('No web audio support in this browser!')
      }
      let _that = this
      navigator.getUserMedia({ audio: true }, function(stream) {
        _that.recorder = new HZRecorder(stream)
        console.log('init complete')
        _that.recorder.start()
        _that.connectWebsocket()
        console.log('recording started')
        console.log('stream:')
        console.log(stream)

        let audioContext = new AudioContext()
        // Feed the microphone stream into the audio graph
        let mediaStreamSource = audioContext.createMediaStreamSource(stream)
        // Create a processing node with a 4096-sample buffer, mono in and mono out
        let scriptProcessor = audioContext.createScriptProcessor(4096, 1, 1)
        // Connect the microphone source to the processing node
        mediaStreamSource.connect(scriptProcessor)
        // No audible effect; connecting to the destination works around a Chrome
        // quirk where onaudioprocess may not fire otherwise
        scriptProcessor.connect(audioContext.destination)
        // Process each audio buffer as it arrives
        scriptProcessor.onaudioprocess = function(e) {
          // The input buffer is a Float32Array of 32-bit PCM samples (channel 0)
          let buffer = e.inputBuffer.getChannelData(0)
          // Find the loudest sample in the buffer
          let maxVal = Math.max.apply(Math, buffer)
          // A rounded volume above 0 means the microphone is picking up sound;
          // haveVoice is a reactive data property, so plain assignment is enough
          if (Math.round(maxVal * 100) > 0) {
            _that.haveVoice = true
          } else {
            _that.haveVoice = false
          }
        }

      }, function(e) {
        console.log('No live audio input: ' + e)
      })
    },
    connectWebsocket() {
      var url = 'ws://10.10.0.54:8980/websocket/onlineVoice/realTimeTrans/' + this.userName + '/' + this.bizId
      // var url = 'ws://127.0.0.1:8081/websocket/onlineVoice/realTimeTrans/' + this.userName + '/' + this.bizId;

      if ('WebSocket' in window) {
        this.ws = new WebSocket(url)
        console.log('this.ws')
        console.log(this.ws)
      } else if ('MozWebSocket' in window) {
        this.ws = new MozWebSocket(url)
      } else {
        alert(this.notSupportTip)
        return null
      }
      let _that = this
      this.ws.onopen = (e) => {

        console.log('this.ws.onopen')
        if (_that.ws.readyState !== 1) {
          return
        }

        var buffer = _that.recorder.getBuffer()
        var audioData = buffer.splice(0, 1280)

        // _that.ws.send(new Int8Array(audioData))

        _that.handlerInterval = setInterval(() => {
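          // Pacing note: 1280 bytes every 40 ms is 32,000 bytes/s, i.e.
          // 16 kHz 16-bit mono PCM; HZRecorder is assumed to deliver its
          // buffer in that format for the transcription backend.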
          // bail out if the WebSocket is no longer connected
          if (_that.ws.readyState !== 1) {
            console.log('WebSocket not connected')
            clearInterval(_that.handlerInterval)
            return
          }
          if (buffer.length == 0 && _that.state == 'end') {
            // _that.ws.send('{\"end\": true}')
            _that.ws.send('end')
            console.log('end marker sent')

            var blob = this.recorder.upload()
            var fd = new FormData()
            fd.append('file', blob)
            fd.append('bizId', this.bizId)
            fd.append('content', this.voiceContent)
            var xhr = new XMLHttpRequest()

            loading.close()
            this.$notify({
              title: '保存成功',
              message: '',
              type: 'success'
            })
            this.showBottomBox = false
            this.showAddButton = true

            // xhr.open('POST', '/dev-api/web/onlineAst/finishMeeting')
            xhr.open('POST', '/prod-api/web/onlineAst/finishMeeting')
            xhr.setRequestHeader('Authorization', 'Bearer ' + getToken());
            xhr.send(fd)

            clearInterval(_that.handlerInterval)
            location.reload()
            return
          }
          var audioData = buffer.splice(0, 1280)
          if (audioData.length > 0) {
            _that.ws.send(new Int8Array(audioData))
          }
        }, 40)
      }
      this.ws.onmessage = (e) => {
        // this.config.onMessage && this.config.onMessage(e)
        this.wsOnMessage(e)
      }
      this.ws.onerror = (e) => {
        // this.stop()
        console.log('ws.onerror: connection closed')
      }
      this.ws.onclose = (e) => {
        // this.stop()
        console.log('ws.onclose: connection closed')
      }

    },
    wsOnMessage(e) {
      let jsonData = JSON.parse(e.data)
      console.log('jsonData')
      console.log(jsonData)
      if (jsonData.data == undefined || jsonData.data == null) {
        return
      }
      let msgData = this.byteToString(jsonData.data)

      let tempObj = JSON.parse(msgData)
      if (tempObj.msgtype != 'sentence') {
        return
      }

      let str = ''
      if (tempObj.ws == undefined || tempObj.ws == null) {
        return
      }
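
      // The payload follows a common ASR result shape: `ws` is a list of
      // word segments and each `cw` holds its candidate words; concatenating
      // the `w` fields reassembles the recognized sentence.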

      tempObj.ws.forEach(k => {
        k.cw.forEach(l => {
          str += l.w
        })
      })
      console.log('str')
      console.log(str)

      this.voiceContent += str
    },
    // convert a UTF-8 byte array to a string
    byteToString(utf8Bytes) {
      if (typeof utf8Bytes === 'string') {
        return utf8Bytes
      }
      console.log('utf8Bytes')
      console.log(utf8Bytes)
      var unicodeStr = ''
      for (var pos = 0; pos < utf8Bytes.length;) {
        var flag = utf8Bytes[pos]
        var unicode = 0
        if ((flag >>> 7) === 0) {
          unicodeStr += String.fromCharCode(utf8Bytes[pos])
          pos += 1

        } else if ((flag & 0xFC) === 0xFC) {
          unicode = (utf8Bytes[pos] & 0x3) << 30
          unicode |= (utf8Bytes[pos + 1] & 0x3F) << 24
          unicode |= (utf8Bytes[pos + 2] & 0x3F) << 18
          unicode |= (utf8Bytes[pos + 3] & 0x3F) << 12
          unicode |= (utf8Bytes[pos + 4] & 0x3F) << 6
          unicode |= (utf8Bytes[pos + 5] & 0x3F)
          unicodeStr += String.fromCodePoint(unicode)
          pos += 6

        } else if ((flag & 0xF8) === 0xF8) {
          unicode = (utf8Bytes[pos] & 0x7) << 24
          unicode |= (utf8Bytes[pos + 1] & 0x3F) << 18
          unicode |= (utf8Bytes[pos + 2] & 0x3F) << 12
          unicode |= (utf8Bytes[pos + 3] & 0x3F) << 6
          unicode |= (utf8Bytes[pos + 4] & 0x3F)
          unicodeStr += String.fromCodePoint(unicode)
          pos += 5

        } else if ((flag & 0xF0) === 0xF0) {
          unicode = (utf8Bytes[pos] & 0xF) << 18
          unicode |= (utf8Bytes[pos + 1] & 0x3F) << 12
          unicode |= (utf8Bytes[pos + 2] & 0x3F) << 6
          unicode |= (utf8Bytes[pos + 3] & 0x3F)
          unicodeStr += String.fromCodePoint(unicode)
          pos += 4

        } else if ((flag & 0xE0) === 0xE0) {
          unicode = (utf8Bytes[pos] & 0x1F) << 12

          unicode |= (utf8Bytes[pos + 1] & 0x3F) << 6
          unicode |= (utf8Bytes[pos + 2] & 0x3F)
          unicodeStr += String.fromCharCode(unicode)
          pos += 3

        } else if ((flag & 0xC0) === 0xC0) { //110
          unicode = (utf8Bytes[pos] & 0x3F) << 6
          unicode |= (utf8Bytes[pos + 1] & 0x3F)
          unicodeStr += String.fromCharCode(unicode)
          pos += 2

        } else {
          unicodeStr += String.fromCharCode(utf8Bytes[pos])
          pos += 1
        }
      }
      return unicodeStr
    },
    beginMeeting() {
      this.$refs['form'].validate(valid => {
        if (valid) {
          console.log('meeting started')
          this.showAddButton = false
          this.showAddPrimary = false
          this.showBottomBox = true
          this.userName = this.$store.state.user.name
          // this.password = '888888';
          addMeeting(this.form).then(Response => {
            this.bizId = Response.data.bizId
            this.userName = Response.data.operator
            this.init()
          })
        }
      })
    },
    finish() {
      this.state = 'end'
      this.recorder.stop()
      startLoading()
    }

  }
}

</script>
<style lang="scss" scoped>
.speechSynthesis-box {
  //background-color: lightskyblue;
  margin: 30px 20px;
  min-width: 990px;

  .top-box {
    padding: 20px;
    border: 1px solid rgb(220, 223, 230);
    margin: 30px 0;
  }

  .bottom-box {
    padding: 20px;
    border: 1px solid rgb(220, 223, 230);
    margin: 30px 0;
    // player
    .voice-box {
      width: 100%;

      .time-box {
        margin-top: 40px;
        display: -ms-flexbox;
        display: flex;
        -ms-flex-pack: center;
        justify-content: center;

        img {
          width: 30px;
        }

        .total-time {
          margin-left: 20px;
        }

        .start-taste-line {
          display: inline-block;
          margin-right: 20px;
        }

        .start-taste-line hr {
          background-color: #187cff;
          width: 3px;
          height: 10px;
          margin: 0 5px;
          display: inline-block;
          border: none;
        }
      }
    }

    // box for the recognized text
    .voice-content {
      margin: 20px auto;
      border: 1px solid rgb(220, 223, 230);
      border-radius: 10px;

      p {
        padding: 10px;
        margin: 0;
        height: 300px;
        overflow-x: hidden;
        overflow-y: auto;
        line-height: 30px;
      }
    }

    // buttons
    .button-box {
      margin-top: 30px;
      width: 40%;
      float: right;
      line-height: 76px;
      //padding-left: 12%;
      //padding-right: 10%;
      .btn1, .btn2 {
        display: inline-block;
        //padding-left: 10%;
        text-align: center;
      }

      .btn1 {
        width: 50%;
      }

      .btn2 {
        width: 50%;

        .end-btn {
          margin-right: 10%;
        }
      }
    }
  }
}
</style>
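
One more note on the full code: the hand-rolled byteToString decoder above can be replaced by the built-in TextDecoder, which performs the same UTF-8 conversion (including 4-byte sequences) in one call. A minimal sketch:

// Drop-in replacement for byteToString using the built-in TextDecoder.
function byteToString(utf8Bytes) {
  if (typeof utf8Bytes === 'string') {
    return utf8Bytes
  }
  return new TextDecoder('utf-8').decode(new Uint8Array(utf8Bytes))
}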

This problem also cost me a lot of time. I assumed it would be hard, but it really isn't.