I. Basic Parameters
private final static int AUDIO_INPUT = MediaRecorder.AudioSource.MIC;
private static final int mStreamType = AudioManager.STREAM_MUSIC;
private final static int AUDIO_SAMPLE_RATE = 44100;
private final static int AUDIO_CHANNEL = AudioFormat.CHANNEL_IN_STEREO;
private final static int AUDIO_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
1. Audio input: AUDIO_INPUT: recording normally uses the microphone
2. Audio stream type: mStreamType:
Commonly used stream types include:
- STREAM_ALARM: alarm sounds
- STREAM_MUSIC: music playback
- STREAM_RING: ringtones
- STREAM_SYSTEM: system sounds
- STREAM_VOICE_CALL: phone call audio
3. Sample rate: AUDIO_SAMPLE_RATE:
The number of samples taken per second from the continuous signal to form the discrete signal, expressed in hertz (Hz).
44100 Hz is the current standard (and the only rate guaranteed to work on all devices); some devices also support 22050, 16000, and 11025.
Common sample rates fall into three tiers: 22.05 kHz, 44.1 kHz, and 48 kHz.
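Whether a device supports a given combination of parameters can be probed up front: AudioRecord.getMinBufferSize() returns a negative error code for unsupported configurations. A minimal sketch (the candidate-rate list is just an illustration):
// Probe which sample rates this device supports for the chosen channel/format.
// getMinBufferSize() returns ERROR_BAD_VALUE for unsupported parameters.
for (int rate : new int[]{11025, 16000, 22050, 44100, 48000}) {
    int size = AudioRecord.getMinBufferSize(rate,
            AudioFormat.CHANNEL_IN_STEREO, AudioFormat.ENCODING_PCM_16BIT);
    if (size > 0) {
        Log.d("AudioProbe", rate + " Hz supported, min buffer " + size + " bytes");
    }
}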
4. Channel configuration: AUDIO_CHANNEL:
There are two common options:
Mono: CHANNEL_IN_MONO
Stereo: CHANNEL_IN_STEREO
5. Returned audio data format: AUDIO_FORMAT:
The per-sample resolution can generally be set to 16 bits or 8 bits:
- ENCODING_PCM_16BIT
- ENCODING_PCM_8BIT
ENCODING_PCM_16BIT is the format guaranteed to be supported on all devices.
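With ENCODING_PCM_16BIT, each sample occupies two bytes in little-endian order in the buffer that AudioRecord.read() fills. A small sketch of decoding the samples, e.g. to estimate the input level (this helper is hypothetical, not part of the code below):
// Hypothetical helper: decode little-endian 16-bit PCM bytes into samples
// and return the peak amplitude found in the buffer.
private static int peakAmplitude(byte[] pcm, int length) {
    int peak = 0;
    for (int i = 0; i + 1 < length; i += 2) {
        short sample = (short) ((pcm[i] & 0xff) | (pcm[i + 1] << 8));
        peak = Math.max(peak, Math.abs((int) sample));
    }
    return peak;
}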
II. Required Instance Objects
private int mBufferSize;
private AudioRecord mAudioRecord;
private AudioTrack mAudioTrack;
private Status mStatus;
private Thread mRecordThread;
private Thread mPlayThread;
private byte[] mAudioBuffer;
private byte[] mPlayBuffer;
private int mMode = AudioTrack.MODE_STREAM;
1. Buffer size in bytes
The minimum buffer size matching the AudioRecord parameters can be obtained dynamically:
mBufferSize = AudioRecord.getMinBufferSize(AUDIO_SAMPLE_RATE, AUDIO_CHANNEL, AUDIO_FORMAT);
2. The AudioRecord object
mAudioRecord = new AudioRecord(AUDIO_INPUT, AUDIO_SAMPLE_RATE, AUDIO_CHANNEL, AUDIO_FORMAT, mBufferSize);
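Recording requires the RECORD_AUDIO permission, and construction can fail without throwing, so it is worth checking that the object actually initialized:
// getState() reports whether the native AudioRecord was set up successfully.
if (mAudioRecord.getState() != AudioRecord.STATE_INITIALIZED) {
    throw new IllegalStateException("AudioRecord init failed: check the parameters and the RECORD_AUDIO permission");
}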
3. The AudioTrack object
mAudioTrack = new AudioTrack(mStreamType, AUDIO_SAMPLE_RATE, AudioFormat.CHANNEL_OUT_STEREO, AUDIO_FORMAT, mBufferSize, mMode);
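Note that AudioTrack expects an output channel mask (CHANNEL_OUT_*), not the CHANNEL_IN_* constants used for recording; CHANNEL_IN_STEREO and CHANNEL_OUT_STEREO happen to share the same value, which is why passing the recording constant appeared to work, but the output constant is the correct usage. The playback buffer can also be sized with AudioTrack's own helper instead of reusing the recording size:
// AudioTrack has its own getMinBufferSize(), keyed to output-side parameters.
int playBufferSize = AudioTrack.getMinBufferSize(AUDIO_SAMPLE_RATE,
        AudioFormat.CHANNEL_OUT_STEREO, AUDIO_FORMAT);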
4. Recording status
For a simple record-and-play feature this may not be strictly necessary, but keeping track of the recording state is a good habit.
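The Status type used here (and in stopRecord() further down) never appears in the original snippets; a minimal sketch of what it might look like:
// Hypothetical Status enum; only the states actually referenced are listed.
private enum Status {
    STATUS_READY,  // initialized, not yet recording
    STATUS_START,  // recording in progress
    STATUS_STOP    // recording stopped
}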
5. Recording and playback threads
Recording and playback are two different processes that run at the same time, so a separate thread is started for each:
mRecordThread and mPlayThread
6. Recording and playback buffers
Why use a buffer?
Audio data is passed around internally as byte arrays. If the source sent data straight to the consumer, the consumer would play the first byte it received and then ask the source for the next one, but there is no guarantee that what the source delivers at that moment is exactly the second byte. To decouple the two, a region that holds a complete chunk of data is placed between source and consumer: the buffer.
mAudioBuffer = new byte[mBufferSize];
mPlayBuffer = new byte[mBufferSize];
The buffer size is generally taken from mBufferSize.
7. Mode: mMode:
There are two common modes:
- MODE_STREAM: audio data is written to the AudioTrack's internal buffer chunk by chunk through repeated write calls, which can introduce latency
- MODE_STATIC: all data is handed to the AudioTrack's internal buffer in a single write call before play; suited to short clips with a small memory footprint and demanding latency requirements (see the sketch after this list)
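A minimal MODE_STATIC sketch, assuming the whole clip already fits in memory (loadClipSomehow() is a hypothetical loader, not a real API):
// MODE_STATIC: hand the entire clip over once, then play it.
byte[] soundBytes = loadClipSomehow();
AudioTrack track = new AudioTrack(AudioManager.STREAM_MUSIC, 44100,
        AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT,
        soundBytes.length, AudioTrack.MODE_STATIC);
track.write(soundBytes, 0, soundBytes.length); // single write before play
track.play();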
III. Full Implementation
1. Start recording and playing
private ArrayBlockingQueue<byte[]> queue = new ArrayBlockingQueue<byte[]>(200);
public void start() {
    mStatus = Status.STATUS_START; // lets the worker loops below run
    mRecordThread = new Thread(fetchAudioRunnable());
    mPlayThread = new Thread(playAudioRunnable());
    mAudioRecord.startRecording();
    mAudioTrack.play();
    mRecordThread.start();
    mPlayThread.start();
}
private void fetchAudio() {
    int size = mAudioRecord.read(mAudioBuffer, 0, mAudioBuffer.length);
    if (size > 0) {
        try {
            // Enqueue a copy: mAudioBuffer is reused on the next read, so
            // handing the queue the array itself would let later reads
            // overwrite data the player has not consumed yet.
            queue.put(Arrays.copyOf(mAudioBuffer, size));
        } catch (InterruptedException e) {
            e.printStackTrace();
        }
    }
}
private void playAudio() {
    try {
        // take() blocks until a chunk is available; the original poll()
        // could return null here and crash the player. Since the chunk is
        // already a byte array, it can be written to the track directly.
        byte[] chunk = queue.take();
        mAudioTrack.write(chunk, 0, chunk.length);
    } catch (InterruptedException e) {
        e.printStackTrace();
    }
}
private Runnable fetchAudioRunnable() {
    return new Runnable() {
        @Override
        public void run() {
            // Loop until recording is stopped; a single call would capture
            // only one buffer's worth of audio.
            while (mStatus == Status.STATUS_START) {
                fetchAudio();
            }
        }
    };
}
private Runnable playAudioRunnable() {
    return new Runnable() {
        @Override
        public void run() {
            while (mStatus == Status.STATUS_START) {
                playAudio();
            }
        }
    };
}
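A matching stop for this section, sketched on top of the Status assumption above: flipping the status lets both loops exit, and interrupting the threads unblocks any put/take waiting on the queue.
public void stop() {
    mStatus = Status.STATUS_STOP; // both worker loops observe this and exit
    mRecordThread.interrupt();    // unblock a thread waiting on the queue
    mPlayThread.interrupt();
    mAudioRecord.stop();
    mAudioTrack.stop();
}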
2. Capture audio data, save it to a local file, then play it back with a player
The record-while-playing flow above uses AudioTrack to play pcm data directly.
pcm is raw audio data, i.e. pulse-code modulated samples. An ordinary player cannot play such data; to save it as a playable audio file, one option is to convert it to the wav format.
wav: a standard digital audio file format developed by Microsoft for Windows. It can store all kinds of mono or stereo sound information and keeps the sound free of distortion. Simply put, a wav file is pcm data with a header attached.
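For pcm audio the header is 44 bytes, with all multi-byte fields stored little-endian; the utility class below writes exactly these fields:
- bytes 0-3: "RIFF"
- bytes 4-7: total file size minus 8
- bytes 8-11: "WAVE"
- bytes 12-15: "fmt "
- bytes 16-19: fmt chunk size (16 for pcm)
- bytes 20-21: audio format (1 = pcm)
- bytes 22-23: number of channels
- bytes 24-27: sample rate
- bytes 28-31: byte rate (sampleRate x channels x bitsPerSample / 8)
- bytes 32-33: block align (channels x bitsPerSample / 8)
- bytes 34-35: bits per sample
- bytes 36-39: "data"
- bytes 40-43: size of the pcm data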
The following utility class handles the conversion:
public class PcmToWavUtil {
private int mBufferSize; /* buffer size */
private int mSampleRate; /* sample rate */
private int mChannel; /* channel configuration */
/**
* @param sampleRate the sample rate
* @param channel the channel configuration
* @param encoding the audio data format
*/
public PcmToWavUtil(int sampleRate, int channel, int encoding) {
this.mSampleRate = sampleRate;
this.mChannel = channel;
this.mBufferSize = AudioRecord.getMinBufferSize(mSampleRate, mChannel, encoding);
}
/**
* Convert a pcm file to a wav file.
*
* @param inFilename source file path
* @param outFilename destination file path
*/
public void pcmToWav(String inFilename, String outFilename) {
FileInputStream in;
FileOutputStream out;
long totalAudioLen;
long totalDataLen;
long longSampleRate = mSampleRate;
int channels = mChannel == AudioFormat.CHANNEL_IN_MONO ? 1 : 2;
long byteRate = 16 * mSampleRate * channels / 8; // bytes per second = bitsPerSample * sampleRate * channels / 8
byte[] data = new byte[mBufferSize];
try {
in = new FileInputStream(inFilename);
out = new FileOutputStream(outFilename);
totalAudioLen = in.getChannel().size();
totalDataLen = totalAudioLen + 36;
writeWaveFileHeader(out, totalAudioLen, totalDataLen,
longSampleRate, channels, byteRate);
int len;
while ((len = in.read(data)) != -1) {
out.write(data, 0, len); // write only the bytes actually read
}
in.close();
out.close();
} catch (IOException e) {
e.printStackTrace();
}
}
/**
* Prepend the wav file header.
*/
private void writeWaveFileHeader(FileOutputStream out, long totalAudioLen, long totalDataLen, long longSampleRate, int channels, long byteRate) throws IOException {
byte[] header = new byte[44];
// RIFF/WAVE header
header[0] = 'R';
header[1] = 'I';
header[2] = 'F';
header[3] = 'F';
header[4] = (byte) (totalDataLen & 0xff);
header[5] = (byte) ((totalDataLen >> 8) & 0xff);
header[6] = (byte) ((totalDataLen >> 16) & 0xff);
header[7] = (byte) ((totalDataLen >> 24) & 0xff);
//WAVE
header[8] = 'W';
header[9] = 'A';
header[10] = 'V';
header[11] = 'E';
// 'fmt ' chunk
header[12] = 'f';
header[13] = 'm';
header[14] = 't';
header[15] = ' ';
// 4 bytes: size of 'fmt ' chunk
header[16] = 16;
header[17] = 0;
header[18] = 0;
header[19] = 0;
// audio format = 1 (pcm)
header[20] = 1;
header[21] = 0;
header[22] = (byte) channels;
header[23] = 0;
header[24] = (byte) (longSampleRate & 0xff);
header[25] = (byte) ((longSampleRate >> 8) & 0xff);
header[26] = (byte) ((longSampleRate >> 16) & 0xff);
header[27] = (byte) ((longSampleRate >> 24) & 0xff);
header[28] = (byte) (byteRate & 0xff);
header[29] = (byte) ((byteRate >> 8) & 0xff);
header[30] = (byte) ((byteRate >> 16) & 0xff);
header[31] = (byte) ((byteRate >> 24) & 0xff);
// block align = channels * bitsPerSample / 8 (the original hardcoded 2 channels,
// which breaks the header for mono input)
header[32] = (byte) (channels * 16 / 8);
header[33] = 0;
// bits per sample
header[34] = 16;
header[35] = 0;
//data
header[36] = 'd';
header[37] = 'a';
header[38] = 't';
header[39] = 'a';
header[40] = (byte) (totalAudioLen & 0xff);
header[41] = (byte) ((totalAudioLen >> 8) & 0xff);
header[42] = (byte) ((totalAudioLen >> 16) & 0xff);
header[43] = (byte) ((totalAudioLen >> 24) & 0xff);
out.write(header, 0, 44);
}
}
The rest of the implementation stays essentially the same:
Note: the main change is switching the capture and playback paths from in-memory byte I/O to file I/O.
The new play() method is the entry point for playing audio back after it has been captured and saved to a file.
private FileOutputStream os;
private FileInputStream is;
private static final String DIS_FILENAME = Environment.getExternalStorageDirectory().getPath() + "/audiotest";
private static final String WAV_FILENAME = Environment.getExternalStorageDirectory().getPath() + "/audiowav";
public void initFile() {
try {
mPcmToWavUtil = new PcmToWavUtil(AUDIO_SAMPLE_RATE, AUDIO_CHANNEL, AUDIO_FORMAT);
File file = new File(DIS_FILENAME);
if (file.exists()) {
file.delete();
}
os = new FileOutputStream(DIS_FILENAME);
} catch (FileNotFoundException e) {
e.printStackTrace();
}
}
private void fetchAudio() {
int size = mAudioRecord.read(mAudioBuffer, 0, mAudioBuffer.length);
if (size > 0) {
try {
if (os != null) {
os.write(mAudioBuffer, 0, size); // write only the bytes actually captured
}
} catch (IOException e) {
e.printStackTrace();
}
}
}
private void playAudio() {
mPlayBuffer = new byte[mBufferSize];
try {
int len;
while ((len = is.read(mPlayBuffer)) != -1) {
mAudioTrack.write(mPlayBuffer, 0, len); // play only the bytes actually read
}
} catch (IOException e) {
e.printStackTrace();
}
}
public void play() {
try {
is = new FileInputStream(DIS_FILENAME);
} catch (FileNotFoundException e) {
e.printStackTrace();
}
initTrack();
mAudioTrack.play();
mPlayThread = new Thread(playAudioRunnable());
mPlayThread.start();
}
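initTrack() is referenced here but not shown in the original; a minimal sketch, assuming it simply (re)creates the AudioTrack with output-side parameters:
private void initTrack() {
    // Re-create the track, sized and configured for playback.
    int playBufferSize = AudioTrack.getMinBufferSize(AUDIO_SAMPLE_RATE,
            AudioFormat.CHANNEL_OUT_STEREO, AUDIO_FORMAT);
    mAudioTrack = new AudioTrack(mStreamType, AUDIO_SAMPLE_RATE,
            AudioFormat.CHANNEL_OUT_STEREO, AUDIO_FORMAT, playBufferSize, mMode);
}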
public void stopRecord() {
    mAudioRecord.stop();
    mStatus = Status.STATUS_STOP;
    if (os != null) {
        try {
            os.close(); // close the stream so all pcm data reaches disk
        } catch (IOException e) {
            e.printStackTrace();
        }
        os = null;
    }
    release();
}
public void release() {
    if (mAudioRecord != null) {
        mAudioRecord.release();
        mAudioRecord = null;
    }
}
public void stopPlay() {
    if (mAudioTrack != null) {
        mAudioTrack.stop();
        mAudioTrack.release();
        mAudioTrack = null;
    }
    try {
        if (is != null) is.close(); // release the playback file stream
    } catch (IOException e) {
        e.printStackTrace();
    }
}
The format conversion is not wired into the code above; performing it takes just one line:
mPcmToWavUtil.pcmToWav(DIS_FILENAME, WAV_FILENAME);
It can be called from the stop-recording method, for example; where to put it is up to you.
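If the conversion should happen automatically, one natural spot is the end of stopRecord(), after the pcm stream has been closed; a sketch, not part of the original flow:
public void stopRecord() {
    mAudioRecord.stop();
    mStatus = Status.STATUS_STOP;
    // ... close os as shown above ...
    release();
    // Convert once the pcm file is complete on disk.
    mPcmToWavUtil.pcmToWav(DIS_FILENAME, WAV_FILENAME);
}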