The previous post covered video decoding and playback; this one covers audio. The decoding flow is the same as for video, except that we pick out the audio stream instead, and playback on Android uses AudioTrack because it is simple to implement.
Writing the Android code
Since playback goes through AudioTrack, the wrapper class declares the native methods and additionally exposes createAudioTrack(), which the C code calls back into to obtain an AudioTrack configured with the given sample rate and channel count.
public class YoungPlayer {
    public native void render(String input, Surface surface);
    public native void sound(String input, String output);
    public native void play(String input, Surface surface);

    static {
        System.loadLibrary("avutil-54");
        System.loadLibrary("swresample-1");
        System.loadLibrary("avcodec-56");
        System.loadLibrary("avformat-56");
        System.loadLibrary("swscale-3");
        System.loadLibrary("postproc-53");
        System.loadLibrary("avfilter-5");
        System.loadLibrary("avdevice-56");
        System.loadLibrary("yuv");
        System.loadLibrary("myffmpeg");
    }

    // Called from native code via JNI: returns an AudioTrack configured
    // for the decoded stream's sample rate and channel count.
    public AudioTrack createAudioTrack(int sampleRateInHz, int nb_channels) {
        int audioFormat = AudioFormat.ENCODING_PCM_16BIT;
        Log.i("yang", "nb_channels:" + nb_channels);
        int channelConfig;
        if (nb_channels == 1) {
            channelConfig = AudioFormat.CHANNEL_OUT_MONO;
        } else {
            // Two (or more) channels: play as stereo.
            channelConfig = AudioFormat.CHANNEL_OUT_STEREO;
        }
        int bufferSizeInBytes = AudioTrack.getMinBufferSize(sampleRateInHz, channelConfig, audioFormat);
        AudioTrack audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC,
                sampleRateInHz, channelConfig, audioFormat,
                bufferSizeInBytes, AudioTrack.MODE_STREAM);
        return audioTrack;
    }
}
The calling code:
public void sound(View btn) {
    String video = sp_video.getSelectedItem().toString();
    final String input = new File(Environment.getExternalStorageDirectory(), video).getAbsolutePath();
    final String output = new File(Environment.getExternalStorageDirectory(), "Output.pcm").getAbsolutePath();
    // Decode and play on a background thread so the UI thread is not blocked.
    new Thread(new Runnable() {
        public void run() {
            player.sound(input, output);
        }
    }).start();
}
Implementing audio file playback in C/C++
#include "com_yang_ffmpegDemo_YoungPlayer.h"
#include <stdlib.h>
#include <unistd.h>
#include <android/log.h>
#define LOGI(FORMAT,...) __android_log_print(ANDROID_LOG_INFO,"jason",FORMAT,##__VA_ARGS__);
#define LOGE(FORMAT,...) __android_log_print(ANDROID_LOG_ERROR,"jason",FORMAT,##__VA_ARGS__);
#define MAX_AUDIO_FRME_SIZE 48000 * 4
#include "libavformat/avformat.h"
#include "libavcodec/avcodec.h"
#include "libswscale/swscale.h"
#include "libswresample/swresample.h"
JNIEXPORT
void JNICALL Java_com_yang_ffmpegDemo_YoungPlayer_sound
(JNIEnv *env, jobject jthiz, jstring input_jstr, jstring output_jstr){
const char* input_cstr = (*env)->GetStringUTFChars(env,input_jstr,
NULL);
const char* output_cstr = (*env)->GetStringUTFChars(env,output_jstr,
NULL);
LOGI(
"%s",
"sound");
av_register_all();
AVFormatContext *pFormatCtx = avformat_alloc_context();
if(avformat_open_input(&pFormatCtx,input_cstr,
NULL,
NULL) !=
0){
LOGI(
"%s",
"无法打开音频文件");
return;
}
if(avformat_find_stream_info(pFormatCtx,
NULL) <
0){
LOGI(
"%s",
"无法获取输入文件信息");
return;
}
int i =
0, audio_stream_idx = -
1;
for(; i < pFormatCtx->nb_streams;i++){
if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO){
audio_stream_idx = i;
break;
}
}
AVCodecContext *codecCtx = pFormatCtx->streams[audio_stream_idx]->codec;
AVCodec *codec = avcodec_find_decoder(codecCtx->codec_id);
if(codec ==
NULL){
LOGI(
"%s",
"无法获取解码器");
return;
}
if(avcodec_open2(codecCtx,codec,
NULL) <
0){
LOGI(
"%s",
"无法打开解码器");
return;
}
AVPacket *packet = (AVPacket *)av_malloc(
sizeof(AVPacket));
AVFrame *frame = av_frame_alloc();
SwrContext *swrCtx = swr_alloc();
enum AVSampleFormat in_sample_fmt = codecCtx->sample_fmt;
enum AVSampleFormat out_sample_fmt = AV_SAMPLE_FMT_S16;
int in_sample_rate = codecCtx->sample_rate;
int out_sample_rate = in_sample_rate;
uint64_t in_ch_layout = codecCtx->channel_layout;
uint64_t out_ch_layout = AV_CH_LAYOUT_STEREO;
swr_alloc_set_opts(swrCtx,
out_ch_layout,out_sample_fmt,out_sample_rate,
in_ch_layout,in_sample_fmt,in_sample_rate,
0,
NULL);
swr_init(swrCtx);
int out_channel_nb = av_get_channel_layout_nb_channels(out_ch_layout);
jclass player_class = (*env)->GetObjectClass(env,jthiz);
jmethodID create_audio_track_mid = (*env)->GetMethodID(env,player_class,
"createAudioTrack",
"(II)Landroid/media/AudioTrack;");
jobject audio_track = (*env)->CallObjectMethod(env,jthiz,create_audio_track_mid,out_sample_rate,out_channel_nb);
jclass audio_track_class = (*env)->GetObjectClass(env,audio_track);
jmethodID audio_track_play_mid = (*env)->GetMethodID(env,audio_track_class,
"play",
"()V");
(*env)->CallVoidMethod(env,audio_track,audio_track_play_mid);
jmethodID audio_track_write_mid = (*env)->GetMethodID(env,audio_track_class,
"write",
"([BII)I");
FILE *fp_pcm = fopen(output_cstr,
"wb");
uint8_t *out_buffer = (uint8_t *)av_malloc(MAX_AUDIO_FRME_SIZE);
int got_frame =
0,index =
0, ret;
while(av_read_frame(pFormatCtx,packet) >=
0){
if(packet->stream_index == audio_stream_idx){
ret = avcodec_decode_audio4(codecCtx,frame,&got_frame,packet);
if(ret <
0){
LOGI(
"%s",
"解码完成");
}
if(got_frame >
0){
LOGI(
"解码:%d",index++);
swr_convert(swrCtx, &out_buffer, MAX_AUDIO_FRME_SIZE,(
const uint8_t **)frame->data,frame->nb_samples);
int out_buffer_size = av_samples_get_buffer_size(
NULL, out_channel_nb,
frame->nb_samples, out_sample_fmt,
1);
fwrite(out_buffer,
1,out_buffer_size,fp_pcm);
jbyteArray audio_sample_array = (*env)->NewByteArray(env,out_buffer_size);
jbyte* sample_bytep = (*env)->GetByteArrayElements(env,audio_sample_array,
NULL);
memcpy(sample_bytep,out_buffer,out_buffer_size);
(*env)->ReleaseByteArrayElements(env,audio_sample_array,sample_bytep,
0);
(*env)->CallIntMethod(env,audio_track,audio_track_write_mid,
audio_sample_array,
0,out_buffer_size);
(*env)->DeleteLocalRef(env,audio_sample_array);
usleep(
1000 *
16);
}
}
av_free_packet(packet);
}
av_frame_free(&frame);
av_free(out_buffer);
swr_free(&swrCtx);
avcodec_close(codecCtx);
avformat_close_input(&pFormatCtx);
(*env)->ReleaseStringUTFChars(env,input_jstr,input_cstr);
(*env)->ReleaseStringUTFChars(env,output_jstr,output_cstr);
}
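Since the loop also dumps the resampled data to Output.pcm, the decode path can be sanity-checked off-device with ffplay. The raw file carries no header, so the format has to be given explicitly (here assuming the stream decoded at 44100 Hz; substitute whatever rate your log shows):

ffplay -f s16le -ar 44100 -ac 2 Output.pcm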
The playback above is driven from C by calling back into the Java AudioTrack, and it takes only a few lines of JNI code. Playing raw PCM data also comes up often in real development: in a hand-rolled player, the decoded PCM audio can instead be fed to OpenSL ES, which is faster and lower-latency than the Java-level AudioTrack. For a better result, see Android + FFmpeg + OpenSL ES audio decoding and playback.
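As a pointer in that direction, here is a minimal OpenSL ES playback sketch of my own (not code from that article): it plays 16-bit stereo 44.1 kHz PCM through an Android simple buffer queue, the callback-driven counterpart of AudioTrack.write(). The callback body and the buffer handoff are left as comments because they depend on how the decoder produces data; link against OpenSLES, and treat this as an illustrative sketch with error checks omitted.

// Minimal OpenSL ES PCM playback sketch (illustration only; assumes
// 16-bit stereo 44.1 kHz PCM such as the out_buffer produced above).
#include <SLES/OpenSLES.h>
#include <SLES/OpenSLES_Android.h>

static SLObjectItf engineObj, mixObj, playerObj;
static SLEngineItf engine;
static SLPlayItf player;
static SLAndroidSimpleBufferQueueItf bq;

// Invoked each time OpenSL finishes a buffer: enqueue the next PCM chunk here.
static void bq_callback(SLAndroidSimpleBufferQueueItf q, void *ctx) {
    // (*q)->Enqueue(q, next_pcm_chunk, next_chunk_size);
}

void init_opensl_player(void) {
    // Engine and output mix.
    slCreateEngine(&engineObj, 0, NULL, 0, NULL, NULL);
    (*engineObj)->Realize(engineObj, SL_BOOLEAN_FALSE);
    (*engineObj)->GetInterface(engineObj, SL_IID_ENGINE, &engine);
    (*engine)->CreateOutputMix(engine, &mixObj, 0, NULL, NULL);
    (*mixObj)->Realize(mixObj, SL_BOOLEAN_FALSE);

    // Source: a 2-buffer queue of 16-bit stereo 44.1 kHz PCM.
    SLDataLocator_AndroidSimpleBufferQueue loc_bq =
        {SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, 2};
    SLDataFormat_PCM fmt = {SL_DATAFORMAT_PCM, 2, SL_SAMPLINGRATE_44_1,
        SL_PCMSAMPLEFORMAT_FIXED_16, SL_PCMSAMPLEFORMAT_FIXED_16,
        SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT, SL_BYTEORDER_LITTLEENDIAN};
    SLDataSource src = {&loc_bq, &fmt};
    SLDataLocator_OutputMix loc_mix = {SL_DATALOCATOR_OUTPUTMIX, mixObj};
    SLDataSink sink = {&loc_mix, NULL};

    // Player exposing the buffer-queue interface.
    const SLInterfaceID ids[] = {SL_IID_ANDROIDSIMPLEBUFFERQUEUE};
    const SLboolean req[] = {SL_BOOLEAN_TRUE};
    (*engine)->CreateAudioPlayer(engine, &playerObj, &src, &sink, 1, ids, req);
    (*playerObj)->Realize(playerObj, SL_BOOLEAN_FALSE);
    (*playerObj)->GetInterface(playerObj, SL_IID_PLAY, &player);
    (*playerObj)->GetInterface(playerObj, SL_IID_ANDROIDSIMPLEBUFFERQUEUE, &bq);
    (*bq)->RegisterCallback(bq, bq_callback, NULL);
    (*player)->SetPlayState(player, SL_PLAYSTATE_PLAYING);
    // Kick playback off by enqueuing the first decoded buffer:
    // (*bq)->Enqueue(bq, out_buffer, out_buffer_size);
}

The key difference from the AudioTrack path above is that OpenSL pulls data: each time a buffer drains it invokes the callback, so the decoder stays one buffer ahead and the usleep() pacing hack is no longer needed.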