// tutorial03.c
// A pedagogical video player that will stream through every video frame as fast as it can
// and play audio (out of sync).
//
// Code based on FFplay, Copyright (c) 2003 Fabrice Bellard,
// and a tutorial by Martin Bohme (boehme@inb.uni-luebeckREMOVETHIS.de)
// Tested on Gentoo, CVS version 5/01/07 compiled with GCC 4.1.1
// Use
//
// gcc -o tutorial03 tutorial03.c -lavutil -lavformat -lavcodec -lswscale -lz -lm `sdl-config --cflags --libs`
// to build (assuming libavformat and libavcodec are correctly installed,
// and assuming you have sdl-config. Please refer to SDL docs for your installation.)
//
// Run using
// tutorial03 myvideofile.mpg
//
// to play the stream on your screen.
// 20130204 albert.liao modified start
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <SDL.h>
#include <SDL_thread.h>
//#undef main /* Prevents SDL from overriding main() */
// 20130204 albert.liao modified end
#ifdef __MINGW32__
#undef main /* Prevents SDL from overriding main() */
#endif
#include <stdio.h>
#define SDL_AUDIO_BUFFER_SIZE 1024
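// A simple thread-safe FIFO of AVPackets. The main thread pushes demuxed
// audio packets into it, and SDL's audio callback thread pops them off.
// The mutex protects the list; the condition variable lets the consumer
// sleep until a packet arrives.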
typedef struct PacketQueue {
AVPacketList *first_pkt, *last_pkt;
int nb_packets;
int size;
SDL_mutex *mutex;
SDL_cond *cond;
} PacketQueue;
PacketQueue audioq;
int quit = 0;
void packet_queue_init(PacketQueue *q) {
memset(q, 0, sizeof(PacketQueue));
q->mutex = SDL_CreateMutex();
q->cond = SDL_CreateCond();
}
int packet_queue_put(PacketQueue *q, AVPacket *pkt) {
AVPacketList *pkt1;
if(av_dup_packet(pkt) < 0) {
return -1;
}
pkt1 = av_malloc(sizeof(AVPacketList));
if (!pkt1)
return -1;
pkt1->pkt = *pkt;
pkt1->next = NULL;
SDL_LockMutex(q->mutex);
if (!q->last_pkt)
q->first_pkt = pkt1;
else
q->last_pkt->next = pkt1;
q->last_pkt = pkt1;
q->nb_packets++;
q->size += pkt1->pkt.size;
SDL_CondSignal(q->cond);
SDL_UnlockMutex(q->mutex);
return 0;
}
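// Pop a packet off the queue. With block != 0 this waits on the condition
// variable until a packet is available (or the global quit flag is set).
// Returns 1 on success, 0 if non-blocking and the queue is empty, -1 on quit.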
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
{
AVPacketList *pkt1;
int ret;
SDL_LockMutex(q->mutex);
for(;;) {
if(quit) {
ret = -1;
break;
}
pkt1 = q->first_pkt;
if (pkt1) {
q->first_pkt = pkt1->next;
if (!q->first_pkt)
q->last_pkt = NULL;
q->nb_packets--;
q->size -= pkt1->pkt.size;
*pkt = pkt1->pkt;
av_free(pkt1);
ret = 1;
break;
} else if (!block) {
ret = 0;
break;
} else {
SDL_CondWait(q->cond, q->mutex);
}
}
SDL_UnlockMutex(q->mutex);
return ret;
}
// 20130204 albert.liao modified start
#if 0
int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf, int buf_size) {
static AVPacket pkt;
static uint8_t *audio_pkt_data = NULL;
static int audio_pkt_size = 0;
int len1, data_size;
for(;;) {
while(audio_pkt_size > 0) {
data_size = buf_size;
len1 = avcodec_decode_audio2(aCodecCtx, (int16_t *)audio_buf, &data_size, audio_pkt_data, audio_pkt_size);
if(len1 < 0) {
/* if error, skip frame */
audio_pkt_size = 0;
break;
}
audio_pkt_data += len1;
audio_pkt_size -= len1;
if(data_size <= 0) {
/* No data yet, get more frames */
continue;
}
/* We have data, return it and come back for more later */
return data_size;
}
if(pkt.data)
av_free_packet(&pkt);
if(quit) {
return -1;
}
if(packet_queue_get(&audioq, &pkt, 1) < 0) {
return -1;
}
audio_pkt_data = pkt.data;
audio_pkt_size = pkt.size;
}
}
#endif
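// Decode one audio frame using the avcodec_decode_audio4() API (ffmpeg 1.x).
// Packets are pulled from audioq as needed; the decoded samples are copied
// into audio_buf and the decoded data size (as reported by
// av_samples_get_buffer_size) is returned, or -1 on quit.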
int audio_decode_frame(AVCodecContext *aCodecCtx, AVPacket *pkt,AVPacket *pkt_temp, AVFrame *frame, uint8_t *audio_buf)
{
int len1, data_size;
int got_frame;
int new_packet = 0;
for(;;)
{
while(pkt_temp->size > 0 || (!pkt_temp->data && new_packet))
{
if(!frame)
{
if (!(frame = avcodec_alloc_frame()))
return AVERROR(ENOMEM);
}
else
{
avcodec_get_frame_defaults(frame);
}
new_packet = 0;
len1 = avcodec_decode_audio4(aCodecCtx, frame, &got_frame, pkt_temp);
if(len1 < 0)
{
/* if error, skip frame */
pkt_temp->size = 0;
break;
}
pkt_temp->data += len1;
pkt_temp->size -= len1;
if (!got_frame)
{
/* stop sending empty packets if the decoder is finished */
continue;
}
data_size = av_samples_get_buffer_size(NULL, aCodecCtx->channels, frame->nb_samples, aCodecCtx->sample_fmt, 1);
//audio_buf = frame->data[0];
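// NOTE: this copy assumes a packed (interleaved) sample format such as
// AV_SAMPLE_FMT_S16, where all samples live in frame->data[0]. Planar
// decoder output would need the remaining planes copied (or resampled)
// as well; data_size above is the total size reported for all channels.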
memcpy(audio_buf,frame->data[0],frame->linesize[0]);
return data_size;
}
if(pkt->data)
av_free_packet(pkt);
memset(pkt_temp, 0, sizeof(*pkt_temp));
if(quit)
{
return -1;
}
if((new_packet = packet_queue_get(&audioq, pkt, 1)) < 0)
{
return -1;
}
*pkt_temp = *pkt;
}
}
// 20130204 albert.liao modified end
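// SDL calls this from its own audio thread whenever the device needs more
// data; it must fill exactly 'len' bytes of 'stream'. Decoded audio that
// does not fit yet is kept in the static audio_buf between calls.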
void audio_callback(void *userdata, Uint8 *stream, int len) {
AVCodecContext *aCodecCtx = (AVCodecContext *)userdata;
int len1, audio_size;
static uint8_t audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
static unsigned int audio_buf_size = 0;
static unsigned int audio_buf_index = 0;
// 20130204 albert.liao modified start
AVPacket *pkt = av_mallocz(sizeof(AVPacket));
AVPacket *pkt_temp = av_mallocz(sizeof(AVPacket));
AVFrame *frame = NULL;
// 20130204 albert.liao modified end
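// NOTE: pkt, pkt_temp and frame are (re)allocated on every callback and never
// freed, so any undecoded data left in pkt_temp is dropped and a little memory
// leaks per call. Making them static (as the original tutorial does with its
// packet state inside audio_decode_frame) would be one way to avoid that.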
while(len > 0) {
if(audio_buf_index >= audio_buf_size) {
/* We have already sent all our data; get more */
// 20130204 albert.liao modified start
//audio_size = audio_decode_frame(aCodecCtx, audio_buf, sizeof(audio_buf));
audio_size = audio_decode_frame(aCodecCtx, pkt, pkt_temp, frame, audio_buf);
// 20130204 albert.liao modified end
if(audio_size < 0) {
/* If error, output silence */
audio_buf_size = 1024; // arbitrary?
memset(audio_buf, 0, audio_buf_size);
} else {
audio_buf_size = audio_size;
}
audio_buf_index = 0;
}
len1 = audio_buf_size - audio_buf_index;
if(len1 > len)
len1 = len;
memcpy(stream, (uint8_t *)audio_buf + audio_buf_index, len1);
len -= len1;
stream += len1;
audio_buf_index += len1;
}
}
int main(int argc, char *argv[]) {
AVFormatContext *pFormatCtx;
int i, videoStream, audioStream;
AVCodecContext *pCodecCtx;
AVCodec *pCodec;
AVFrame *pFrame;
AVPacket packet;
int frameFinished;
float aspect_ratio;
// 201302023 albert.liao modified start
struct SwsContext *img_convert_ctx;
// 201302023 albert.liao modified end
AVCodecContext *aCodecCtx;
AVCodec *aCodec;
SDL_Overlay *bmp;
SDL_Surface *screen;
SDL_Rect rect;
SDL_Event event;
SDL_AudioSpec wanted_spec, spec;
if(argc < 2) {
fprintf(stderr, "Usage: test \n");
exit(1);
}
// Register all formats and codecs
av_register_all();
if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
exit(1);
}
// Open video file
// 20130204 albert.liao modified start
// Open video file
//if(av_open_input_file(&pFormatCtx, argv[1], NULL, 0, NULL)!=0)
pFormatCtx = avformat_alloc_context();
if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0)
return -1; // Couldn't open file
// 20130204 albert.liao modified end
// 201302023 albert.liao modified start
//if(av_find_stream_info(pFormatCtx)<0)
if(avformat_find_stream_info(pFormatCtx, NULL)<0)
return -1; // Couldn't find stream information
// 201302023 albert.liao modified end
// Dump information about file onto standard error
//dump_format(pFormatCtx, 0, argv[1], 0);
av_dump_format(pFormatCtx, 0, argv[1], 0);
// Find the first video stream
videoStream=-1;
audioStream=-1;
for(i=0; i<pFormatCtx->nb_streams; i++) {
// 201302023 albert.liao modified start
//if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO && videoStream < 0) {
if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO && videoStream < 0) {
videoStream=i;
}
//if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_AUDIO && audioStream < 0) {
if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO && audioStream < 0) {
audioStream=i;
}
// 201302023 albert.liao modified end
}
if(videoStream==-1)
return -1; // Didn't find a video stream
if(audioStream==-1)
return -1;
aCodecCtx=pFormatCtx->streams[audioStream]->codec;
// Set audio settings from codec info
wanted_spec.freq = aCodecCtx->sample_rate;
wanted_spec.format = AUDIO_S16SYS;
wanted_spec.channels = aCodecCtx->channels;
wanted_spec.silence = 0;
wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
wanted_spec.callback = audio_callback;
wanted_spec.userdata = aCodecCtx;
if(SDL_OpenAudio(&wanted_spec, &spec) < 0) {
fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
return -1;
}
aCodec = avcodec_find_decoder(aCodecCtx->codec_id);
if(!aCodec) {
fprintf(stderr, "Unsupported codec!\n");
return -1;
}
// 20130204 albert.liao modified start
// Open codec
//if(avcodec_open(aCodecCtx, aCodec)<0)
if(avcodec_open2(aCodecCtx, aCodec, NULL)<0)
return -1; // Could not open codec
// 20130204 albert.liao modified end
// audio_st = pFormatCtx->streams[index]
packet_queue_init(&audioq);
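// Unpause the audio device: from this point on SDL drives audio_callback()
// from its own thread, pulling packets off audioq.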
SDL_PauseAudio(0);
// Get a pointer to the codec context for the video stream
pCodecCtx=pFormatCtx->streams[videoStream]->codec;
// Find the decoder for the video stream
pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
if(pCodec==NULL) {
fprintf(stderr, "Unsupported codec!\n");
return -1; // Codec not found
}
// 20130204 albert.liao modified start
// Open codec
//if(avcodec_open(pCodecCtx, pCodec)<0)
if(avcodec_open2(pCodecCtx, pCodec, NULL)<0)
return -1; // Could not open codec
// 20130204 albert.liao modified end
// Allocate video frame
pFrame=avcodec_alloc_frame();
// Make a screen to put our video
#ifndef __DARWIN__
screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0);
#else
screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 24, 0);
#endif
if(!screen) {
fprintf(stderr, "SDL: could not set video mode - exiting\n");
exit(1);
}
// Allocate a place to put our YUV image on that screen
bmp = SDL_CreateYUVOverlay(pCodecCtx->width,
pCodecCtx->height,
SDL_YV12_OVERLAY,
screen);
// Read packets: queue audio packets, decode and display video frames
i=0;
while(av_read_frame(pFormatCtx, &packet)>=0) {
// Is this a packet from the video stream?
if(packet.stream_index==videoStream) {
// 20130204 albert.liao modified start
// Decode video frame
// avcodec_decode_video(pCodecCtx, pFrame, &frameFinished, packet.data, packet.size);
avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
// 20130204 albert.liao modified end
// Did we get a video frame?
if(frameFinished) {
SDL_LockYUVOverlay(bmp);
AVPicture pict;
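// SDL's YV12 overlay stores its planes in Y, V, U order while ffmpeg's
// YUV420P is Y, U, V, so the U and V plane pointers/pitches are swapped here.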
pict.data[0] = bmp->pixels[0];
pict.data[1] = bmp->pixels[2];
pict.data[2] = bmp->pixels[1];
pict.linesize[0] = bmp->pitches[0];
pict.linesize[1] = bmp->pitches[2];
pict.linesize[2] = bmp->pitches[1];
// 20130204 albert.liao modified start
// Convert the image into YUV format that SDL uses
//img_convert(&pict, PIX_FMT_YUV420P,
// (AVPicture *)pFrame, pCodecCtx->pix_fmt,
// pCodecCtx->width, pCodecCtx->height);
img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,\
pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,\
PIX_FMT_YUV420P, SWS_FAST_BILINEAR, NULL, NULL, NULL);
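// NOTE: a new SwsContext is created for every frame and never freed, which
// leaks memory. Creating it once before the decode loop (or using
// sws_getCachedContext()) and releasing it with sws_freeContext() at the end
// would avoid that.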
// Convert the decoded frame from its native pixel format into the YUV420P layout the SDL overlay expects
sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize,\
0, pCodecCtx->height, pict.data, pict.linesize);
// 20130204 albert.liao modified end
SDL_UnlockYUVOverlay(bmp);
rect.x = 0;
rect.y = 0;
rect.w = pCodecCtx->width;
rect.h = pCodecCtx->height;
SDL_DisplayYUVOverlay(bmp, &rect);
av_free_packet(&packet);
}
} else if(packet.stream_index==audioStream) {
packet_queue_put(&audioq, &packet);
} else {
av_free_packet(&packet);
}
// Handle pending SDL events so the window can be closed
SDL_PollEvent(&event);
switch(event.type) {
case SDL_QUIT:
quit = 1;
SDL_Quit();
exit(0);
break;
default:
break;
}
}
// Free the YUV frame
av_free(pFrame);
// Close the codec
avcodec_close(pCodecCtx);
// 20130204 albert.liao modified start
// Close the video file
// av_close_input_file(pFormatCtx);
avformat_close_input(&pFormatCtx);
// 20130204 albert.liao modified end
return 0;
}
Monday, February 4, 2013
FFMPEG -- An ffmpeg and SDL Tutorial: tutorial03
This is a hands-on exercise following http://dranger.com/ffmpeg/tutorial03.html; the example above compiles correctly against ffmpeg 1.1.1 (build Jan 27 2013).