• Main Page
  • Related Pages
  • Modules
  • Data Structures
  • Files
  • Examples
  • File List
  • Globals

libavformat/output-example.c

Go to the documentation of this file.
00001 /*
00002  * Copyright (c) 2003 Fabrice Bellard
00003  *
00004  * Permission is hereby granted, free of charge, to any person obtaining a copy
00005  * of this software and associated documentation files (the "Software"), to deal
00006  * in the Software without restriction, including without limitation the rights
00007  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
00008  * copies of the Software, and to permit persons to whom the Software is
00009  * furnished to do so, subject to the following conditions:
00010  *
00011  * The above copyright notice and this permission notice shall be included in
00012  * all copies or substantial portions of the Software.
00013  *
00014  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
00015  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
00016  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
00017  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
00018  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
00019  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
00020  * THE SOFTWARE.
00021  */
00022 
00032 #include <stdlib.h>
00033 #include <stdio.h>
00034 #include <string.h>
00035 #include <math.h>
00036 
00037 #include "libavutil/mathematics.h"
00038 #include "libavformat/avformat.h"
00039 #include "libswscale/swscale.h"
00040 
/* some libav headers redefine exit() to discourage its use in library code;
   undo that here — presumably so this example can call exit() directly (TODO confirm) */
#undef exit

/* 5 seconds stream duration */
#define STREAM_DURATION   5.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
/* total number of video frames encoded over the whole stream duration */
#define STREAM_NB_FRAMES  ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
#define STREAM_PIX_FMT PIX_FMT_YUV420P /* default pix_fmt */

/* rescaling algorithm used by swscale when converting to a non-YUV420P format */
static int sws_flags = SWS_BICUBIC;
00050 
/**************************************************************/
/* audio output */

static float t, tincr, tincr2;     /* sine generator: phase, phase increment, increment of the increment */
static int16_t *samples;           /* raw PCM buffer fed to the audio encoder */
static uint8_t *audio_outbuf;      /* buffer receiving encoded audio packets */
static int audio_outbuf_size;      /* size of audio_outbuf in bytes */
static int audio_input_frame_size; /* samples per channel expected per encoded frame */
00059 
00060 /*
00061  * add an audio output stream
00062  */
00063 static AVStream *add_audio_stream(AVFormatContext *oc, enum CodecID codec_id)
00064 {
00065     AVCodecContext *c;
00066     AVStream *st;
00067 
00068     st = av_new_stream(oc, 1);
00069     if (!st) {
00070         fprintf(stderr, "Could not alloc stream\n");
00071         exit(1);
00072     }
00073 
00074     c = st->codec;
00075     c->codec_id = codec_id;
00076     c->codec_type = AVMEDIA_TYPE_AUDIO;
00077 
00078     /* put sample parameters */
00079     c->sample_fmt = AV_SAMPLE_FMT_S16;
00080     c->bit_rate = 64000;
00081     c->sample_rate = 44100;
00082     c->channels = 2;
00083 
00084     // some formats want stream headers to be separate
00085     if(oc->oformat->flags & AVFMT_GLOBALHEADER)
00086         c->flags |= CODEC_FLAG_GLOBAL_HEADER;
00087 
00088     return st;
00089 }
00090 
00091 static void open_audio(AVFormatContext *oc, AVStream *st)
00092 {
00093     AVCodecContext *c;
00094     AVCodec *codec;
00095 
00096     c = st->codec;
00097 
00098     /* find the audio encoder */
00099     codec = avcodec_find_encoder(c->codec_id);
00100     if (!codec) {
00101         fprintf(stderr, "codec not found\n");
00102         exit(1);
00103     }
00104 
00105     /* open it */
00106     if (avcodec_open(c, codec) < 0) {
00107         fprintf(stderr, "could not open codec\n");
00108         exit(1);
00109     }
00110 
00111     /* init signal generator */
00112     t = 0;
00113     tincr = 2 * M_PI * 110.0 / c->sample_rate;
00114     /* increment frequency by 110 Hz per second */
00115     tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
00116 
00117     audio_outbuf_size = 10000;
00118     audio_outbuf = av_malloc(audio_outbuf_size);
00119 
00120     /* ugly hack for PCM codecs (will be removed ASAP with new PCM
00121        support to compute the input frame size in samples */
00122     if (c->frame_size <= 1) {
00123         audio_input_frame_size = audio_outbuf_size / c->channels;
00124         switch(st->codec->codec_id) {
00125         case CODEC_ID_PCM_S16LE:
00126         case CODEC_ID_PCM_S16BE:
00127         case CODEC_ID_PCM_U16LE:
00128         case CODEC_ID_PCM_U16BE:
00129             audio_input_frame_size >>= 1;
00130             break;
00131         default:
00132             break;
00133         }
00134     } else {
00135         audio_input_frame_size = c->frame_size;
00136     }
00137     samples = av_malloc(audio_input_frame_size * 2 * c->channels);
00138 }
00139 
00140 /* prepare a 16 bit dummy audio frame of 'frame_size' samples and
00141    'nb_channels' channels */
00142 static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
00143 {
00144     int j, i, v;
00145     int16_t *q;
00146 
00147     q = samples;
00148     for(j=0;j<frame_size;j++) {
00149         v = (int)(sin(t) * 10000);
00150         for(i = 0; i < nb_channels; i++)
00151             *q++ = v;
00152         t += tincr;
00153         tincr += tincr2;
00154     }
00155 }
00156 
00157 static void write_audio_frame(AVFormatContext *oc, AVStream *st)
00158 {
00159     AVCodecContext *c;
00160     AVPacket pkt;
00161     av_init_packet(&pkt);
00162 
00163     c = st->codec;
00164 
00165     get_audio_frame(samples, audio_input_frame_size, c->channels);
00166 
00167     pkt.size= avcodec_encode_audio(c, audio_outbuf, audio_outbuf_size, samples);
00168 
00169     if (c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE)
00170         pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
00171     pkt.flags |= AV_PKT_FLAG_KEY;
00172     pkt.stream_index= st->index;
00173     pkt.data= audio_outbuf;
00174 
00175     /* write the compressed frame in the media file */
00176     if (av_interleaved_write_frame(oc, &pkt) != 0) {
00177         fprintf(stderr, "Error while writing audio frame\n");
00178         exit(1);
00179     }
00180 }
00181 
00182 static void close_audio(AVFormatContext *oc, AVStream *st)
00183 {
00184     avcodec_close(st->codec);
00185 
00186     av_free(samples);
00187     av_free(audio_outbuf);
00188 }
00189 
/**************************************************************/
/* video output */

static AVFrame *picture, *tmp_picture; /* encoder input frame, and YUV420P staging frame for conversion */
static uint8_t *video_outbuf;          /* buffer receiving encoded video packets */
static int frame_count, video_outbuf_size; /* frames written so far; size of video_outbuf */
00196 
00197 /* add a video output stream */
00198 static AVStream *add_video_stream(AVFormatContext *oc, enum CodecID codec_id)
00199 {
00200     AVCodecContext *c;
00201     AVStream *st;
00202 
00203     st = avformat_new_stream(oc, NULL);
00204     if (!st) {
00205         fprintf(stderr, "Could not alloc stream\n");
00206         exit(1);
00207     }
00208 
00209     c = st->codec;
00210     c->codec_id = codec_id;
00211     c->codec_type = AVMEDIA_TYPE_VIDEO;
00212 
00213     /* put sample parameters */
00214     c->bit_rate = 400000;
00215     /* resolution must be a multiple of two */
00216     c->width = 352;
00217     c->height = 288;
00218     /* time base: this is the fundamental unit of time (in seconds) in terms
00219        of which frame timestamps are represented. for fixed-fps content,
00220        timebase should be 1/framerate and timestamp increments should be
00221        identically 1. */
00222     c->time_base.den = STREAM_FRAME_RATE;
00223     c->time_base.num = 1;
00224     c->gop_size = 12; /* emit one intra frame every twelve frames at most */
00225     c->pix_fmt = STREAM_PIX_FMT;
00226     if (c->codec_id == CODEC_ID_MPEG2VIDEO) {
00227         /* just for testing, we also add B frames */
00228         c->max_b_frames = 2;
00229     }
00230     if (c->codec_id == CODEC_ID_MPEG1VIDEO){
00231         /* Needed to avoid using macroblocks in which some coeffs overflow.
00232            This does not happen with normal video, it just happens here as
00233            the motion of the chroma plane does not match the luma plane. */
00234         c->mb_decision=2;
00235     }
00236     // some formats want stream headers to be separate
00237     if(oc->oformat->flags & AVFMT_GLOBALHEADER)
00238         c->flags |= CODEC_FLAG_GLOBAL_HEADER;
00239 
00240     return st;
00241 }
00242 
00243 static AVFrame *alloc_picture(enum PixelFormat pix_fmt, int width, int height)
00244 {
00245     AVFrame *picture;
00246     uint8_t *picture_buf;
00247     int size;
00248 
00249     picture = avcodec_alloc_frame();
00250     if (!picture)
00251         return NULL;
00252     size = avpicture_get_size(pix_fmt, width, height);
00253     picture_buf = av_malloc(size);
00254     if (!picture_buf) {
00255         av_free(picture);
00256         return NULL;
00257     }
00258     avpicture_fill((AVPicture *)picture, picture_buf,
00259                    pix_fmt, width, height);
00260     return picture;
00261 }
00262 
00263 static void open_video(AVFormatContext *oc, AVStream *st)
00264 {
00265     AVCodec *codec;
00266     AVCodecContext *c;
00267 
00268     c = st->codec;
00269 
00270     /* find the video encoder */
00271     codec = avcodec_find_encoder(c->codec_id);
00272     if (!codec) {
00273         fprintf(stderr, "codec not found\n");
00274         exit(1);
00275     }
00276 
00277     /* open the codec */
00278     if (avcodec_open(c, codec) < 0) {
00279         fprintf(stderr, "could not open codec\n");
00280         exit(1);
00281     }
00282 
00283     video_outbuf = NULL;
00284     if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {
00285         /* allocate output buffer */
00286         /* XXX: API change will be done */
00287         /* buffers passed into lav* can be allocated any way you prefer,
00288            as long as they're aligned enough for the architecture, and
00289            they're freed appropriately (such as using av_free for buffers
00290            allocated with av_malloc) */
00291         video_outbuf_size = 200000;
00292         video_outbuf = av_malloc(video_outbuf_size);
00293     }
00294 
00295     /* allocate the encoded raw picture */
00296     picture = alloc_picture(c->pix_fmt, c->width, c->height);
00297     if (!picture) {
00298         fprintf(stderr, "Could not allocate picture\n");
00299         exit(1);
00300     }
00301 
00302     /* if the output format is not YUV420P, then a temporary YUV420P
00303        picture is needed too. It is then converted to the required
00304        output format */
00305     tmp_picture = NULL;
00306     if (c->pix_fmt != PIX_FMT_YUV420P) {
00307         tmp_picture = alloc_picture(PIX_FMT_YUV420P, c->width, c->height);
00308         if (!tmp_picture) {
00309             fprintf(stderr, "Could not allocate temporary picture\n");
00310             exit(1);
00311         }
00312     }
00313 }
00314 
00315 /* prepare a dummy image */
00316 static void fill_yuv_image(AVFrame *pict, int frame_index, int width, int height)
00317 {
00318     int x, y, i;
00319 
00320     i = frame_index;
00321 
00322     /* Y */
00323     for(y=0;y<height;y++) {
00324         for(x=0;x<width;x++) {
00325             pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
00326         }
00327     }
00328 
00329     /* Cb and Cr */
00330     for(y=0;y<height/2;y++) {
00331         for(x=0;x<width/2;x++) {
00332             pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
00333             pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
00334         }
00335     }
00336 }
00337 
/*
 * Encode and mux one video frame. Once STREAM_NB_FRAMES frames have been
 * generated, the last picture is re-submitted unchanged so the encoder can
 * flush any frames it is still buffering (B-frame latency).
 * Exits the process on conversion-context or write failure.
 */
static void write_video_frame(AVFormatContext *oc, AVStream *st)
{
    int out_size, ret;
    AVCodecContext *c;
    /* lazily-created scaler, reused across calls (static local) */
    static struct SwsContext *img_convert_ctx;

    c = st->codec;

    if (frame_count >= STREAM_NB_FRAMES) {
        /* no more frame to compress. The codec has a latency of a few
           frames if using B frames, so we get the last frames by
           passing the same picture again */
    } else {
        if (c->pix_fmt != PIX_FMT_YUV420P) {
            /* as we only generate a YUV420P picture, we must convert it
               to the codec pixel format if needed */
            if (img_convert_ctx == NULL) {
                img_convert_ctx = sws_getContext(c->width, c->height,
                                                 PIX_FMT_YUV420P,
                                                 c->width, c->height,
                                                 c->pix_fmt,
                                                 sws_flags, NULL, NULL, NULL);
                if (img_convert_ctx == NULL) {
                    fprintf(stderr, "Cannot initialize the conversion context\n");
                    exit(1);
                }
            }
            /* generate into the YUV420P staging frame, then convert into
               'picture' which holds the codec's pixel format */
            fill_yuv_image(tmp_picture, frame_count, c->width, c->height);
            sws_scale(img_convert_ctx, tmp_picture->data, tmp_picture->linesize,
                      0, c->height, picture->data, picture->linesize);
        } else {
            fill_yuv_image(picture, frame_count, c->width, c->height);
        }
    }


    if (oc->oformat->flags & AVFMT_RAWPICTURE) {
        /* raw video case. The API will change slightly in the near
           future for that */
        AVPacket pkt;
        av_init_packet(&pkt);

        /* the packet carries the AVPicture struct itself, not pixel data */
        pkt.flags |= AV_PKT_FLAG_KEY;
        pkt.stream_index= st->index;
        pkt.data= (uint8_t *)picture;
        pkt.size= sizeof(AVPicture);

        ret = av_interleaved_write_frame(oc, &pkt);
    } else {
        /* encode the image */
        out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, picture);
        /* if zero size, it means the image was buffered */
        if (out_size > 0) {
            AVPacket pkt;
            av_init_packet(&pkt);

            /* rescale the encoder timestamp to the stream time base */
            if (c->coded_frame->pts != AV_NOPTS_VALUE)
                pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
            if(c->coded_frame->key_frame)
                pkt.flags |= AV_PKT_FLAG_KEY;
            pkt.stream_index= st->index;
            pkt.data= video_outbuf;
            pkt.size= out_size;

            /* write the compressed frame in the media file */
            ret = av_interleaved_write_frame(oc, &pkt);
        } else {
            ret = 0;
        }
    }
    if (ret != 0) {
        fprintf(stderr, "Error while writing video frame\n");
        exit(1);
    }
    frame_count++;
}
00414 
00415 static void close_video(AVFormatContext *oc, AVStream *st)
00416 {
00417     avcodec_close(st->codec);
00418     av_free(picture->data[0]);
00419     av_free(picture);
00420     if (tmp_picture) {
00421         av_free(tmp_picture->data[0]);
00422         av_free(tmp_picture);
00423     }
00424     av_free(video_outbuf);
00425 }
00426 
00427 /**************************************************************/
00428 /* media file output */
00429 
/*
 * Entry point: guess the container from the output filename, create one
 * audio and one video stream (when the format defines default codecs),
 * encode interleaved audio/video until STREAM_DURATION seconds, then
 * finalize the file and free everything.
 */
int main(int argc, char **argv)
{
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVStream *audio_st, *video_st;
    double audio_pts, video_pts;
    int i;

    /* initialize libavcodec, and register all codecs and formats */
    av_register_all();

    if (argc != 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];

    /* auto detect the output format from the name. default is
       mpeg. */
    fmt = av_guess_format(NULL, filename, NULL);
    if (!fmt) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        fmt = av_guess_format("mpeg", NULL, NULL);
    }
    if (!fmt) {
        fprintf(stderr, "Could not find suitable output format\n");
        return 1;
    }

    /* allocate the output media context */
    oc = avformat_alloc_context();
    if (!oc) {
        fprintf(stderr, "Memory error\n");
        return 1;
    }
    oc->oformat = fmt;
    snprintf(oc->filename, sizeof(oc->filename), "%s", filename);

    /* add the audio and video streams using the default format codecs
       and initialize the codecs */
    video_st = NULL;
    audio_st = NULL;
    if (fmt->video_codec != CODEC_ID_NONE) {
        video_st = add_video_stream(oc, fmt->video_codec);
    }
    if (fmt->audio_codec != CODEC_ID_NONE) {
        audio_st = add_audio_stream(oc, fmt->audio_codec);
    }

    /* set the output parameters (must be done even if no
       parameters). */
    if (av_set_parameters(oc, NULL) < 0) {
        fprintf(stderr, "Invalid output format parameters\n");
        return 1;
    }

    /* print a summary of the streams/format to stderr */
    av_dump_format(oc, 0, filename, 1);

    /* now that all the parameters are set, we can open the audio and
       video codecs and allocate the necessary encode buffers */
    if (video_st)
        open_video(oc, video_st);
    if (audio_st)
        open_audio(oc, audio_st);

    /* open the output file, if needed (some formats, e.g. image
       sequences, manage their own files and set AVFMT_NOFILE) */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0) {
            fprintf(stderr, "Could not open '%s'\n", filename);
            return 1;
        }
    }

    /* write the stream header, if any */
    av_write_header(oc);

    for(;;) {
        /* compute current audio and video time, in seconds */
        if (audio_st)
            audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
        else
            audio_pts = 0.0;

        if (video_st)
            video_pts = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den;
        else
            video_pts = 0.0;

        /* stop once every stream that exists has reached the target duration */
        if ((!audio_st || audio_pts >= STREAM_DURATION) &&
            (!video_st || video_pts >= STREAM_DURATION))
            break;

        /* write interleaved audio and video frames: always feed the
           stream that is currently behind */
        if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
            write_audio_frame(oc, audio_st);
        } else {
            write_video_frame(oc, video_st);
        }
    }

    /* write the trailer, if any.  the trailer must be written
     * before you close the CodecContexts open when you wrote the
     * header; otherwise write_trailer may try to use memory that
     * was freed on av_codec_close() */
    av_write_trailer(oc);

    /* close each codec */
    if (video_st)
        close_video(oc, video_st);
    if (audio_st)
        close_audio(oc, audio_st);

    /* free the streams */
    for(i = 0; i < oc->nb_streams; i++) {
        av_freep(&oc->streams[i]->codec);
        av_freep(&oc->streams[i]);
    }

    if (!(fmt->flags & AVFMT_NOFILE)) {
        /* close the output file */
        avio_close(oc->pb);
    }

    /* free the muxer context itself */
    av_free(oc);

    return 0;
}
Generated on Thu Jan 24 2013 17:08:55 for Libav by doxygen 1.7.1