c++ - Trying to sync audio/visual using FFmpeg and OpenAL
Hi, I have been studying the Dranger FFmpeg tutorial, which explains how to sync audio and video once you have frames displayed and audio playing, which is the point I'm at.

Unfortunately, the tutorial is out of date (Stephen Dranger explained that to me himself) and it uses SDL, which I'm not doing - this is a BlackBerry 10 application.

I cannot make the video frames display at the correct speed (they are playing too fast), and I have been trying for over a week - seriously!

I have three threads running - one reads the stream into the audio and video packet queues, and the other two handle the audio and the video respectively (the packet queue helpers are sketched after the code below).

If someone could explain what is happening after scanning the relevant code, it would be a lifesaver.

The delay (what I pass to usleep(testDelay)) seems to keep going up (incrementing), which doesn't seem right to me.
count = 1;
MyApp* inst = worker->app; //(VideoUploadFacebook*)arg;
qDebug() << "\n start loadstream";
w = new QWaitCondition();
w2 = new QWaitCondition();
context = avformat_alloc_context();
inst->threadStarted = true;
cout << "start of decoding thread"; cout.flush();
av_register_all();
avcodec_register_all();
avformat_network_init();
av_log_set_callback(&log_callback);
AVInputFormat *pFormat;
//const char device[] = "/dev/video0";
const char formatName[] = "mp4";
cout << "2start of decoding thread"; cout.flush();
if (!(pFormat = av_find_input_format(formatName))) {
    printf("can't find input format %s\n", formatName);
    //return void*;
}
//open rtsp
if(avformat_open_input(&context, inst->capturedUrl.data(), pFormat, NULL) != 0) {
    // return ;
    cout << "error opening of decoding thread: " << inst->capturedUrl.data();
    cout.flush();
}
cout << "3start of decoding thread"; cout.flush();
// av_dump_format(context, 0, inst->capturedUrl.data(), 0);
/*
if(avformat_find_stream_info(context, NULL) < 0) {
    return EXIT_FAILURE;
}
*/
//search video stream
for(int i = 0; i < context->nb_streams; i++) {
    if(context->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
        inst->video_stream_index = i;
}
cout << "3z start of decoding thread"; cout.flush();
AVFormatContext* oc = avformat_alloc_context();
av_read_play(context); //play rtsp
AVDictionary *optionsDict = NULL;
ccontext = context->streams[inst->video_stream_index]->codec;
inst->audioc = context->streams[1]->codec;
cout << "4start of decoding thread"; cout.flush();
codec = avcodec_find_decoder(ccontext->codec_id);
ccontext->pix_fmt = PIX_FMT_YUV420P;
AVCodec* audio_codec = avcodec_find_decoder(inst->audioc->codec_id);
inst->packet = new AVPacket();
if (!audio_codec) {
    cout << "audio codec not found\n";
    //fflush( stdout );
    exit(1);
}
if (avcodec_open2(inst->audioc, audio_codec, NULL) < 0) {
    cout << "could not open codec\n";
    //fflush( stdout );
    exit(1);
}
if (avcodec_open2(ccontext, codec, &optionsDict) < 0)
    exit(1);
cout << "5start of decoding thread"; cout.flush();
inst->pic = avcodec_alloc_frame();
av_init_packet(inst->packet);
while(av_read_frame(context, inst->packet) >= 0 && &inst->keepGoing) {
    if(inst->packet->stream_index == 0) { //packet is video
        int check = 0;
        // av_init_packet(inst->packet);
        int result = avcodec_decode_video2(ccontext, inst->pic, &check, inst->packet);
        if(check)
            break;
    }
}
inst->originalVideoWidth = inst->pic->width;
inst->originalVideoHeight = inst->pic->height;
float aspect = (float)inst->originalVideoHeight / (float)inst->originalVideoWidth;
inst->newVideoWidth = inst->originalVideoWidth;
int newHeight = (int)(inst->newVideoWidth * aspect);
inst->newVideoHeight = newHeight; //(int)inst->originalVideoHeight / inst->originalVideoWidth * inst->newVideoWidth; // = new height
int size = avpicture_get_size(PIX_FMT_YUV420P, inst->originalVideoWidth, inst->originalVideoHeight);
uint8_t* picture_buf = (uint8_t*)(av_malloc(size));
avpicture_fill((AVPicture *) inst->pic, picture_buf, PIX_FMT_YUV420P, inst->originalVideoWidth, inst->originalVideoHeight);
picrgb = avcodec_alloc_frame();
int size2 = avpicture_get_size(PIX_FMT_YUV420P, inst->newVideoWidth, inst->newVideoHeight);
uint8_t* picture_buf2 = (uint8_t*)(av_malloc(size2));
avpicture_fill((AVPicture *) picrgb, picture_buf2, PIX_FMT_YUV420P, inst->newVideoWidth, inst->newVideoHeight);
if(ccontext->pix_fmt != PIX_FMT_YUV420P) {
    std::cout << "fmt != 420!!!: " << ccontext->pix_fmt << std::endl;
    // return (EXIT_SUCCESS); //-1;
}
if (inst->createForeignWindow(inst->myForeignWindow->windowGroup(),
        "HelloForeignWindowAppIDqq", 0, 0, inst->newVideoWidth, inst->newVideoHeight)) {
} else {
    qDebug() << "The ForeignWindow was not initialized";
}
inst->keepGoing = true;
inst->img_convert_ctx = sws_getContext(inst->originalVideoWidth, inst->originalVideoHeight, PIX_FMT_YUV420P,
                                       inst->newVideoWidth, inst->newVideoHeight, PIX_FMT_YUV420P,
                                       SWS_BILINEAR, NULL, NULL, NULL);
is = (VideoState*)av_mallocz(sizeof(VideoState));
if (!is)
    return NULL;
is->audioStream = 1;
is->audio_st = context->streams[1];
is->audio_buf_size = 0;
is->audio_buf_index = 0;
is->videoStream = 0;
is->video_st = context->streams[0];
is->frame_timer = (double)av_gettime() / 1000000.0;
is->frame_last_delay = 40e-3;
is->av_sync_type = DEFAULT_AV_SYNC_TYPE;
//av_strlcpy(is->filename, filename, sizeof(is->filename));
is->iformat = pFormat;
is->ytop = 0;
is->xleft = 0;
/* start video display */
is->pictq_mutex = new QMutex();
is->pictq_cond = new QWaitCondition();
is->subpq_mutex = new QMutex();
is->subpq_cond = new QWaitCondition();
is->video_current_pts_time = av_gettime();
packet_queue_init(&audioq);
packet_queue_init(&videoq);
is->audioq = audioq;
is->videoq = videoq;
AVPacket* packet2 = new AVPacket();
ccontext->get_buffer = our_get_buffer;
ccontext->release_buffer = our_release_buffer;
av_init_packet(packet2);
while(inst->keepGoing) {
    if(av_read_frame(context, packet2) < 0 && keepGoing) {
        printf("bufferframe could not read a frame from the stream.\n");
        fflush( stdout );
    } else {
        if(packet2->stream_index == 0) {
            packet_queue_put(&videoq, packet2);
        } else if(packet2->stream_index == 1) {
            packet_queue_put(&audioq, packet2);
        } else {
            av_free_packet(packet2);
        }
        if(!videoThreadStarted) {
            videoThreadStarted = true;
            QThread* thread = new QThread;
            videoThread = new VideoStreamWorker(this);
            // give QThread ownership of worker object
            videoThread->moveToThread(thread);
            connect(videoThread, SIGNAL(error(QString)), this, SLOT(errorHandler(QString)));
            QObject::connect(videoThread, SIGNAL(refreshNeeded()), this, SLOT(refreshNeededSlot()));
            connect(thread, SIGNAL(started()), videoThread, SLOT(doWork()));
            connect(videoThread, SIGNAL(finished()), thread, SLOT(quit()));
            connect(videoThread, SIGNAL(finished()), videoThread, SLOT(deleteLater()));
            connect(thread, SIGNAL(finished()), thread, SLOT(deleteLater()));
            thread->start();
        }
        if(!audioThreadStarted) {
            audioThreadStarted = true;
            QThread* thread = new QThread;
            AudioStreamWorker* videoThread = new AudioStreamWorker(this);
            // give QThread ownership of worker object
            videoThread->moveToThread(thread);
            // connect videoThread error signal to errorHandler slot
            connect(videoThread, SIGNAL(error(QString)), this, SLOT(errorHandler(QString)));
            // connect the thread's started() signal to the process() slot in videoThread, causing it to start
            connect(thread, SIGNAL(started()), videoThread, SLOT(doWork()));
            connect(videoThread, SIGNAL(finished()), thread, SLOT(quit()));
            connect(videoThread, SIGNAL(finished()), videoThread, SLOT(deleteLater()));
            // make sure the thread object is deleted after execution has finished
            connect(thread, SIGNAL(finished()), thread, SLOT(deleteLater()));
            thread->start();
        }
    }
} //finished main loop

int MyApp::video_thread() {
    //VideoState *is = (VideoState *)arg;
    AVPacket pkt1, *packet = &pkt1;
    int len1, frameFinished;
    double pts;
    pic = avcodec_alloc_frame();
    for(;;) {
        if(packet_queue_get(&videoq, packet, 1) < 0) {
            // means we quit getting packets
            break;
        }
        pts = 0;
        global_video_pkt_pts2 = packet->pts;
        // decode video frame
        len1 = avcodec_decode_video2(ccontext, pic, &frameFinished, packet);
        if(packet->dts == AV_NOPTS_VALUE && pic->opaque && *(uint64_t*)pic->opaque != AV_NOPTS_VALUE) {
            pts = *(uint64_t *)pic->opaque;
        } else if(packet->dts != AV_NOPTS_VALUE) {
            pts = packet->dts;
        } else {
            pts = 0;
        }
        pts *= av_q2d(is->video_st->time_base);
        // did we get a video frame?
        if(frameFinished) {
            pts = synchronize_video(is, pic, pts);
            actualPts = pts;
            refreshSlot();
        }
        av_free_packet(packet);
    }
    av_free(pic);
    return 0;
}

int MyApp::audio_thread() {
    //VideoState *is = (VideoState *)arg;
    AVPacket pkt1, *packet = &pkt1;
    int len1, frameFinished;
    ALuint source;
    ALenum format = 0;
    // ALuint frequency;
    ALenum alError;
    ALint val2;
    ALuint buffers[NUM_BUFFERS];
    int dataSize;
    ALCcontext *aContext;
    ALCdevice *device;
    if (!alutInit(NULL, NULL)) {
        // printf(stderr, "init alut error\n");
    }
    device = alcOpenDevice(NULL);
    if (device == NULL) {
        // printf(stderr, "device error\n");
    }
    //create context
    aContext = alcCreateContext(device, NULL);
    alcMakeContextCurrent(aContext);
    if(!(aContext)) {
        printf("Could not create the OpenAL context!\n");
        return 0;
    }
    alListener3f(AL_POSITION, 0.0f, 0.0f, 0.0f);
    //ALenum alError;
    if(alGetError() != AL_NO_ERROR) {
        cout << "could not create buffers";
        cout.flush();
        fflush( stdout );
        return 0;
    }
    alGenBuffers(NUM_BUFFERS, buffers);
    alGenSources(1, &source);
    if(alGetError() != AL_NO_ERROR) {
        cout << "after Could not create buffers or source.\n";
        cout.flush( );
        return 0;
    }
    int i;
    int indexOfPacket;
    double pts;
    //double pts;
    int n;
    for(i = 0; i < NUM_BUFFERS; i++) {
        if(packet_queue_get(&audioq, packet, 1) < 0) {
            // means we quit getting packets
            break;
        }
        cout << "streamindex=audio \n";
        cout.flush( );
        //printf("before decode audio\n");
        //fflush( stdout );
        // AVPacket *packet = new AVPacket(); //malloc(sizeof(AVPacket*));
        AVFrame *decodedFrame = NULL;
        int gotFrame = 0;
        // AVFrame* decodedFrame;
        if(!decodedFrame) {
            if(!(decodedFrame = avcodec_alloc_frame())) {
                cout << "Run out of memory, stop the streaming...\n";
                fflush( stdout );
                cout.flush();
                return -2;
            }
        } else {
            avcodec_get_frame_defaults(decodedFrame);
        }
        int len = avcodec_decode_audio4(audioc, decodedFrame, &gotFrame, packet);
        if(len < 0) {
            cout << "Error while decoding.\n";
            cout.flush( );
            return -3;
        }
        if(len < 0) {
            /* if error, skip frame */
            is->audio_pkt_size = 0;
            //break;
        }
        is->audio_pkt_data += len;
        is->audio_pkt_size -= len;
        pts = is->audio_clock;
        // *pts_ptr = pts;
        n = 2 * is->audio_st->codec->channels;
        is->audio_clock += (double)packet->size / (double)(n * is->audio_st->codec->sample_rate);
        if(gotFrame) {
            cout << "got audio frame.\n";
            cout.flush( );
            // we have a buffer ready, send it
            dataSize = av_samples_get_buffer_size(NULL, audioc->channels, decodedFrame->nb_samples, audioc->sample_fmt, 1);
            if(!format) {
                if(audioc->sample_fmt == AV_SAMPLE_FMT_U8 || audioc->sample_fmt == AV_SAMPLE_FMT_U8P) {
                    if(audioc->channels == 1) {
                        format = AL_FORMAT_MONO8;
                    } else if(audioc->channels == 2) {
                        format = AL_FORMAT_STEREO8;
                    }
                } else if(audioc->sample_fmt == AV_SAMPLE_FMT_S16 || audioc->sample_fmt == AV_SAMPLE_FMT_S16P) {
                    if(audioc->channels == 1) {
                        format = AL_FORMAT_MONO16;
                    } else if(audioc->channels == 2) {
                        format = AL_FORMAT_STEREO16;
                    }
                }
                if(!format) {
                    cout << "OpenAL can't open that format of sound.\n";
                    cout.flush( );
                    return -4;
                }
            }
            printf("alBufferData audio b4.\n");
            fflush( stdout );
            alBufferData(buffers[i], format, *decodedFrame->data, dataSize, decodedFrame->sample_rate);
            cout << "after alBufferData buffers \n";
            cout.flush( );
            av_free_packet(packet); //=av_free(packet);
            av_free(decodedFrame);
            if((alError = alGetError()) != AL_NO_ERROR) {
                printf("Error while buffering.\n");
                printALError(alError);
                return -6;
            }
        }
    }
    cout << "before queue buffers \n";
    cout.flush();
    alSourceQueueBuffers(source, NUM_BUFFERS, buffers);
    cout << "before play.\n";
    cout.flush();
    alSourcePlay(source);
    cout << "after play.\n";
    cout.flush();
    if((alError = alGetError()) != AL_NO_ERROR) {
        cout << "error starting the stream.\n";
        cout.flush();
        printALError(alError);
        return 0;
    }
    // AVPacket *pkt = &is->audio_pkt;
    while(keepGoing) {
        while(packet_queue_get(&audioq, packet, 1) >= 0) {
            // means we quit getting packets
            do {
                alGetSourcei(source, AL_BUFFERS_PROCESSED, &val2);
                usleep(SLEEP_BUFFERING);
            } while(val2 <= 0);
            if(alGetError() != AL_NO_ERROR) {
                fprintf(stderr, "Error getting source :(\n");
                return 1;
            }
            while(val2--) {
                ALuint buffer;
                alSourceUnqueueBuffers(source, 1, &buffer);
                if(alGetError() != AL_NO_ERROR) {
                    fprintf(stderr, "Error unqueue buffers :(\n");
                    // return 1;
                }
                AVFrame *decodedFrame = NULL;
                int gotFrame = 0;
                // AVFrame* decodedFrame;
                if(!decodedFrame) {
                    if(!(decodedFrame = avcodec_alloc_frame())) {
                        cout << "Run out of memory, stop the streaming...\n";
                        //fflush( stdout );
                        cout.flush();
                        return -2;
                    }
                } else {
                    avcodec_get_frame_defaults(decodedFrame);
                }
                int len = avcodec_decode_audio4(audioc, decodedFrame, &gotFrame, packet);
                if(len < 0) {
                    cout << "Error while decoding.\n";
                    cout.flush( );
                    is->audio_pkt_size = 0;
                    return -3;
                }
                is->audio_pkt_data += len;
                is->audio_pkt_size -= len;
                if(packet->size <= 0) {
                    /* No data yet, get more frames */
                    //continue;
                }
                if(gotFrame) {
                    pts = is->audio_clock;
                    len = synchronize_audio(is, (int16_t *)is->audio_buf, packet->size, pts);
                    is->audio_buf_size = packet->size;
                    pts = is->audio_clock;
                    // *pts_ptr = pts;
                    n = 2 * is->audio_st->codec->channels;
                    is->audio_clock += (double)packet->size / (double)(n * is->audio_st->codec->sample_rate);
                    if(packet->pts != AV_NOPTS_VALUE) {
                        is->audio_clock = av_q2d(is->audio_st->time_base) * packet->pts;
                    }
                    len = av_samples_get_buffer_size(NULL, audioc->channels, decodedFrame->nb_samples, audioc->sample_fmt, 1);
                    alBufferData(buffer, format, *decodedFrame->data, len, decodedFrame->sample_rate);
                    if(alGetError() != AL_NO_ERROR) {
                        fprintf(stderr, "Error buffering :(\n");
                        return 1;
                    }
                    alSourceQueueBuffers(source, 1, &buffer);
                    if(alGetError() != AL_NO_ERROR) {
                        fprintf(stderr, "Error queueing buffers :(\n");
                        return 1;
                    }
                }
            }
            alGetSourcei(source, AL_SOURCE_STATE, &val2);
            if(val2 != AL_PLAYING)
                alSourcePlay(source);
        }
        //pic = avcodec_alloc_frame();
    }
    qDebug() << "end audiothread";
    return 1;
}

void MyApp::refreshSlot() {
    if(true) {
        printf("got frame %d, %d\n", pic->width, ccontext->width);
        fflush( stdout );
        sws_scale(img_convert_ctx, (const uint8_t **)pic->data, pic->linesize, 0,
                  originalVideoHeight, &picrgb->data[0], &picrgb->linesize[0]);
        printf("rescaled frame %d, %d\n", newVideoWidth, newVideoHeight);
        fflush( stdout );
        //av_free_packet(packet);
        //av_init_packet(packet);
        qDebug() << "waking audio as video finished";
        ////mutex.unlock();
        //mutex2.lock();
        doingVideoFrame = false;
        //doingAudioFrame = false;
        ////mutex2.unlock();
        //mutex2.unlock();
        //w2->wakeAll();
        //w->wakeAll();
        qDebug() << "now woke audio";
        //pic = picrgb;
        uint8_t *srcy = picrgb->data[0];
        uint8_t *srcu = picrgb->data[1];
        uint8_t *srcv = picrgb->data[2];
        printf("got src yuv frame %d\n", &srcy);
        fflush( stdout );
        unsigned char *ptr = NULL;
        screen_get_buffer_property_pv(mScreenPixelBuffer, SCREEN_PROPERTY_POINTER, (void**) &ptr);
        unsigned char *y = ptr;
        unsigned char *u = y + (newVideoHeight * mStride);
        unsigned char *v = u + (newVideoHeight * mStride) / 4;
        int i = 0;
        printf("got buffer picrgbwidth= %d \n", newVideoWidth);
        fflush( stdout );
        for (i = 0; i < newVideoHeight; i++) {
            int doff = i * mStride;
            int soff = i * picrgb->linesize[0];
            memcpy(&y[doff], &srcy[soff], newVideoWidth);
        }
        for (i = 0; i < newVideoHeight / 2; i++) {
            int doff = i * mStride / 2;
            int soff = i * picrgb->linesize[1];
            memcpy(&u[doff], &srcu[soff], newVideoWidth / 2);
        }
        for (i = 0; i < newVideoHeight / 2; i++) {
            int doff = i * mStride / 2;
            int soff = i * picrgb->linesize[2];
            memcpy(&v[doff], &srcv[soff], newVideoWidth / 2);
        }
        printf("before posttoscreen \n");
        fflush( stdout );
        video_refresh_timer();
        qDebug() << "end refreshSlot";
    } else {
    }
}

void MyApp::refreshNeededSlot2() {
    printf("blitting buffer");
    fflush(stdout);
    screen_buffer_t screen_buffer;
    screen_get_window_property_pv(mScreenWindow, SCREEN_PROPERTY_RENDER_BUFFERS, (void**) &screen_buffer);
    int attribs[] = { SCREEN_BLIT_SOURCE_WIDTH, newVideoWidth, SCREEN_BLIT_SOURCE_HEIGHT, newVideoHeight, SCREEN_BLIT_END };
    int res2 = screen_blit(mScreenCtx, screen_buffer, mScreenPixelBuffer, attribs);
    printf("dirty rectangles");
    fflush(stdout);
    int dirty_rects[] = { 0, 0, newVideoWidth, newVideoHeight };
    screen_post_window(mScreenWindow, screen_buffer, 1, dirty_rects, 0);
    printf("done screenpostwindow");
    fflush(stdout);
}

void MyApp::video_refresh_timer() {
    testDelay = 0;
    // VideoState *is = (VideoState*)userdata;
    VideoPicture *vp;
    //double pts = 0;
    double actual_delay, delay, sync_threshold, ref_clock, diff;
    if(is->video_st) {
        if(false) //is->pictq_size == 0)
        {
            testDelay = 1;
            schedule_refresh(is, 1);
        } else {
            // vp = &is->pictq[is->pictq_rindex];
            delay = actualPts - is->frame_last_pts; /* the pts from last time */
            if(delay <= 0 || delay >= 1.0) {
                /* if incorrect delay, use previous one */
                delay = is->frame_last_delay;
            }
            /* save for next time */
            is->frame_last_delay = delay;
            is->frame_last_pts = actualPts;
            is->video_current_pts = actualPts;
            is->video_current_pts_time = av_gettime();
            /* update delay to sync to audio */
            ref_clock = get_audio_clock(is);
            diff = actualPts - ref_clock;
            /* Skip or repeat the frame. Take delay into account;
               FFPlay still doesn't "know if this is the best guess." */
            sync_threshold = (delay > AV_SYNC_THRESHOLD) ? delay : AV_SYNC_THRESHOLD;
            if(fabs(diff) < AV_NOSYNC_THRESHOLD) {
                if(diff <= -sync_threshold) {
                    delay = 0;
                } else if(diff >= sync_threshold) {
                    delay = 2 * delay;
                }
            }
            is->frame_timer += delay;
            /* compute the real delay */
            actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
            if(actual_delay < 0.010) {
                /* really it should skip the picture instead */
                actual_delay = 0.010;
            }
            testDelay = (int)(actual_delay * 1000 + 0.5);
            schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));
            /* show the picture! */
            //video_display(is);
            // SDL_CondSignal(is->pictq_cond);
            // SDL_UnlockMutex(is->pictq_mutex);
        }
    } else {
        testDelay = 100;
        schedule_refresh(is, 100);
    }
}

void MyApp::schedule_refresh(VideoState *is, int delay) {
    qDebug() << "start schedule refresh timer" << delay;
    typeOfEvent = FF_REFRESH_EVENT2;
    w->wakeAll();
    // SDL_AddTimer(delay,
}
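The packet_queue_init / packet_queue_put / packet_queue_get functions used above aren't shown here; they are basically the tutorial's packet queue with the SDL mutex/condition swapped for Qt's QMutex/QWaitCondition. Roughly this (a simplified sketch - my real code may differ slightly in the details):

// Sketch of the packet queue: a linked list of AVPacketList nodes guarded by a
// QMutex, with a QWaitCondition so a decoder thread can block until the reader
// thread has put something in.
typedef struct PacketQueue {
    AVPacketList *first_pkt, *last_pkt;
    int nb_packets;
    int size;
    QMutex *mutex;
    QWaitCondition *cond;
} PacketQueue;

void packet_queue_init(PacketQueue *q) {
    memset(q, 0, sizeof(PacketQueue));
    q->mutex = new QMutex();
    q->cond = new QWaitCondition();
}

int packet_queue_put(PacketQueue *q, AVPacket *pkt) {
    AVPacketList *pkt1;
    if(av_dup_packet(pkt) < 0)
        return -1;
    pkt1 = (AVPacketList *)av_malloc(sizeof(AVPacketList));
    if(!pkt1)
        return -1;
    pkt1->pkt = *pkt;
    pkt1->next = NULL;
    q->mutex->lock();
    if(!q->last_pkt)
        q->first_pkt = pkt1;
    else
        q->last_pkt->next = pkt1;
    q->last_pkt = pkt1;
    q->nb_packets++;
    q->size += pkt1->pkt.size;
    q->cond->wakeOne();          // wake a consumer blocked in packet_queue_get
    q->mutex->unlock();
    return 0;
}

int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block) {
    AVPacketList *pkt1;
    int ret;
    q->mutex->lock();
    for(;;) {
        pkt1 = q->first_pkt;
        if(pkt1) {
            q->first_pkt = pkt1->next;
            if(!q->first_pkt)
                q->last_pkt = NULL;
            q->nb_packets--;
            q->size -= pkt1->pkt.size;
            *pkt = pkt1->pkt;
            av_free(pkt1);
            ret = 1;
            break;
        } else if(!block) {
            ret = 0;
            break;
        } else {
            q->cond->wait(q->mutex); // block until packet_queue_put wakes us
        }
    }
    q->mutex->unlock();
    return ret;
}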
I am waiting on the data in a loop in the following way:
QMutex mutex;
mutex.lock();
while(keepGoing) {
    qDebug() << "mainthread" << testDelay;
    w->wait(&mutex);
    mutex.unlock();
    qDebug() << "mainthread past wait";
    if(!keepGoing) {
        break;
    }
    if(testDelay > 0 && typeOfEvent == FF_REFRESH_EVENT2) {
        usleep(testDelay);
        refreshNeededSlot2();
    } else if(testDelay > 0 && typeOfEvent == FF_QUIT_EVENT2) {
        keepGoing = false;
        exit(0);
        break;
        // usleep(testDelay);
        // refreshNeededSlot2();
    }
    qDebug() << "mainthreadend";
    mutex.lock();
}
mutex.unlock();
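One thing I'm unsure about here is the units: video_refresh_timer() sets testDelay to (int)(actual_delay * 1000 + 0.5), which is milliseconds, but usleep() takes microseconds. If a millisecond delay is what's intended, then I would expect the wait to need a conversion along these lines (just a sketch of what I mean, not what the code above currently does):

if(testDelay > 0 && typeOfEvent == FF_REFRESH_EVENT2) {
    // testDelay is in milliseconds (see video_refresh_timer); usleep wants microseconds
    usleep(testDelay * 1000);
    refreshNeededSlot2();
}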
Please let me know if I need to provide more relevant code. I'm sorry the code is untidy - I'm still learning C++ and have been modifying this code for over a week, as mentioned.

I've just added a sample of the output I'm seeing from the printouts to the console. I can't get my head around it (it's complicated for my level of expertise), but once you can see frames being displayed and audio playing it's difficult to give up, when it took a couple of weeks to get to this stage.

Please give me a hand if you spot the problem.
mainthread past wait pts after syncvideo= 1073394046 got frame 640, 640 start video_refresh_timer actualpts = 1.66833 frame lastpts = 1.63497 start schedule refresh timer need delay 123
pts after syncvideo= 1073429033 got frame 640, 640 mainthread loop delay before refresh = 123 start video_refresh_timer actualpts = 1.7017 frame lastpts = 1.66833 start schedule refresh timer need delay 115
mainthread past wait pts after syncvideo= 1073464021 got frame 640, 640 start video_refresh_timer actualpts = 1.73507 frame lastpts = 1.7017 start schedule refresh timer need delay 140
mainthread loop delay before refresh = 140 pts after syncvideo= 1073499008 got frame 640, 640 start video_refresh_timer actualpts = 1.76843 frame lastpts = 1.73507 start schedule refresh timer need delay 163
mainthread past wait pts after syncvideo= 1073533996 got frame 640, 640 start video_refresh_timer actualpts = 1.8018 frame lastpts = 1.76843 start schedule refresh timer need delay 188
mainthread loop delay before refresh = 188 pts after syncvideo= 1073568983 got frame 640, 640 start video_refresh_timer actualpts = 1.83517 frame lastpts = 1.8018 start schedule refresh timer need delay 246
mainthread past wait pts after syncvideo= 1073603971 got frame 640, 640 start video_refresh_timer actualpts = 1.86853 frame lastpts = 1.83517 start schedule refresh timer need delay 299
mainthread loop delay before refresh = 299 pts after syncvideo= 1073638958 got frame 640, 640 start video_refresh_timer actualpts = 1.9019 frame lastpts = 1.86853 start schedule refresh timer need delay 358
mainthread past wait pts after syncvideo= 1073673946 got frame 640, 640 start video_refresh_timer actualpts = 1.93527 frame lastpts = 1.9019 start schedule refresh timer need delay 416
mainthread loop delay before refresh = 416 pts after syncvideo= 1073708933 got frame 640, 640 start video_refresh_timer actualpts = 1.96863 frame lastpts = 1.93527 start schedule refresh timer need delay 474
mainthread past wait pts after syncvideo= 1073742872 got frame 640, 640 mainthread loop delay before refresh = 474 start video_refresh_timer actualpts = 2.002 frame lastpts = 1.96863 start schedule refresh timer need delay 518
mainthread past wait pts after syncvideo= 1073760366 got frame 640, 640 start video_refresh_timer actualpts = 2.03537 frame lastpts = 2.002 start schedule refresh timer need delay 575