/*
 * Convert from OpenCV image and write movie with FFmpeg
 *
 * Copyright (c) 2021 yohhoy
 */
#include <iostream>
#include <vector>
// FFmpeg
extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/imgutils.h>
#include <libswscale/swscale.h>
}
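// NOTE: FFmpeg exposes a plain C API; the extern "C" block above keeps the C++
// compiler from name-mangling these declarations so the linker can resolve the
// library symbols.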
// OpenCV
#include <opencv2/opencv.hpp>
#include <opencv2/highgui.hpp>

int main(int argc, char* argv[])
{
    if (argc < 2) {
        std::cout << "Usage: cv2ff <outfile>" << std::endl;
        return 1;
    }
    const char* outfile = argv[1];

    // av_log_set_level(AV_LOG_DEBUG);
    int ret;

    const int dst_width = 640;
    const int dst_height = 480;
    const AVRational dst_fps = {30, 1};
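    // NOTE: AVRational stores an exact fraction {num, den}; dst_fps = {30, 1}
    // means 30/1 = 30 frames per second, used below for time_base/framerate.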

    // initialize OpenCV capture as input frame generator
    cv::VideoCapture cvcap(0);
    if (!cvcap.isOpened()) {
        std::cerr << "fail to open cv::VideoCapture";
        return 2;
    }
    cvcap.set(cv::CAP_PROP_FRAME_WIDTH, dst_width);
    cvcap.set(cv::CAP_PROP_FRAME_HEIGHT, dst_height);
    cvcap.set(cv::CAP_PROP_FPS, dst_fps.num);
    // some devices ignore the above parameters when capturing images,
    // so we query the actual parameters for the image rescaler.
    const int cv_width = cvcap.get(cv::CAP_PROP_FRAME_WIDTH);
    const int cv_height = cvcap.get(cv::CAP_PROP_FRAME_HEIGHT);
    const int cv_fps = cvcap.get(cv::CAP_PROP_FPS);
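    // If the device delivers a different frame size than requested, the
    // swscale context created below rescales cv_width x cv_height input to
    // the encoder's dst_width x dst_height, so the mismatch is harmless.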

    // open output format context
    AVFormatContext* outctx = nullptr;
    ret = avformat_alloc_output_context2(&outctx, nullptr, nullptr, outfile);
    if (ret < 0) {
        std::cerr << "fail to avformat_alloc_output_context2(" << outfile << "): ret=" << ret;
        return 2;
    }
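    // Passing only the filename lets FFmpeg guess the container format
    // (e.g. .mp4, .mkv) from the file extension.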

    // create new video stream
    AVCodec* vcodec = avcodec_find_encoder(outctx->oformat->video_codec);
    AVStream* vstrm = avformat_new_stream(outctx, vcodec);
    if (!vstrm) {
        std::cerr << "fail to avformat_new_stream";
        return 2;
    }
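    // NOTE: with FFmpeg 5.0 and later, avcodec_find_encoder() returns
    // const AVCodec*, so vcodec may need to be declared const there.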

    // open video encoder
    AVCodecContext* cctx = avcodec_alloc_context3(vcodec);
    if (!cctx) {
        std::cerr << "fail to avcodec_alloc_context3";
        return 2;
    }
    cctx->width = dst_width;
    cctx->height = dst_height;
    cctx->pix_fmt = vcodec->pix_fmts[0];
    cctx->time_base = av_inv_q(dst_fps);
    cctx->framerate = dst_fps;
    if (outctx->oformat->flags & AVFMT_GLOBALHEADER)
        cctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    ret = avcodec_open2(cctx, vcodec, nullptr);
    if (ret < 0) {
        std::cerr << "fail to avcodec_open2: ret=" << ret;
        return 2;
    }
    avcodec_parameters_from_context(vstrm->codecpar, cctx);
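    // time_base is the unit in which frame PTS values are expressed; for a
    // constant 30 fps stream it is simply 1/30, the inverse of the frame rate.
    // avcodec_parameters_from_context() copies the encoder settings into the
    // stream's codecpar so the muxer can write them into the container header.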

    // initialize sample scaler
    SwsContext* swsctx = sws_getContext(
        cv_width, cv_height, AV_PIX_FMT_BGR24,
        dst_width, dst_height, cctx->pix_fmt,
        SWS_BILINEAR, nullptr, nullptr, nullptr);
    if (!swsctx) {
        std::cerr << "fail to sws_getContext";
        return 2;
    }
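    // cv::Mat frames arrive as packed BGR24, while most encoders expect a
    // planar YUV format (e.g. YUV420P), so every captured frame is converted
    // and rescaled through this single swscale context.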

    // allocate frame buffer for encoding
    AVFrame* frame = av_frame_alloc();
    frame->width = dst_width;
    frame->height = dst_height;
    frame->format = static_cast<int>(cctx->pix_fmt);
    ret = av_frame_get_buffer(frame, 32);
    if (ret < 0) {
        std::cerr << "fail to av_frame_get_buffer: ret=" << ret;
        return 2;
    }
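    // av_frame_get_buffer() allocates the pixel planes for the given
    // width/height/format; the second argument requests 32-byte alignment
    // (0 would let FFmpeg pick a suitable alignment for the current CPU).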

    // allocate packet to retrieve encoded frame
    AVPacket* pkt = av_packet_alloc();

    // open output IO context
    ret = avio_open2(&outctx->pb, outfile, AVIO_FLAG_WRITE, nullptr, nullptr);
    if (ret < 0) {
        std::cerr << "fail to avio_open2: ret=" << ret;
        return 2;
    }
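    // The AVFormatContext only describes the container; avio_open2() opens
    // the actual output file that the muxer writes through outctx->pb.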

    std::cout
        << "camera: " << cv_width << 'x' << cv_height << '@' << cv_fps << "\n"
        << "outfile: " << outfile << "\n"
        << "format: " << outctx->oformat->name << "\n"
        << "vcodec: " << vcodec->name << "\n"
        << "size: " << dst_width << 'x' << dst_height << "\n"
        << "fps: " << av_q2d(cctx->framerate) << "\n"
        << "pixfmt: " << av_get_pix_fmt_name(cctx->pix_fmt) << "\n"
        << std::flush;

    // write media container header (if any)
    ret = avformat_write_header(outctx, nullptr);
    if (ret < 0) {
        std::cerr << "fail to avformat_write_header: ret=" << ret;
        return 2;
    }
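    // avformat_write_header() may adjust vstrm->time_base to what the
    // container actually supports, which is why packets are rescaled from
    // cctx->time_base to vstrm->time_base before muxing below.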

    cv::Mat image;

    // encoding loop
    int64_t frame_pts = 0;
    unsigned nb_frames = 0;
    bool end_of_stream = false;
    for (;;) {
        if (!end_of_stream) {
            // retrieve source image
            cvcap >> image;
            cv::imshow("press ESC to exit", image);
            if (cv::waitKey(33) == 0x1b) {
                // flush encoder
                avcodec_send_frame(cctx, nullptr);
                end_of_stream = true;
            }
        }
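        // Sending a null frame puts the encoder into draining mode: it emits
        // its remaining buffered packets and then reports AVERROR_EOF below.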
        if (!end_of_stream) {
            // convert cv::Mat(OpenCV) to AVFrame(FFmpeg)
            const int stride[4] = { static_cast<int>(image.step[0]) };
            sws_scale(swsctx, &image.data, stride, 0, image.rows, frame->data, frame->linesize);
            frame->pts = frame_pts++;
            // encode video frame
            ret = avcodec_send_frame(cctx, frame);
            if (ret < 0) {
                std::cerr << "fail to avcodec_send_frame: ret=" << ret << "\n";
                break;
            }
        }
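        // stride[0] is the cv::Mat row size in bytes (BGR24 is one packed
        // plane, so a single pointer and stride suffice). frame->pts counts
        // in cctx->time_base units (1/30 s per tick), so incrementing it by
        // one per frame yields constant 30 fps timestamps.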
        while ((ret = avcodec_receive_packet(cctx, pkt)) >= 0) {
            // rescale packet timestamp
            pkt->duration = 1;
            av_packet_rescale_ts(pkt, cctx->time_base, vstrm->time_base);
            // write encoded packet
            av_write_frame(outctx, pkt);
            av_packet_unref(pkt);
            std::cout << nb_frames << '\r' << std::flush; // dump progress
            ++nb_frames;
        }
        if (ret == AVERROR_EOF)
            break;
    }
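    // Note: avcodec_receive_packet() returns AVERROR(EAGAIN) when the encoder
    // needs more input; AVERROR_EOF is only returned after flushing, which is
    // what terminates the encoding loop above.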
    std::cout << nb_frames << " frames encoded" << std::endl;

    // write trailer and close file
    av_write_trailer(outctx);
    avio_close(outctx->pb);
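    // av_write_trailer() finalizes the container (e.g. writes the MP4 index);
    // skipping it typically leaves the output file unplayable.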

    av_packet_free(&pkt);
    av_frame_free(&frame);
    sws_freeContext(swsctx);
    avcodec_free_context(&cctx);
    avformat_free_context(outctx);
    return 0;
}