一 前言

  这个项目采用瑞芯微的rk3588作为主控芯片,运行Linux操作系统。mipi摄像头采集图像数据后将数据传输给rk3588。随后会开启2个线程,其中1个线程将摄像头采集的图像数据进行编码,这里可以参考我的上一篇博客https://blog.csdn.net/qq_37669342/article/details/145032331?spm=1001.2014.3001.5502。随后将编码后的h264数据放入队列中。另一个线程从队列中取出数据,交给ffmpeg处理。ffmpeg会将这些h264数据流打上时间戳,然后通过srt网络通信发送到ubuntu的srt服务器上,windows上的ffplay会从srt服务器上拉流,最后解码并在屏幕上显示出来。

二 代码

  整体代码结构
在这里插入图片描述
  main.c

#include "/home/alientek/linux/07_video_net/include/camera.h"

#include "/home/alientek/linux/07_video_net/include/ffmpeg_init.h"

#include "/home/alientek/linux/07_video_net/include/venc_queue.h"

// Implemented in encode.cpp: C-callable entry point for the H.264 encode pipeline.
extern void YuvEncodeH264(int width, int height);

Node *venc_queue; // definition: queue of encoded H.264 packets (type from venc_queue.h)

unsigned short* YUV_buffer[FRAMEBUFFER_COUNT]; // definition: per-buffer copies of captured frames

int main(int argc, char *argv[])
{
    int mainReturn;

    if (2 != argc) {
        fprintf(stderr, "Usage: %s <video_dev>\n", argv[0]);
        exit(EXIT_FAILURE);
    }

    /* 初始化摄像头 */
    if (v4l2_dev_init(argv[1]))
        exit(EXIT_FAILURE);

    /* 枚举所有格式并打印摄像头支持的分辨率及帧率 */
    //v4l2_enum_formats();
    //v4l2_print_formats();

    /* 设置格式 */
    if (v4l2_set_format())
        exit(EXIT_FAILURE);

    /* 初始化帧缓冲:申请、内存映射、入队 */    
    if (v4l2_init_buffer())
        exit(EXIT_FAILURE);    

    /* 开启视频采集 */
    if (v4l2_stream_on())
        exit(EXIT_FAILURE);

    //初始化ffmpeg
    mainReturn = init_ffmpeg_output_module();
    //mainReturn = init_ffmpeg_recv_module();

    //初始化视频队列
    venc_queue = initQueue();

    YUV_buffer[0] = (unsigned short*)malloc(1920*1088*1.5);
    YUV_buffer[1] = (unsigned short*)malloc(1920*1088*1.5);
    YUV_buffer[2] = (unsigned short*)malloc(1920*1088*1.5);

    YuvEncodeH264(1920, 1080);

}

  camera.c

#include "/home/alientek/linux/07_video_net/include/camera.h"


#define FMT_NUM_PLANES 1 /* NV12 delivered as a single plane (Y followed by UV) */

static int v4l2_fd = -1;                // camera device file descriptor
//static cam_fmt cam_fmts[10];
cam_buf_info double_camera_buf_infos[FRAMEBUFFER_COUNT]; // definition: mmap'd capture buffer start/length

/*
 * Open the camera device and verify it supports multi-planar video capture.
 * Returns 0 on success, -1 on failure (device left closed on failure).
 */
int v4l2_dev_init(const char *device)
{
    struct v4l2_capability cap = {0};

    /* Open the camera */
    v4l2_fd = open(device, O_RDWR);
    if (0 > v4l2_fd) {
        fprintf(stderr, "open error: %s: %s\n", device, strerror(errno));
        return -1;
    }

    /*
     * BUG FIX: the VIDIOC_QUERYCAP result was ignored; on ioctl failure the
     * zero-initialized `cap` would be inspected and silently misreported the
     * device as unsupported.
     */
    if (0 > ioctl(v4l2_fd, VIDIOC_QUERYCAP, &cap)) {
        fprintf(stderr, "ioctl error: VIDIOC_QUERYCAP: %s\n", strerror(errno));
        close(v4l2_fd);
        return -1;
    }

    /* Require a multi-planar capture device */
    if (!(V4L2_CAP_VIDEO_CAPTURE_MPLANE & cap.capabilities)) {
        fprintf(stderr, "Error: %s: No capture video device!\n", device);
        close(v4l2_fd);
        return -1;
    }

    return 0;
}


/*
 * Configure the capture format: 1920x1080 NV12, multi-planar API.
 * Returns 0 on success, -1 on failure.
 */
int v4l2_set_format(void)
{
    struct v4l2_format fmt;
    memset(&fmt, 0, sizeof(struct v4l2_format));
    fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
    /*
     * BUG FIX: with a *_MPLANE buffer type the driver reads fmt.fmt.pix_mp,
     * not fmt.fmt.pix.  The old code only worked by accident because the
     * leading members of the two union variants share the same layout.
     * NV12 is chosen to match the rk3588 encoder's input format.
     */
    fmt.fmt.pix_mp.width = 1920;
    fmt.fmt.pix_mp.height = 1080;
    fmt.fmt.pix_mp.pixelformat = V4L2_PIX_FMT_NV12;
    fmt.fmt.pix_mp.field = V4L2_FIELD_NONE;
    fmt.fmt.pix_mp.num_planes = 1;   /* mplane S_FMT requires num_planes to be set */

    printf("视频帧大小<%d * %d>\n", fmt.fmt.pix_mp.width, fmt.fmt.pix_mp.height);

    if (0 == ioctl(v4l2_fd, VIDIOC_S_FMT, &fmt))
    {
        printf("set format ok: %d x %d\n", fmt.fmt.pix_mp.width, fmt.fmt.pix_mp.height);
    }
    else
    {
        printf("can not set format\n");
        return -1;
    }

    //设置曝光模式 (exposure mode not implemented)

    /*
     * BUG FIX: the function previously fell off the end without a return
     * value; main() uses the result, so that was undefined behavior.
     */
    return 0;
}

/*
 * Request FRAMEBUFFER_COUNT mmap buffers from the driver, map plane 0 of
 * each into our address space, and queue all buffers so capture can start.
 * Returns 0 on success, -1 on failure.
 */
int v4l2_init_buffer(void)
{
    unsigned int i;
    struct v4l2_requestbuffers reqbuf = {0};
    struct v4l2_buffer buf;

    /* Request the frame buffers */
    reqbuf.count = FRAMEBUFFER_COUNT;       // number of frame buffers
    reqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
    reqbuf.memory = V4L2_MEMORY_MMAP;

    if (0 > ioctl(v4l2_fd, VIDIOC_REQBUFS, &reqbuf)) {
        fprintf(stderr, "ioctl error: VIDIOC_REQBUFS: %s\n", strerror(errno));
        return -1;
    }

    /* Query each buffer and establish the memory mapping */
    for (i = 0; i < FRAMEBUFFER_COUNT; i++) {

        struct v4l2_plane planes[FMT_NUM_PLANES];

        memset(planes, 0, sizeof(planes));  /* don't hand the kernel stack garbage */
        buf = (struct v4l2_buffer) {0};
        buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
        buf.memory = V4L2_MEMORY_MMAP;
        buf.index = i;
        buf.m.planes = planes;
        buf.length = FMT_NUM_PLANES;

        /* BUG FIX: VIDIOC_QUERYBUF was unchecked; on failure the plane
         * length/offset used below would be garbage. */
        if (0 > ioctl(v4l2_fd, VIDIOC_QUERYBUF, &buf)) {
            fprintf(stderr, "ioctl error: VIDIOC_QUERYBUF: %s\n", strerror(errno));
            return -1;
        }

        double_camera_buf_infos[buf.index].length = buf.m.planes[0].length;
        double_camera_buf_infos[buf.index].start = mmap(NULL, buf.m.planes[0].length,
                PROT_READ | PROT_WRITE, MAP_SHARED,
                v4l2_fd, buf.m.planes[0].m.mem_offset);
        if (MAP_FAILED == double_camera_buf_infos[buf.index].start) {
            perror("mmap error");
            return -1;
        }
    }

    /* Queue all buffers */
    for (i = 0; i < FRAMEBUFFER_COUNT; i++) {

        struct v4l2_plane planes[FMT_NUM_PLANES];

        memset(planes, 0, sizeof(planes));
        buf = (struct v4l2_buffer) {0};
        buf.type    = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
        buf.memory  = V4L2_MEMORY_MMAP;   /* was assigned twice in the original */
        buf.index   = i;
        buf.m.planes = planes;
        buf.length = FMT_NUM_PLANES;
        if (0 > ioctl(v4l2_fd, VIDIOC_QBUF, &buf)) {
            fprintf(stderr, "ioctl error: VIDIOC_QBUF: %s\n", strerror(errno));
            return -1;
        }
    }

    return 0;
}

/*
 * Start streaming: the driver begins filling the queued capture buffers.
 * Returns 0 on success, -1 on failure.
 */
int v4l2_stream_on(void)
{
    enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
    int rc = ioctl(v4l2_fd, VIDIOC_STREAMON, &type);

    if (rc < 0) {
        fprintf(stderr, "ioctl error: VIDIOC_STREAMON: %s\n", strerror(errno));
        return -1;
    }

    return 0;
}


/*
 * Dequeue FRAMEBUFFER_COUNT filled buffers from the driver, copy each one
 * into the corresponding YUV_buffer slot, and re-queue the buffer.
 *
 * NOTE(review): both ioctls are unchecked — on DQBUF failure buf.index is
 * stale and the memcpy reads an arbitrary buffer; consider checking returns.
 * Assumes YUV_buffer[buf.index] is at least double_camera_buf_infos[...].length
 * bytes (main() allocates 1920*1088*1.5) — confirm against the driver's
 * reported plane length.
 */
void v4l2_read_data(void)
{
    unsigned int i;
    struct v4l2_buffer buf;
    struct v4l2_plane planes[FMT_NUM_PLANES];
    buf = (struct v4l2_buffer) {0};

    buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
    buf.memory = V4L2_MEMORY_MMAP;

    buf.m.planes = planes;
    buf.length = FMT_NUM_PLANES;

    for(i = 0; i < FRAMEBUFFER_COUNT; i++) {

        ioctl(v4l2_fd, VIDIOC_DQBUF, &buf);     // dequeue a filled buffer; driver sets buf.index
        
        // Copy the mmap'd camera data into the global YUV_buffer slot
        memcpy(YUV_buffer[buf.index], double_camera_buf_infos[buf.index].start, double_camera_buf_infos[buf.index].length);
 
        // Re-queue the buffer so the driver can refill it
        ioctl(v4l2_fd, VIDIOC_QBUF, &buf); 
    }
}

  encode.cpp

#include "/home/alientek/linux/07_video_net/include/encode.hpp"
#include "/home/alientek/linux/07_video_net/include/venc_queue.h"
#include "/home/alientek/linux/07_video_net/include/ffmpeg_init.h"
#include "/home/alientek/linux/07_video_net/include/camera.h"


#include <pthread.h>

using namespace std;

// Optional dump file for encoded output (not used in this file).
FILE* fp_output = NULL;

//------------------------------------------------------------------------------
// Encoder state shared between the init path and the worker threads.
MpiEncTestData *encoder_params_ptr = NULL;  // MPP encoder context/parameters

MppApi *mpi_globle;  // cached MPP API vtable (set in test_mpp_run_yuv_init)
MppCtx ctx_globle;   // cached MPP context  (set in test_mpp_run_yuv_init)

// True until YuvtoH264() performs one-time encoder init; never reset here.
bool first_frame_flg = true;
static void mpp_enc_cfg_set_default(MppEncCfgSet *cfg);

//------------------------------------------------------------------------------
// Purpose: allocate and initialize the MPP encode test context.
// Copies the MpiEncTestCmd settings into a freshly allocated MpiEncTestData
// and derives stride, input frame size and FBC header size from the format.
// On calloc failure, *data is set to NULL and MPP_ERR_MALLOC is returned.
//------------------------------------------------------------------------------
MPP_RET test_ctx_init(MpiEncTestData **data, MpiEncTestCmd *cmd)
{
    MpiEncTestData *p = NULL;
    MPP_RET ret = MPP_OK; 
 
    if (!data || !cmd)
    {
        mpp_err_f("invalid input data %p cmd %p\n", data, cmd);
        return MPP_ERR_NULL_PTR;
    }
 
    p = mpp_calloc(MpiEncTestData, 1);
    if (!p)
    {
        mpp_err_f("create MpiEncTestData failed\n");
        ret = MPP_ERR_MALLOC;
        goto RET;
    }

    // get paramter from cmd
    p->width        = cmd->width;
    p->height       = cmd->height;
    // strides are rounded up to a multiple of 16 for the hardware encoder
    p->hor_stride   = (MPP_ALIGN(cmd->width, 16));
    p->ver_stride   = (MPP_ALIGN(cmd->height, 16));
    p->fmt          = cmd->format;
    p->type         = cmd->type;
    p->bps          = cmd->bps_target;
    p->bps_min      = cmd->bps_min;
    p->bps_max      = cmd->bps_max;
    p->rc_mode      = cmd->rc_mode;
    p->num_frames   = cmd->num_frames;
    if (cmd->type == MPP_VIDEO_CodingMJPEG && p->num_frames == 0) {
        mpp_log("jpege default encode only one frame. Use -n [num] for rc case\n");
        p->num_frames   = 1;
    }
    p->gop_mode     = cmd->gop_mode;
    p->gop_len      = cmd->gop_len;
    p->vi_len       = cmd->vi_len;

    p->fps_in_flex  = cmd->fps_in_flex;
    p->fps_in_den   = cmd->fps_in_den;
    p->fps_in_num   = cmd->fps_in_num;
    p->fps_out_flex = cmd->fps_out_flex;
    p->fps_out_den  = cmd->fps_out_den;
    p->fps_out_num  = cmd->fps_out_num; 

    // update resource parameter: input buffer size depends on pixel format
    switch (p->fmt & MPP_FRAME_FMT_MASK) {
    case MPP_FMT_YUV420SP:
    case MPP_FMT_YUV420P: {
        // 4:2:0 — 1.5 bytes per pixel
        p->frame_size = MPP_ALIGN(p->hor_stride, 16) * MPP_ALIGN(p->ver_stride, 16) * 3 / 2;
    } break;

    case MPP_FMT_YUV422_YUYV :
    case MPP_FMT_YUV422_YVYU :
    case MPP_FMT_YUV422_UYVY :
    case MPP_FMT_YUV422_VYUY :
    case MPP_FMT_YUV422P :
    case MPP_FMT_YUV422SP :
    case MPP_FMT_RGB444 :
    case MPP_FMT_BGR444 :
    case MPP_FMT_RGB555 :
    case MPP_FMT_BGR555 :
    case MPP_FMT_RGB565 :
    case MPP_FMT_BGR565 : {
        // 4:2:2 and 16-bit RGB — 2 bytes per pixel
        p->frame_size = MPP_ALIGN(p->hor_stride, 64) * MPP_ALIGN(p->ver_stride, 64) * 2;
    } break;

    default: {
        // everything else (e.g. 32-bit RGB) — 4 bytes per pixel
        p->frame_size = MPP_ALIGN(p->hor_stride, 64) * MPP_ALIGN(p->ver_stride, 64) * 4;
    } break;
    }

    // FBC (frame buffer compression) formats need extra header space
    if (MPP_FRAME_FMT_IS_FBC(p->fmt))
        p->header_size = MPP_ALIGN(MPP_ALIGN(p->width, 16) * MPP_ALIGN(p->height, 16) / 16, SZ_4K);
    else
        p->header_size = 0;
   

RET:
    *data = p;
    return ret;
}

//------------------------------------------------------------------------------
// Purpose: push all encoder parameters into the MPP encoder.
// Fills the prep/rc/codec sections of p->cfg, applies them with
// MPP_ENC_SET_CFG, then sets SEI and (for H.264/H.265) header-mode options.
//------------------------------------------------------------------------------

MPP_RET test_mpp_setup(MpiEncTestData *p)
{
    MPP_RET ret;
    MppApi *mpi;
    MppCtx ctx;
    MppEncCfg cfg;

    if (NULL == p)
        return MPP_ERR_NULL_PTR;

    mpi = p->mpi;
    ctx = p->ctx;
    cfg = p->cfg;
 

    /* setup default parameter: 30 fps in/out unless already configured */
    if (p->fps_in_den == 0)
        p->fps_in_den = 1;
    if (p->fps_in_num == 0)
        p->fps_in_num = 30;
    if (p->fps_out_den == 0)
        p->fps_out_den = 1;
    if (p->fps_out_num == 0)
        p->fps_out_num = 30;

    /* default bitrate heuristic: width*height/8 bits per frame times fps */
    if (!p->bps)
        p->bps = p->width * p->height / 8 * (p->fps_out_num / p->fps_out_den);


    /* preprocessing: geometry, stride and pixel format of the input frames */
    mpp_enc_cfg_set_s32(cfg, "prep:width", p->width);
    mpp_enc_cfg_set_s32(cfg, "prep:height", p->height);
    mpp_enc_cfg_set_s32(cfg, "prep:hor_stride", p->hor_stride);
    mpp_enc_cfg_set_s32(cfg, "prep:ver_stride", p->ver_stride);
    mpp_enc_cfg_set_s32(cfg, "prep:format", p->fmt);

    mpp_enc_cfg_set_s32(cfg, "rc:mode", p->rc_mode);

    /* rate-control bounds depend on the RC mode */
    switch (p->rc_mode) {
    case MPP_ENC_RC_MODE_FIXQP : {
        /* do not set bps on fix qp mode */
    } break;
    case MPP_ENC_RC_MODE_CBR : {
        /* CBR mode has narrow bound */
        mpp_enc_cfg_set_s32(cfg, "rc:bps_target", p->bps);
        mpp_enc_cfg_set_s32(cfg, "rc:bps_max", p->bps_max ? p->bps_max : p->bps * 17 / 16);
        mpp_enc_cfg_set_s32(cfg, "rc:bps_min", p->bps_min ? p->bps_min : p->bps * 15 / 16);
    } break;
    case MPP_ENC_RC_MODE_VBR : {
        /* VBR mode has wide bound */
        mpp_enc_cfg_set_s32(cfg, "rc:bps_target", p->bps);
        mpp_enc_cfg_set_s32(cfg, "rc:bps_max", p->bps_max ? p->bps_max : p->bps * 17 / 16);
        mpp_enc_cfg_set_s32(cfg, "rc:bps_min", p->bps_min ? p->bps_min : p->bps * 1 / 16);
    } break;
    default : {
        mpp_err_f("unsupport encoder rc mode %d\n", p->rc_mode);
    } break;
    }

    /* fix input / output frame rate */
    mpp_enc_cfg_set_s32(cfg, "rc:fps_in_flex", p->fps_in_flex);
    mpp_enc_cfg_set_s32(cfg, "rc:fps_in_num", p->fps_in_num);
    mpp_enc_cfg_set_s32(cfg, "rc:fps_in_denorm", p->fps_in_den);
    mpp_enc_cfg_set_s32(cfg, "rc:fps_out_flex", p->fps_out_flex);
    mpp_enc_cfg_set_s32(cfg, "rc:fps_out_num", p->fps_out_num);
    mpp_enc_cfg_set_s32(cfg, "rc:fps_out_denorm", p->fps_out_den);
    /* default GOP: two seconds of output frames */
    mpp_enc_cfg_set_s32(cfg, "rc:gop", p->gop_len ? p->gop_len : p->fps_out_num * 2);

    /* drop frame or not when bitrate overflow */
    mpp_enc_cfg_set_u32(cfg, "rc:drop_mode", MPP_ENC_RC_DROP_FRM_DISABLED);
    mpp_enc_cfg_set_u32(cfg, "rc:drop_thd", 20);        /* 20% of max bps */
    mpp_enc_cfg_set_u32(cfg, "rc:drop_gap", 1);         /* Do not continuous drop frame */

    /* setup codec  */
    mpp_enc_cfg_set_s32(cfg, "codec:type", p->type);
    switch (p->type) {
    case MPP_VIDEO_CodingAVC : {
        /*
         * H.264 profile_idc parameter
         * 66  - Baseline profile
         * 77  - Main profile
         * 100 - High profile
         */
        mpp_enc_cfg_set_s32(cfg, "h264:profile", 100);
        /*
         * H.264 level_idc parameter
         * 10 / 11 / 12 / 13    - qcif@15fps / cif@7.5fps / cif@15fps / cif@30fps
         * 20 / 21 / 22         - cif@30fps / half-D1@25fps / D1@12.5fps
         * 30 / 31 / 32         - D1@25fps / 720p@30fps / 720p@60fps
         * 40 / 41 / 42         - 1080p@30fps / 1080p@30fps / 1080p@60fps
         * 50 / 51 / 52         - 4K@30fps
         */
        mpp_enc_cfg_set_s32(cfg, "h264:level", 40);
        mpp_enc_cfg_set_s32(cfg, "h264:cabac_en", 1);
        mpp_enc_cfg_set_s32(cfg, "h264:cabac_idc", 0);
        mpp_enc_cfg_set_s32(cfg, "h264:trans8x8", 1);

        /* QP ranges: tight around the init QP for FIXQP, wide for rate control */
        if (p->rc_mode == MPP_ENC_RC_MODE_FIXQP) {
            mpp_enc_cfg_set_s32(cfg, "h264:qp_init", 20);
            mpp_enc_cfg_set_s32(cfg, "h264:qp_max", 16);
            mpp_enc_cfg_set_s32(cfg, "h264:qp_min", 16);
            mpp_enc_cfg_set_s32(cfg, "h264:qp_max_i", 20);
            mpp_enc_cfg_set_s32(cfg, "h264:qp_min_i", 20);
        } else {
            mpp_enc_cfg_set_s32(cfg, "h264:qp_init", 26);
            mpp_enc_cfg_set_s32(cfg, "h264:qp_max", 51);
            mpp_enc_cfg_set_s32(cfg, "h264:qp_min", 10);
            mpp_enc_cfg_set_s32(cfg, "h264:qp_max_i", 46);
            mpp_enc_cfg_set_s32(cfg, "h264:qp_min_i", 24);
        }
    } break;
    case MPP_VIDEO_CodingMJPEG : {
        mpp_enc_cfg_set_s32(cfg, "jpeg:q_factor", 80);
        mpp_enc_cfg_set_s32(cfg, "jpeg:qf_max", 99);
        mpp_enc_cfg_set_s32(cfg, "jpeg:qf_min", 1);
    } break;
    case MPP_VIDEO_CodingVP8 : {
        mpp_enc_cfg_set_s32(cfg, "vp8:qp_init", 40);
        mpp_enc_cfg_set_s32(cfg, "vp8:qp_max",  127);
        mpp_enc_cfg_set_s32(cfg, "vp8:qp_min",  0);
        mpp_enc_cfg_set_s32(cfg, "vp8:qp_max_i", 127);
        mpp_enc_cfg_set_s32(cfg, "vp8:qp_min_i", 0);
    } break;
    case MPP_VIDEO_CodingHEVC : {
        mpp_enc_cfg_set_s32(cfg, "h265:qp_init", p->rc_mode == MPP_ENC_RC_MODE_FIXQP ? -1 : 26);
        mpp_enc_cfg_set_s32(cfg, "h265:qp_max", 51);
        mpp_enc_cfg_set_s32(cfg, "h265:qp_min", 10);
        mpp_enc_cfg_set_s32(cfg, "h265:qp_max_i", 46);
        mpp_enc_cfg_set_s32(cfg, "h265:qp_min_i", 24);
    } break;
    default : {
        mpp_err_f("unsupport encoder coding type %d\n", p->type);
    } break;
    }

    p->split_mode = 0;
    p->split_arg = 0;

    /* allow slice/split configuration via environment variables */
    mpp_env_get_u32("split_mode", &p->split_mode, MPP_ENC_SPLIT_NONE);
    mpp_env_get_u32("split_arg", &p->split_arg, 0);

    if (p->split_mode) {
        mpp_log("%p split_mode %d split_arg %d\n", ctx, p->split_mode, p->split_arg);
        mpp_enc_cfg_set_s32(cfg, "split:mode", p->split_mode);
        mpp_enc_cfg_set_s32(cfg, "split:arg", p->split_arg);
    }

    /* apply the assembled configuration to the encoder */
    ret = mpi->control(ctx, MPP_ENC_SET_CFG, cfg);
    if (ret) {
        mpp_err("mpi control enc set cfg failed ret %d\n", ret);
        goto RET;
    }

    /* optional */
    p->sei_mode = MPP_ENC_SEI_MODE_ONE_FRAME;
    ret = mpi->control(ctx, MPP_ENC_SET_SEI_CFG, &p->sei_mode);
    if (ret) {
        mpp_err("mpi control enc set sei cfg failed ret %d\n", ret);
        goto RET;
    }

    /* emit SPS/PPS before every IDR frame so a stream can be joined mid-way */
    if (p->type == MPP_VIDEO_CodingAVC || p->type == MPP_VIDEO_CodingHEVC) {
        p->header_mode = MPP_ENC_HEADER_MODE_EACH_IDR;
        ret = mpi->control(ctx, MPP_ENC_SET_HEADER_MODE, &p->header_mode);
        if (ret) {
            mpp_err("mpi control enc set header mode failed ret %d\n", ret);
            goto RET;
        }
    }

    //RK_U32 gop_mode = p->gop_mode;

    /*
    mpp_env_get_u32("gop_mode", &gop_mode, gop_mode);
    if (gop_mode) {
        MppEncRefCfg ref;

        mpp_enc_ref_cfg_init(&ref);

        if (p->gop_mode < 4)
            mpi_enc_gen_ref_cfg(ref, gop_mode);
        else
            mpi_enc_gen_smart_gop_ref_cfg(ref, p->gop_len, p->vi_len);

        ret = mpi->control(ctx, MPP_ENC_SET_REF_CFG, ref);
        if (ret) {
            mpp_err("mpi control enc set ref cfg failed ret %d\n", ret);
            goto RET;
        }
        mpp_enc_ref_cfg_deinit(&ref);
    }
    */

    /* setup test mode by env */
    mpp_env_get_u32("osd_enable", &p->osd_enable, 0);
    mpp_env_get_u32("osd_mode", &p->osd_mode, MPP_ENC_OSD_PLT_TYPE_DEFAULT);
    mpp_env_get_u32("roi_enable", &p->roi_enable, 0);
    mpp_env_get_u32("user_data_enable", &p->user_data_enable, 0);

RET:
    return ret;
}

//------------------------------------------------------------------------------
// Purpose: copy one YUV420SP (NV12) frame from YUV_buffer into the MPP input
// buffer: Y plane first, then the interleaved UV plane at a 16-aligned offset.
// NOTE(review): rows are copied as one contiguous width*height block, which
// assumes the source lines are tightly packed (stride == width); with
// width=1920 the 16-alignment is a no-op — confirm for other resolutions.
//------------------------------------------------------------------------------
void read_yuv_buffer(RK_U8 *buf, unsigned short* YUV_buffer, RK_U32 width, RK_U32 height)
{
    RK_U8 *buf_y = buf;
    // UV plane begins after the 16-aligned Y plane in the destination
    RK_U8 *buf_uv = buf + MPP_ALIGN(width, 16) * MPP_ALIGN(height, 16);

    // Source is treated as raw bytes despite the unsigned short* type
    RK_U8 *yuvImg_y = (unsigned char*)YUV_buffer;
    RK_U8 *yuvImg_uv = yuvImg_y + MPP_ALIGN(width, 16) * MPP_ALIGN(height, 16);

    // Y: width*height bytes; UV: half that (4:2:0 subsampling)
    memcpy(buf_y, yuvImg_y, width * height);
    memcpy(buf_uv, yuvImg_uv, width * height / 2);

}
 
//------------------------------------------------------------------------------
// Purpose: encode one YUV420SP frame with the MPP encoder.
// Copies YUV_buffer into the pre-allocated MPP input buffer, submits it, and
// returns the encoded bitstream in H264_buf (allocated here with new[]; the
// caller owns it and must release it with delete[]) with its size in length.
//------------------------------------------------------------------------------
MPP_RET test_mpp_run_yuv(unsigned short* YUV_buffer, MppApi *mpi, MppCtx &ctx, unsigned char* &H264_buf, int &length)
{
    MpiEncTestData *p = encoder_params_ptr;
    MPP_RET ret;
 
    MppFrame frame = NULL;
    MppPacket packet = NULL;

    /* stage the input pixels into the encoder's DRM buffer */
    void *buf = mpp_buffer_get_ptr(p->frm_buf);
    read_yuv_buffer((RK_U8*)buf, YUV_buffer, p->width, p->height);
    ret = mpp_frame_init(&frame);
    if (ret)
    {
        mpp_err_f("mpp_frame_init failed\n");
        goto RET;
    }
    /* describe the frame geometry/format and attach the input buffer */
    mpp_frame_set_width(frame, p->width);
    mpp_frame_set_height(frame, p->height);
    mpp_frame_set_hor_stride(frame, p->hor_stride);
    mpp_frame_set_ver_stride(frame, p->ver_stride);
    mpp_frame_set_fmt(frame, p->fmt);
    mpp_frame_set_buffer(frame, p->frm_buf);
    mpp_frame_set_eos(frame, p->frm_eos);
 
    ret = mpi->encode_put_frame(ctx, frame);
    if (ret)
    {
        mpp_err("mpp encode put frame failed\n");
        goto RET;
    }
    ret = mpi->encode_get_packet(ctx, &packet);
    if (ret)
    {
        mpp_err("mpp encode get packet failed\n");
        goto RET;
    }
    if (packet)
    {
        void *ptr   = mpp_packet_get_pos(packet);
        size_t len  = mpp_packet_get_length(packet);
        p->pkt_eos = mpp_packet_get_eos(packet);

        /* hand a private copy of the bitstream to the caller */
        H264_buf = new unsigned char[len];
        memcpy(H264_buf, ptr, len);
        length = len;
        mpp_packet_deinit(&packet);
        p->stream_size += len;
        p->frame_count++;
        if (p->pkt_eos)
        {
            mpp_log("found last packet\n");
            mpp_assert(p->frm_eos);
        }
    }
RET:
    /* BUG FIX: the MppFrame was never released, leaking one frame object
     * per encoded frame. */
    if (frame)
        mpp_frame_deinit(&frame);
    return ret;
}

// Process-wide singleton holding the MppTrie used to resolve "prep:width"-style
// configuration keys.  Copying is disabled; construction happens lazily inside
// get() under a static mutex.
class MppEncCfgService
{
private:
    MppEncCfgService();
    ~MppEncCfgService();
    MppEncCfgService(const MppEncCfgService &);            // non-copyable
    MppEncCfgService &operator=(const MppEncCfgService &); // non-assignable

    // Config-key lookup table; presumably built by the private ctor (not shown) — confirm.
    MppTrie mCfgApi;

public:
    // Returns the singleton instance; the mutex guards first construction.
    static MppEncCfgService *get() {
        static Mutex lock;
        static MppEncCfgService instance;

        AutoMutex auto_lock(&lock);
        return &instance;
    }

    MppTrie get_api() { return mCfgApi; };
};

// Fill a freshly allocated encoder config with neutral defaults: all color
// metadata (matrix, primaries, transfer characteristic) left unspecified.
static void mpp_enc_cfg_set_default(MppEncCfgSet *cfg)
{
    cfg->prep.color = MPP_FRAME_SPC_UNSPECIFIED;
    cfg->prep.colorprim = MPP_FRAME_PRI_UNSPECIFIED;
    cfg->prep.colortrc = MPP_FRAME_TRC_UNSPECIFIED;
}

// Allocate and initialize an MppEncCfg handle: zeroed impl, size stamp,
// config-key API from the singleton service, and neutral defaults.
// Returns MPP_ERR_NULL_PTR / MPP_ERR_NOMEM on failure (with *cfg = NULL).
MPP_RET mpp_enc_cfg_init(MppEncCfg *cfg)
{
    if (NULL == cfg) {
        mpp_err_f("invalid NULL input config\n");
        return MPP_ERR_NULL_PTR;
    }

    MppEncCfgImpl *impl = mpp_calloc(MppEncCfgImpl, 1);
    if (NULL == impl) {
        mpp_err_f("create encoder config failed %p\n", impl);
        *cfg = NULL;
        return MPP_ERR_NOMEM;
    }

    impl->size = sizeof(*impl);
    impl->api = MppEncCfgService::get()->get_api();
    mpp_enc_cfg_set_default(&impl->cfg);

    //mpp_env_get_u32("mpp_enc_cfg_debug", &mpp_enc_cfg_debug, 0);

    *cfg = impl;
    return MPP_OK;
}
 
//------------------------------------------------------------------------------
// Purpose: one-time MPP encoder initialization for H.264 (AVC) at width x height.
// Creates the context, buffers and configuration, caches the MPP handles in the
// globals mpi_globle/ctx_globle, and returns the SPS/PPS header in SPS_buf
// (allocated with new[]; the caller owns it and should release it with delete[]).
// NOTE(review): on any failure it jumps to MPP_TEST_OUT and returns the
// partially initialized context without releasing already-acquired resources —
// acceptable for one-shot init at startup, but confirm before reuse.
//------------------------------------------------------------------------------
MpiEncTestData *test_mpp_run_yuv_init(MpiEncTestData *p, int width , int height, unsigned char* &SPS_buf, int &SPS_length)
{
    MPP_RET ret;
    MppPollType timeout = MPP_POLL_BLOCK;    
    // Fixed command set: H.264, YUV420SP (NV12) input
    MpiEncTestCmd cmd;
    memset(&cmd, 0, sizeof(cmd));
    cmd.width = width;
    cmd.height = height;
    cmd.type = MPP_VIDEO_CodingAVC;
    cmd.format = MPP_FMT_YUV420SP;

    ret = test_ctx_init(&p, &cmd);
    if (ret)
    {
        mpp_err_f("test data init failed ret %d\n", ret);
        goto MPP_TEST_OUT;
    }   

    mpp_log("p->frame_size = %d----------------\n", p->frame_size);

    // DRM buffer group for zero-copy hardware access
    ret = mpp_buffer_group_get_internal(&p->buf_grp, MPP_BUFFER_TYPE_DRM);
    if (ret) {
        mpp_err_f("failed to get mpp buffer group ret %d\n", ret);
        goto MPP_TEST_OUT;
    }

    // Input frame buffer (pixel data plus optional FBC header)
    ret = mpp_buffer_get(p->buf_grp, &p->frm_buf, p->frame_size + p->header_size);
    if (ret) {
        mpp_err_f("failed to get buffer for input frame ret %d\n", ret);
        goto MPP_TEST_OUT;
    }

    // Output packet buffer
    ret = mpp_buffer_get(p->buf_grp, &p->pkt_buf, p->frame_size);
    if (ret) {
        mpp_err_f("failed to get buffer for output packet ret %d\n", ret);
        goto MPP_TEST_OUT;
    }

    // encoder demo: create the MPP context and API vtable
    ret = mpp_create(&p->ctx, &p->mpi);
    if (ret) {
        mpp_err("mpp_create failed ret %d\n", ret);
        goto MPP_TEST_OUT;
    }

    mpp_log("%p mpi_enc_test encoder test start w %d h %d type %d\n",
            p->ctx, p->width, p->height, p->type);

    // Block on encode_get_packet until a packet is ready
    ret = p->mpi->control(p->ctx, MPP_SET_OUTPUT_TIMEOUT, &timeout);
    if (MPP_OK != ret) {
        mpp_err("mpi control set output timeout %d ret %d\n", timeout, ret);
        goto MPP_TEST_OUT;
    }

    ret = mpp_init(p->ctx, MPP_CTX_ENC, p->type);
    if (ret) {
        mpp_err("mpp_init failed ret %d\n", ret);
        goto MPP_TEST_OUT;
    }

    ret = mpp_enc_cfg_init(&p->cfg);
    if (ret) {
        mpp_err_f("mpp_enc_cfg_init failed ret %d\n", ret);
        goto MPP_TEST_OUT;
    }    

    // Push prep/rc/codec parameters into the encoder
    ret = test_mpp_setup(p);
    if (ret)
    {
        mpp_err_f("test mpp setup failed ret %d\n", ret);
        goto MPP_TEST_OUT;
    }
 
    // Cache the handles for the worker threads (mpp_venc_thread)
    mpi_globle = p->mpi;
    ctx_globle = p->ctx;
    
    if (p->type == MPP_VIDEO_CodingAVC)
    {
        MppPacket packet = NULL;
        ret = p->mpi->control(p->ctx, MPP_ENC_GET_EXTRA_INFO, &packet);
        if (ret)
        {
            mpp_err("mpi control enc get extra info failed\n");
        }
        //get and write sps/pps for H.264
        if (packet)
        {
            void *ptr    = mpp_packet_get_pos(packet);
            size_t len    = mpp_packet_get_length(packet);
            SPS_buf = new unsigned char[len];
            memcpy(SPS_buf, ptr, len);
            SPS_length = len;
            // NOTE(review): packet is only cleared, not deinit'd — presumably
            // the extra-info packet is owned by the encoder; confirm.
            packet = NULL;
        }
    }
    return p;
MPP_TEST_OUT:
    return p;
}

/*
 * Encode worker thread: pull captured frames from the camera, encode each
 * one to H.264, and push a malloc'd copy onto venc_queue.  The queued
 * packet (and its data) is freed by the dequeue thread, never here.
 */
void *mpp_venc_thread(void *args)
{
    unsigned char *H264_buf = NULL;
    int H264_buf_length = 0;
    int index;
    while (1)
    {
        v4l2_read_data();
        /* three YUV buffers are filled per capture round (see main()) */
        for (index = 0; index < 3; index++){
            test_mpp_run_yuv(YUV_buffer[index], mpi_globle, ctx_globle, H264_buf, H264_buf_length);

            /* wrap the encoded data in a queue packet; BUG FIX: check both
             * allocations instead of dereferencing NULL on OOM */
            video_data_packet_t *video_data_packet = (video_data_packet_t *)malloc(sizeof(video_data_packet_t));
            if (video_data_packet == NULL)
            {
                delete[] H264_buf;
                H264_buf = NULL;
                continue;
            }
            video_data_packet->data = (char*)malloc(H264_buf_length);
            if (video_data_packet->data == NULL)
            {
                free(video_data_packet);
                delete[] H264_buf;
                H264_buf = NULL;
                continue;
            }
            memcpy(video_data_packet->data, H264_buf, H264_buf_length);
            video_data_packet->length = H264_buf_length;
            /* ownership of video_data_packet passes to the queue here */
            enQueue(venc_queue, video_data_packet);
            printf("H264_buf_length: %d! \n", H264_buf_length);

            /*
             * BUG FIX: H264_buf is allocated with new[] in test_mpp_run_yuv,
             * so it must be released with delete[]; plain delete is undefined
             * behavior.  YUV_buffer[index] is reused and must NOT be freed.
             */
            delete[] H264_buf;
            H264_buf = NULL;

        }
    }
}

// Convert a queued H.264 packet into the shared AVPacket (video_avpacket,
// defined in ffmpeg_init.c).  Returns NULL when the input is NULL or the
// buffer reallocation fails; otherwise returns video_avpacket.
AVPacket *get_videopacket_from_queue(video_data_packet_t *video_data_packet)
{
    if (video_data_packet != NULL)
    {
        /*
     av_buffer_realloc behavior (summary):
   1.  if the AVBufferRef is NULL, allocate a new buffer via av_realloc and wrap
       it in a fresh AVBufferRef via av_buffer_create;
   2.  if the existing buffer already has the requested size, return 0;
   3.  if the AVBuffer has BUFFER_FLAG_REALLOCATABLE set, or is not writable, or
       the AVBufferRef data pointer differs from the AVBuffer data pointer,
       recursively allocate a new buffer and copy the data over;
   4.  otherwise call av_realloc to resize the buffer in place.
 */
        // NOTE(review): the +70 slack presumably leaves headroom for muxer
        // padding — confirm the required amount.
        int ret = av_buffer_realloc(&video_avpacket->buf, video_data_packet->length + 70);
        if (ret < 0)
        {
            return NULL;
        }
        video_avpacket->size = video_data_packet->length;                                      // payload length from the rk3588 encoder
        memcpy(video_avpacket->buf->data, video_data_packet->data, video_data_packet->length); // copy encoded bytes into the AVPacket buffer
        video_avpacket->data = video_avpacket->buf->data;                                      // point pkt->data at pkt->buf->data
        video_avpacket->flags |= AV_PKT_FLAG_KEY;                                              // mark every packet as a keyframe

        return video_avpacket;
    }
    else
    {
        return NULL;
    }
}

// Tag the packet with its stream index, rescale its timestamps from the codec
// time base to the stream time base, then hand it to the interleaving muxer.
// Returns the av_interleaved_write_frame() result (0 on success).
int write_ffmpeg_avpacket(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt)
{
    pkt->stream_index = st->index;
    av_packet_rescale_ts(pkt, *time_base, st->time_base);

    return av_interleaved_write_frame(fmt_ctx, pkt);
}


/*
 * Mux worker thread: pop encoded packets from venc_queue, stamp a
 * monotonically increasing PTS, and write them to the SRT output.
 * This thread owns and frees each dequeued packet.
 */
void *mpp_dequeue_thread(void *args)
{
    int ret;
    while (1)
    {
        static int64_t video_pts = 0;

        /* deQueue blocks until a packet is available */
        video_data_packet_t *de_data_packet = deQueue(venc_queue);
        AVPacket *video_packet = get_videopacket_from_queue(de_data_packet);

        if (video_packet != NULL)
        {
            video_packet->pts = video_pts++; // PTS advances one tick per frame
            printf("video_packet->pts = %ld\n", video_packet->pts);

            /* BUG FIX: only write when the conversion succeeded; the old
             * code passed a possibly-NULL packet to the muxer. */
            ret = write_ffmpeg_avpacket(output_ctx, &video_avcodec_ctx->time_base, video_stream, video_packet);
            if (ret != 0)
            {
                printf("write video avpacket error");
            }
        }

        if (de_data_packet != NULL)
        {
            /*
             * BUG FIX: both the packet struct and its data were allocated
             * with malloc() in the producer threads, so they must be
             * released with free(); `delete` on malloc'd memory is
             * undefined behavior.
             */
            free(de_data_packet->data);
            free(de_data_packet);
        }
    }

}
 
//------------------------------------------------------------------------------
// Purpose: encode YUV420 frames to H.264.  On the first call it initializes
// the MPP encoder, queues the SPS/PPS header so the muxer sends it first,
// then spawns the encode and dequeue worker threads and parks this thread.
//------------------------------------------------------------------------------
void YuvtoH264(int width, int height)
{
    unsigned char *SPS_buf = NULL;
    int SPS_buf_length = 0;
    
    if(first_frame_flg == true)
    {
        encoder_params_ptr = test_mpp_run_yuv_init(encoder_params_ptr, width, height, SPS_buf, SPS_buf_length);

        /* Queue the SPS/PPS header; BUG FIX: guard against a NULL/empty
         * header (init failure) and check both allocations */
        if (SPS_buf != NULL && SPS_buf_length > 0)
        {
            video_data_packet_t *video_data_packet = (video_data_packet_t *)malloc(sizeof(video_data_packet_t));
            if (video_data_packet != NULL)
            {
                video_data_packet->data = (char*)malloc(SPS_buf_length);
                if (video_data_packet->data != NULL)
                {
                    memcpy(video_data_packet->data, SPS_buf, SPS_buf_length);
                    video_data_packet->length = SPS_buf_length;
                    /* ownership passes to the queue; the dequeue thread frees it */
                    enQueue(venc_queue, video_data_packet);
                }
                else
                {
                    free(video_data_packet);
                }
            }

            /* BUG FIX: SPS_buf is allocated with new[] inside
             * test_mpp_run_yuv_init, so release it with delete[] —
             * plain delete is undefined behavior. */
            delete[] SPS_buf;
        }
        printf("first_frame! \n");
        printf("SPS length: %d! \n", SPS_buf_length);       
 
        /* encode thread: test_mpp_run_yuv runs inside mpp_venc_thread */
        pthread_t pid;
        pthread_create(&pid, NULL, mpp_venc_thread, NULL);

        /* dequeue/mux thread */
        pthread_create(&pid, NULL, mpp_dequeue_thread, NULL);

        /* park the calling thread; the workers keep the process alive */
        pthread_exit(NULL);
   
    }

}

// C-callable interface: encode the V4L2-captured YUV frames to H.264.
// Thin wrapper so that main.c (plain C) can drive the C++ encode path.
void YuvEncodeH264(int width, int height)
{
    YuvtoH264(width, height);
}

  ffmpeg_init.c

#include "/home/alientek/linux/07_video_net/include/ffmpeg_init.h"

/* FFmpeg output state, shared with encode.cpp via extern references. */
AVFormatContext *output_ctx;        // MPEG-TS over SRT muxer context
AVStream *video_stream;             // the single H.264 video stream
AVCodec *codec;                     // H.264 encoder descriptor
AVCodecContext *video_avcodec_ctx;  // codec parameters: size, bitrate, time base
AVPacket *video_avpacket;           // reusable packet refilled from the venc queue
AVFormatContext *pFormatCtx = NULL; // not used in this file

/*
 * Set up the ffmpeg output path: an MPEG-TS muxer streaming over SRT, with
 * one H.264 video stream whose parameters mirror the MPP encoder settings
 * (1920x1080, 30 fps, GOP 60).  Returns 0 on success, -1 on failure.
 */
int init_ffmpeg_output_module()
{  
    int ret;
    AVOutputFormat *fmt = NULL;

    avformat_network_init();

    /* target URL: update the IP address for your SRT server */
    ret = avformat_alloc_output_context2(&output_ctx, NULL, "mpegts", "srt://192.168.1.106:8080?streamid=uplive.sls.com/live/test1");
    if (ret < 0)
    {
        printf("avformat_alloc_output_context2 failed\n");
        return -1;
    }
    else
    {
        printf("avformat_alloc_output_context2 success\n");
    }

    video_stream = avformat_new_stream(output_ctx, NULL);
    if (!video_stream)
    {
        printf("Can't not avformat_new_stream\n");
        return 0;
    }
    else
    {
        printf("Success avformat_new_stream\n");
    }

    codec = avcodec_find_encoder(AV_CODEC_ID_H264);
    if (!(codec))
    {
        printf("Can't not find any encoder\n");
        return -1;
    }
    else
    {
        printf("Success find encoder\n");
    }

    video_stream->id = output_ctx->nb_streams - 1;
    video_avcodec_ctx = avcodec_alloc_context3(codec);
    if (!video_avcodec_ctx)
    {
        printf("Can't not allocate context3\n");
        return 0;
    }
    else
    {
        printf("Success allocate context3\n");
    }

    video_avcodec_ctx->bit_rate = 1920 * 1080 / 8 * (30 / 1); // matches the MPP default bps in test_mpp_setup()
    /* dimensions must be even */
    video_avcodec_ctx->width = 1920;  // video width
    video_avcodec_ctx->height = 1080; // video height

    video_stream->r_frame_rate.den = 1;            // frame rate denominator
    video_stream->r_frame_rate.num = 30;           // 30 fps, matching the MPP encoder
    video_stream->time_base = (AVRational){1, 30}; // stream time base, equal to the frame rate here

    video_avcodec_ctx->time_base = video_stream->time_base; // encoder time base
    video_avcodec_ctx->gop_size = 60;                       // GOP size, matching the MPP encoder (2s at 30fps)
    video_avcodec_ctx->pix_fmt = AV_PIX_FMT_NV12;           // pixel format

    if (output_ctx->oformat->flags & AVFMT_GLOBALHEADER)
    {
        video_avcodec_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    }

    /* BUG FIX: avcodec_open2 was unchecked */
    ret = avcodec_open2(video_avcodec_ctx, codec, NULL);
    if (ret < 0)
    {
        printf("avcodec_open2 failed\n");
        return -1;
    }
    avcodec_parameters_from_context(video_stream->codecpar, video_avcodec_ctx);

    // av_dump_format(video_avcodec_ctx, 0, "test.ts", 1);

    fmt = output_ctx->oformat;

    if (!(fmt->flags & AVFMT_NOFILE))
    {
        /* open the output "file" (the SRT connection); update the IP address */
        ret = avio_open(&output_ctx->pb, "srt://192.168.1.106:8080?streamid=uplive.sls.com/live/test1", AVIO_FLAG_WRITE);
        if (ret < 0)
        {
            // free_stream(output_ctx, video_stream);
            avformat_free_context(output_ctx);
            return -1;
        }
    }

    /* BUG FIX: avformat_write_header was unchecked */
    ret = avformat_write_header(output_ctx, NULL);
    if (ret < 0)
    {
        printf("avformat_write_header failed\n");
        return -1;
    }

    video_avpacket = av_packet_alloc();
    if (video_avpacket == NULL)
    {
        return -1;
    }

    /* BUG FIX: the function previously fell off the end without returning a
     * value from a non-void function — undefined behavior for the caller. */
    return 0;
}

  venc_queue.c

#include <stdio.h>
#include <stdlib.h>
#include "/home/alientek/linux/07_video_net/include/venc_queue.h" 

pthread_mutex_t videoMutex; // protects the queue's linked list
// condition variable: signalled by enQueue, waited on by deQueue
pthread_cond_t videoCond;

/*
 * Initialize the linked queue: set up the mutex and condition variable and
 * allocate a dummy head node.  Returns the head node, or NULL on allocation
 * failure (caller must check).
 */
Node *initQueue()
{
    pthread_mutex_init(&videoMutex, NULL); /* mutex init */
    pthread_cond_init(&videoCond, NULL);   /* condition variable init */

    /* BUG FIX: the malloc result was dereferenced unchecked */
    Node *queue = (Node *)malloc(sizeof(Node));
    if (queue == NULL)
        return NULL;
    queue->next = NULL;
    return queue;
}

/*
 * Enqueue: append data_packet at the tail of the list and wake any waiting
 * consumer.  Ownership of data_packet passes to the queue on success; on
 * allocation failure the packet is NOT queued and the caller retains it.
 */
void enQueue(Node *queue, video_data_packet_t *data_packet)
{
    /* BUG FIX: allocate (and check) the node before taking the lock, so an
     * OOM cannot leave the mutex held or dereference NULL. */
    Node *newNode = (Node *)malloc(sizeof(Node));
    if (newNode == NULL)
        return;
    newNode->data_packet = data_packet;
    newNode->next = NULL;

    pthread_mutex_lock(&videoMutex);
    Node *current = queue;
    while (current->next != NULL)
    {
        current = current->next;
    }
    current->next = newNode;
    pthread_cond_broadcast(&videoCond); /* wake consumers waiting in deQueue */
    pthread_mutex_unlock(&videoMutex);
}

/*
 * Dequeue: block until a packet is available, unlink the first node, and
 * return its payload.  The caller owns the returned packet and must free()
 * both it and its data (they were malloc'd by the producer).
 */
video_data_packet_t *deQueue(Node *queue)
{
    pthread_mutex_lock(&videoMutex);

    /* BUG FIX: pthread_cond_wait may wake spuriously, and another consumer
     * can drain the queue first — the predicate must be re-checked in a
     * loop, not a single `if`. */
    while (queue->next == NULL)
    {
        pthread_cond_wait(&videoCond, &videoMutex);
    }

    Node *node = queue->next;
    queue->next = node->next;
    video_data_packet_t *data_packet = node->data_packet;

    /* BUG FIX: the list node was leaked on every dequeue.  free(node) only
     * releases the Node itself; data_packet is a separate allocation that
     * the caller now owns. */
    free(node);

    pthread_mutex_unlock(&videoMutex);
    return data_packet;
}

三 实现效果

在这里插入图片描述
在这里插入图片描述

四 存在的问题

  一:和前一篇博客中的一样,视频最下面那几行偏绿。
  二:实测发现视频的延时有点大,将近2s,应该是程序中的内存复制memcpy过多,因此程序还需要优化。

Logo

智能硬件社区聚焦AI智能硬件技术生态,汇聚嵌入式AI、物联网硬件开发者,打造交流分享平台,同步全国赛事资讯、开展 OPC 核心人才招募,助力技术落地与开发者成长。

更多推荐