Screen-mirroring software needs a screenshot feature. The requirement is simple and clear: save whatever is currently displayed on the screen to an image file.
There are two ways to implement this.
The first is to run an adb command:
```
adb exec-out screencap -p > screenshot.png
```
Under the hood, the Android device runs screencap to capture the screen, and adb streams the output back into the local directory where the adb client is running.
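If the mirroring app wants to trigger this itself rather than from a shell, the same command can be launched from code. Below is a minimal sketch, assuming the project already links against Qt (qDebug is used later in this post) and that adb is available on the PATH; adbScreencap is only a hypothetical helper name, not part of the project:

```
#include <QProcess>
#include <QString>
#include <QStringList>

//Run "adb exec-out screencap -p" and redirect its stdout into savePath,
//which mirrors the shell redirection "> screenshot.png".
static bool adbScreencap(const QString& savePath)
{
    QProcess adb;
    adb.setStandardOutputFile(savePath);
    adb.start("adb", QStringList() << "exec-out" << "screencap" << "-p");
    //Wait up to 10 seconds for the capture to finish
    return adb.waitForFinished(10000)
        && adb.exitStatus() == QProcess::NormalExit
        && adb.exitCode() == 0;
}
```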
The second option is to convert the AVFrame data to RGB and save it as an image. This approach is more flexible, because the pixel data can be customized before it is written out, for example to add text or filters (see the sketch after the calling code below).
Here is the concrete implementation of the screenshot:
```
//Encode an AVFrame (e.g. YUV420) into an image such as JPEG or PNG, converting to the encoder's pixel format first if needed
static int frameToImage(AVFrame* frame, enum AVCodecID codecID, uint8_t* outbuf, size_t outbufSize)
{
int ret = 0;
AVPacket pkt;
AVCodecContext* ctx = NULL;
AVFrame* rgbFrame = NULL;
uint8_t* buffer = NULL;
struct SwsContext* swsContext = NULL;
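//NOTE: av_init_packet() is deprecated in newer FFmpeg releases; an AVPacket obtained from av_packet_alloc() is the modern replacement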
av_init_packet(&pkt);
const AVCodec* codec = avcodec_find_encoder(codecID);
if (!codec)
{
printf("avcodec_send_frame error %d", codecID);
goto end;
}
if (!codec->pix_fmts)
{
printf("unsupport pix format with codec %s", codec->name);
goto end;
}
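//Allocate and configure the encoder context; for a single still image only width/height/pix_fmt really matter, the bit-rate/GOP/thread settings have little effect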
ctx = avcodec_alloc_context3(codec);
ctx->bit_rate = 3000000;
ctx->width = frame->width;
ctx->height = frame->height;
ctx->time_base.num = 1;
ctx->time_base.den = 25;
ctx->gop_size = 10;
ctx->max_b_frames = 0;
ctx->thread_count = 1;
ctx->pix_fmt = *codec->pix_fmts;
ret = avcodec_open2(ctx, codec, NULL);
if (ret < 0)
{
printf("avcodec_open2 error %d", ret);
goto end;
}
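//If the source frame is not already in the encoder's pixel format, convert it with libswscale first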
if (frame->format != ctx->pix_fmt)
{
rgbFrame = av_frame_alloc();
if (rgbFrame == NULL)
{
printf("av_frame_alloc fail");
goto end;
}
swsContext = sws_getContext(frame->width, frame->height, (enum AVPixelFormat)frame->format, frame->width, frame->height, ctx->pix_fmt, SWS_FAST_BILINEAR, NULL, NULL, NULL);
if (!swsContext)
{
printf("sws_getContext fail");
goto end;
}
int bufferSize = av_image_get_buffer_size(ctx->pix_fmt, frame->width, frame->height, 1) * 2;
buffer = (unsigned char*)av_malloc(bufferSize);
if (buffer == NULL)
{
printf("buffer alloc fail:%d", bufferSize);
goto end;
}
av_image_fill_arrays(rgbFrame->data, rgbFrame->linesize, buffer, ctx->pix_fmt, frame->width, frame->height, 1);
if ((ret = sws_scale(swsContext, frame->data, frame->linesize, 0, frame->height, rgbFrame->data, rgbFrame->linesize)) < 0)
{
printf("sws_scale error %d", ret);
}
rgbFrame->format = ctx->pix_fmt;
rgbFrame->width = ctx->width;
rgbFrame->height = ctx->height;
ret = avcodec_send_frame(ctx, rgbFrame);
}
else
{
ret = avcodec_send_frame(ctx, frame);
}
if (ret < 0)
{
printf("avcodec_send_frame error %d", ret);
goto end;
}
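//Retrieve the encoded image; a single send/receive pair is normally enough for intra-only image codecs such as MJPEG or PNG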
ret = avcodec_receive_packet(ctx, &pkt);
if (ret < 0)
{
printf("avcodec_receive_packet error %d", ret);
goto end;
}
if (pkt.size > 0 && (size_t)pkt.size <= outbufSize)
{
memcpy(outbuf, pkt.data, pkt.size);
ret = pkt.size;
}
else
{
//The encoded image did not fit in the output buffer (or was empty); report failure instead of returning a bogus size
ret = -1;
}
end:
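//Release all temporary resources whether encoding succeeded or not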
if (swsContext)
{
sws_freeContext(swsContext);
}
if (rgbFrame)
{
av_frame_unref(rgbFrame);
av_frame_free(&rgbFrame);
}
if (buffer)
{
av_free(buffer);
}
av_packet_unref(&pkt);
if (ctx)
{
avcodec_free_context(&ctx);
}
return ret;
}
```
And here is how it is called:
```
//Compute a buffer size from the frame's width and height (a BGRA-sized buffer is a generous upper bound for the compressed image)
int bufSize = av_image_get_buffer_size(AV_PIX_FMT_BGRA, frame->width, frame->height, 64);
//Allocate the output buffer that will receive the encoded image data
uint8_t* buf = (uint8_t*)av_malloc(bufSize);
int picSize = frameToImage(frame, AV_CODEC_ID_MJPEG, buf, bufSize);
if (picSize <= 0) {
qDebug()<<"frameToImage failed!";
av_free(buf);
return false;
}
//Write the encoded data to a file
std::ofstream outfile(fileName, std::ios::binary);
if (!outfile) {
qDebug()<<"open file failed!";
av_free(buf);
return false;
}
//Write only the picSize bytes actually produced by the encoder, not the whole buffer
outfile.write(reinterpret_cast<char*>(buf), picSize);
outfile.close();
av_free(buf);
```
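To illustrate the flexibility mentioned at the beginning (adding text, filters and so on before saving), here is a minimal, hypothetical sketch: it converts the decoded frame to BGRA with libswscale and darkens a small corner block as a stand-in for a real text or watermark overlay. frameWithOverlay is not part of the project, only an illustration; the returned frame can be passed to frameToImage like the original one.

```
extern "C" {
#include <libavutil/frame.h>
#include <libswscale/swscale.h>
}

//Hypothetical helper: convert src to BGRA and apply a simple pixel-level "filter"
static AVFrame* frameWithOverlay(AVFrame* src)
{
    AVFrame* bgra = av_frame_alloc();
    if (!bgra)
        return NULL;
    bgra->format = AV_PIX_FMT_BGRA;
    bgra->width = src->width;
    bgra->height = src->height;
    if (av_frame_get_buffer(bgra, 0) < 0)
    {
        av_frame_free(&bgra);
        return NULL;
    }
    struct SwsContext* sws = sws_getContext(src->width, src->height, (enum AVPixelFormat)src->format,
        src->width, src->height, AV_PIX_FMT_BGRA, SWS_FAST_BILINEAR, NULL, NULL, NULL);
    if (!sws)
    {
        av_frame_free(&bgra);
        return NULL;
    }
    sws_scale(sws, src->data, src->linesize, 0, src->height, bgra->data, bgra->linesize);
    sws_freeContext(sws);
    //Darken a 100x40 block in the top-left corner as a placeholder for real text/watermark drawing
    for (int y = 0; y < 40 && y < bgra->height; ++y)
    {
        uint8_t* row = bgra->data[0] + y * bgra->linesize[0];
        for (int x = 0; x < 100 && x < bgra->width; ++x)
        {
            row[x * 4 + 0] /= 2; //B
            row[x * 4 + 1] /= 2; //G
            row[x * 4 + 2] /= 2; //R
        }
    }
    return bgra;
}
```

The BGRA frame is converted again inside frameToImage to whatever pixel format the chosen encoder expects, so this step only has to care about the overlay itself; release the frame with av_frame_free() after use.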
QQ: 617753820
GitHub: https://github.com/linkedbyte/tomobile