diff --git a/packages/multimedia/ffmpeg/patches/v4l2-request/ffmpeg-001-v4l2-request.patch b/packages/multimedia/ffmpeg/patches/v4l2-request/ffmpeg-001-v4l2-request.patch new file mode 100644 index 0000000000..7ef133994f --- /dev/null +++ b/packages/multimedia/ffmpeg/patches/v4l2-request/ffmpeg-001-v4l2-request.patch @@ -0,0 +1,5202 @@ +From df0e167bdb7db4cc2340ab831e6961a1108a753c Mon Sep 17 00:00:00 2001 +From: Jonas Karlman +Date: Mon, 3 Dec 2018 23:48:04 +0100 +Subject: [PATCH 01/18] avutil: add av_buffer_pool_flush() + +Signed-off-by: Jonas Karlman +--- + libavutil/buffer.c | 13 +++++++++++++ + libavutil/buffer.h | 5 +++++ + 2 files changed, 18 insertions(+) + +diff --git a/libavutil/buffer.c b/libavutil/buffer.c +index 38a554208a..b0fedabc3e 100644 +--- a/libavutil/buffer.c ++++ b/libavutil/buffer.c +@@ -273,6 +273,19 @@ static void buffer_pool_free(AVBufferPool *pool) + av_freep(&pool); + } + ++void av_buffer_pool_flush(AVBufferPool *pool) ++{ ++ ff_mutex_lock(&pool->mutex); ++ while (pool->pool) { ++ BufferPoolEntry *buf = pool->pool; ++ pool->pool = buf->next; ++ ++ buf->free(buf->opaque, buf->data); ++ av_freep(&buf); ++ } ++ ff_mutex_unlock(&pool->mutex); ++} ++ + void av_buffer_pool_uninit(AVBufferPool **ppool) + { + AVBufferPool *pool; +diff --git a/libavutil/buffer.h b/libavutil/buffer.h +index c0f3f6cc9a..998beec9ac 100644 +--- a/libavutil/buffer.h ++++ b/libavutil/buffer.h +@@ -267,6 +267,11 @@ AVBufferPool *av_buffer_pool_init2(int size, void *opaque, + AVBufferRef* (*alloc)(void *opaque, int size), + void (*pool_free)(void *opaque)); + ++/** ++ * Free all available buffers in a buffer pool. ++ */ ++ void av_buffer_pool_flush(AVBufferPool *pool); ++ + /** + * Mark the pool as being available for freeing. It will actually be freed only + * once all the allocated buffers associated with the pool are released. 
Thus it + +From cdb4dbed4b223b9a21287cfcc594af99e7aa3990 Mon Sep 17 00:00:00 2001 +From: Jonas Karlman +Date: Sat, 15 Dec 2018 22:32:16 +0100 +Subject: [PATCH 02/18] Add common V4L2 request API code + +Signed-off-by: Jonas Karlman +--- + configure | 12 + + libavcodec/Makefile | 1 + + libavcodec/hwconfig.h | 2 + + libavcodec/v4l2_request.c | 984 ++++++++++++++++++++++++++++++++++++++ + libavcodec/v4l2_request.h | 77 +++ + 5 files changed, 1076 insertions(+) + create mode 100644 libavcodec/v4l2_request.c + create mode 100644 libavcodec/v4l2_request.h + +diff --git a/configure b/configure +index 8569a60bf8..9f9909a236 100755 +--- a/configure ++++ b/configure +@@ -274,6 +274,7 @@ External library support: + --enable-libtls enable LibreSSL (via libtls), needed for https support + if openssl, gnutls or mbedtls is not used [no] + --enable-libtwolame enable MP2 encoding via libtwolame [no] ++ --enable-libudev enable libudev [no] + --enable-libv4l2 enable libv4l2/v4l-utils [no] + --enable-libvidstab enable video stabilization using vid.stab [no] + --enable-libvmaf enable vmaf filter via libvmaf [no] +@@ -342,6 +343,7 @@ External library support: + --enable-omx-rpi enable OpenMAX IL code for Raspberry Pi [no] + --enable-rkmpp enable Rockchip Media Process Platform code [no] + --disable-v4l2-m2m disable V4L2 mem2mem code [autodetect] ++ --enable-v4l2-request enable V4L2 request API code [no] + --disable-vaapi disable Video Acceleration API (mainly Unix/Intel) code [autodetect] + --disable-vdpau disable Nvidia Video Decode and Presentation API for Unix code [autodetect] + --disable-videotoolbox disable VideoToolbox code [autodetect] +@@ -1807,6 +1809,7 @@ EXTERNAL_LIBRARY_LIST=" + libtesseract + libtheora + libtwolame ++ libudev + libv4l2 + libvorbis + libvpx +@@ -1861,6 +1864,7 @@ HWACCEL_LIBRARY_LIST=" + mmal + omx + opencl ++ v4l2_request + vulkan + " + +@@ -2903,6 +2907,7 @@ d3d11va_deps="dxva_h ID3D11VideoDecoder ID3D11VideoContext" + dxva2_deps="dxva2api_h DXVA2_ConfigPictureDecode ole32 user32" + ffnvcodec_deps_any="libdl LoadLibrary" + nvdec_deps="ffnvcodec" ++v4l2_request_deps="linux_videodev2_h linux_media_h v4l2_timeval_to_ns libdrm libudev" + vaapi_x11_deps="xlib" + videotoolbox_hwaccel_deps="videotoolbox pthreads" + videotoolbox_hwaccel_extralibs="-framework QuartzCore" +@@ -6376,6 +6381,7 @@ enabled libtls && require_pkg_config libtls libtls tls.h tls_configur + enabled libtwolame && require libtwolame twolame.h twolame_init -ltwolame && + { check_lib libtwolame twolame.h twolame_encode_buffer_float32_interleaved -ltwolame || + die "ERROR: libtwolame must be installed and version must be >= 0.3.10"; } ++enabled libudev && require_pkg_config libudev libudev libudev.h udev_new + enabled libv4l2 && require_pkg_config libv4l2 libv4l2 libv4l2.h v4l2_ioctl + enabled libvidstab && require_pkg_config libvidstab "vidstab >= 0.98" vid.stab/libvidstab.h vsMotionDetectInit + enabled libvmaf && require_pkg_config libvmaf "libvmaf >= 1.3.9" libvmaf.h compute_vmaf +@@ -6475,6 +6481,10 @@ enabled rkmpp && { require_pkg_config rkmpp rockchip_mpp rockchip/r + { enabled libdrm || + die "ERROR: rkmpp requires --enable-libdrm"; } + } ++enabled v4l2_request && { enabled libdrm || ++ die "ERROR: v4l2-request requires --enable-libdrm"; } && ++ { enabled libudev || ++ die "ERROR: v4l2-request requires --enable-libudev"; } + enabled vapoursynth && require_pkg_config vapoursynth "vapoursynth-script >= 42" VSScript.h vsscript_init + + +@@ -6556,6 +6566,8 @@ if enabled v4l2_m2m; then + check_cc vp9_v4l2_m2m 
linux/videodev2.h "int i = V4L2_PIX_FMT_VP9;" + fi + ++check_func_headers "linux/media.h linux/videodev2.h" v4l2_timeval_to_ns ++ + check_headers sys/videoio.h + test_code cc sys/videoio.h "struct v4l2_frmsizeenum vfse; vfse.discrete.width = 0;" && enable_sanitized struct_v4l2_frmivalenum_discrete + +diff --git a/libavcodec/Makefile b/libavcodec/Makefile +index 5a6ea59715..d742205168 100644 +--- a/libavcodec/Makefile ++++ b/libavcodec/Makefile +@@ -153,6 +153,7 @@ OBJS-$(CONFIG_VP3DSP) += vp3dsp.o + OBJS-$(CONFIG_VP56DSP) += vp56dsp.o + OBJS-$(CONFIG_VP8DSP) += vp8dsp.o + OBJS-$(CONFIG_V4L2_M2M) += v4l2_m2m.o v4l2_context.o v4l2_buffers.o v4l2_fmt.o ++OBJS-$(CONFIG_V4L2_REQUEST) += v4l2_request.o + OBJS-$(CONFIG_WMA_FREQS) += wma_freqs.o + OBJS-$(CONFIG_WMV2DSP) += wmv2dsp.o + +diff --git a/libavcodec/hwconfig.h b/libavcodec/hwconfig.h +index f421dc909f..ee78d8ab8e 100644 +--- a/libavcodec/hwconfig.h ++++ b/libavcodec/hwconfig.h +@@ -80,6 +80,8 @@ typedef struct AVCodecHWConfigInternal { + HW_CONFIG_HWACCEL(0, 0, 1, D3D11VA_VLD, NONE, ff_ ## codec ## _d3d11va_hwaccel) + #define HWACCEL_XVMC(codec) \ + HW_CONFIG_HWACCEL(0, 0, 1, XVMC, NONE, ff_ ## codec ## _xvmc_hwaccel) ++#define HWACCEL_V4L2REQUEST(codec) \ ++ HW_CONFIG_HWACCEL(1, 0, 0, DRM_PRIME, DRM, ff_ ## codec ## _v4l2request_hwaccel) + + #define HW_CONFIG_ENCODER(device, frames, ad_hoc, format, device_type_) \ + &(const AVCodecHWConfigInternal) { \ +diff --git a/libavcodec/v4l2_request.c b/libavcodec/v4l2_request.c +new file mode 100644 +index 0000000000..7d97468153 +--- /dev/null ++++ b/libavcodec/v4l2_request.c +@@ -0,0 +1,984 @@ ++/* ++ * This file is part of FFmpeg. ++ * ++ * FFmpeg is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2.1 of the License, or (at your option) any later version. ++ * ++ * FFmpeg is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * Lesser General Public License for more details. ++ * ++ * You should have received a copy of the GNU Lesser General Public ++ * License along with FFmpeg; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++#include "decode.h" ++#include "internal.h" ++#include "v4l2_request.h" ++ ++uint64_t ff_v4l2_request_get_capture_timestamp(AVFrame *frame) ++{ ++ V4L2RequestDescriptor *req = (V4L2RequestDescriptor*)frame->data[0]; ++ return req ? 
v4l2_timeval_to_ns(&req->capture.buffer.timestamp) : 0; ++} ++ ++int ff_v4l2_request_reset_frame(AVCodecContext *avctx, AVFrame *frame) ++{ ++ V4L2RequestDescriptor *req = (V4L2RequestDescriptor*)frame->data[0]; ++ memset(&req->drm, 0, sizeof(AVDRMFrameDescriptor)); ++ req->output.used = 0; ++ return 0; ++} ++ ++int ff_v4l2_request_append_output_buffer(AVCodecContext *avctx, AVFrame *frame, const uint8_t *data, uint32_t size) ++{ ++ V4L2RequestDescriptor *req = (V4L2RequestDescriptor*)frame->data[0]; ++ if (req->output.used + size + (AV_INPUT_BUFFER_PADDING_SIZE * 4) <= req->output.size) { ++ memcpy(req->output.addr + req->output.used, data, size); ++ req->output.used += size; ++ } else { ++ av_log(avctx, AV_LOG_ERROR, "%s: output.used=%u output.size=%u size=%u\n", __func__, req->output.used, req->output.size, size); ++ } ++ return 0; ++} ++ ++static int v4l2_request_controls(V4L2RequestContext *ctx, int request_fd, unsigned long type, struct v4l2_ext_control *control, int count) ++{ ++ struct v4l2_ext_controls controls = { ++ .controls = control, ++ .count = count, ++ .request_fd = request_fd, ++ .which = (request_fd >= 0) ? V4L2_CTRL_WHICH_REQUEST_VAL : 0, ++ }; ++ ++ if (!control || !count) ++ return 0; ++ ++ return ioctl(ctx->video_fd, type, &controls); ++} ++ ++static int v4l2_request_set_controls(V4L2RequestContext *ctx, int request_fd, struct v4l2_ext_control *control, int count) ++{ ++ return v4l2_request_controls(ctx, request_fd, VIDIOC_S_EXT_CTRLS, control, count); ++} ++ ++int ff_v4l2_request_set_controls(AVCodecContext *avctx, struct v4l2_ext_control *control, int count) ++{ ++ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data; ++ int ret; ++ ++ ret = v4l2_request_controls(ctx, -1, VIDIOC_S_EXT_CTRLS, control, count); ++ if (ret < 0) { ++ av_log(avctx, AV_LOG_ERROR, "%s: set controls failed, %s (%d)\n", __func__, strerror(errno), errno); ++ return AVERROR(EINVAL); ++ } ++ ++ return ret; ++} ++ ++int ff_v4l2_request_get_controls(AVCodecContext *avctx, struct v4l2_ext_control *control, int count) ++{ ++ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data; ++ int ret; ++ ++ ret = v4l2_request_controls(ctx, -1, VIDIOC_G_EXT_CTRLS, control, count); ++ if (ret < 0) { ++ av_log(avctx, AV_LOG_ERROR, "%s: get controls failed, %s (%d)\n", __func__, strerror(errno), errno); ++ return AVERROR(EINVAL); ++ } ++ ++ return ret; ++} ++ ++int ff_v4l2_request_query_control(AVCodecContext *avctx, struct v4l2_query_ext_ctrl *control) ++{ ++ int ret; ++ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data; ++ ++ ret = ioctl(ctx->video_fd, VIDIOC_QUERY_EXT_CTRL, control); ++ if (ret < 0) { ++ av_log(avctx, AV_LOG_ERROR, "%s: query control failed, %s (%d)\n", __func__, strerror(errno), errno); ++ return AVERROR(EINVAL); ++ } ++ ++ return 0; ++} ++ ++int ff_v4l2_request_query_control_default_value(AVCodecContext *avctx, uint32_t id) ++{ ++ int ret; ++ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data; ++ struct v4l2_queryctrl control = { ++ .id = id, ++ }; ++ ++ ret = ioctl(ctx->video_fd, VIDIOC_QUERYCTRL, &control); ++ if (ret < 0) { ++ av_log(avctx, AV_LOG_ERROR, "%s: query control failed, %s (%d)\n", __func__, strerror(errno), errno); ++ return AVERROR(EINVAL); ++ } ++ ++ return control.default_value; ++} ++ ++static int v4l2_request_queue_buffer(V4L2RequestContext *ctx, int request_fd, V4L2RequestBuffer *buf, uint32_t flags) ++{ ++ struct v4l2_plane planes[1] = {}; ++ struct v4l2_buffer buffer = { ++ .type = buf->buffer.type, ++ .memory = buf->buffer.memory, 
++ .index = buf->index, ++ .timestamp.tv_usec = ctx->timestamp, ++ .bytesused = buf->used, ++ .request_fd = request_fd, ++ .flags = ((request_fd >= 0) ? V4L2_BUF_FLAG_REQUEST_FD : 0) | flags, ++ }; ++ ++ buf->buffer.timestamp = buffer.timestamp; ++ ++ if (V4L2_TYPE_IS_MULTIPLANAR(buf->buffer.type)) { ++ planes[0].bytesused = buf->used; ++ buffer.bytesused = 0; ++ buffer.length = 1; ++ buffer.m.planes = planes; ++ } ++ ++ return ioctl(ctx->video_fd, VIDIOC_QBUF, &buffer); ++} ++ ++static int v4l2_request_dequeue_buffer(V4L2RequestContext *ctx, V4L2RequestBuffer *buf) ++{ ++ int ret; ++ struct v4l2_plane planes[1] = {}; ++ struct v4l2_buffer buffer = { ++ .type = buf->buffer.type, ++ .memory = buf->buffer.memory, ++ .index = buf->index, ++ }; ++ ++ if (V4L2_TYPE_IS_MULTIPLANAR(buf->buffer.type)) { ++ buffer.length = 1; ++ buffer.m.planes = planes; ++ } ++ ++ ret = ioctl(ctx->video_fd, VIDIOC_DQBUF, &buffer); ++ if (ret < 0) ++ return ret; ++ ++ buf->buffer.timestamp = buffer.timestamp; ++ return 0; ++} ++ ++const uint32_t v4l2_request_capture_pixelformats[] = { ++ V4L2_PIX_FMT_NV12, ++#ifdef DRM_FORMAT_MOD_ALLWINNER_TILED ++ V4L2_PIX_FMT_SUNXI_TILED_NV12, ++#endif ++}; ++ ++static int v4l2_request_set_drm_descriptor(V4L2RequestDescriptor *req, struct v4l2_format *format) ++{ ++ AVDRMFrameDescriptor *desc = &req->drm; ++ AVDRMLayerDescriptor *layer = &desc->layers[0]; ++ uint32_t pixelformat = V4L2_TYPE_IS_MULTIPLANAR(format->type) ? format->fmt.pix_mp.pixelformat : format->fmt.pix.pixelformat; ++ ++ switch (pixelformat) { ++ case V4L2_PIX_FMT_NV12: ++ layer->format = DRM_FORMAT_NV12; ++ desc->objects[0].format_modifier = DRM_FORMAT_MOD_LINEAR; ++ break; ++#ifdef DRM_FORMAT_MOD_ALLWINNER_TILED ++ case V4L2_PIX_FMT_SUNXI_TILED_NV12: ++ layer->format = DRM_FORMAT_NV12; ++ desc->objects[0].format_modifier = DRM_FORMAT_MOD_ALLWINNER_TILED; ++ break; ++#endif ++ default: ++ return -1; ++ } ++ ++ desc->nb_objects = 1; ++ desc->objects[0].fd = req->capture.fd; ++ desc->objects[0].size = req->capture.size; ++ ++ desc->nb_layers = 1; ++ layer->nb_planes = 2; ++ ++ layer->planes[0].object_index = 0; ++ layer->planes[0].offset = 0; ++ layer->planes[0].pitch = V4L2_TYPE_IS_MULTIPLANAR(format->type) ? format->fmt.pix_mp.plane_fmt[0].bytesperline : format->fmt.pix.bytesperline; ++ ++ layer->planes[1].object_index = 0; ++ layer->planes[1].offset = layer->planes[0].pitch * (V4L2_TYPE_IS_MULTIPLANAR(format->type) ? 
format->fmt.pix_mp.height : format->fmt.pix.height); ++ layer->planes[1].pitch = layer->planes[0].pitch; ++ ++ return 0; ++} ++ ++static int v4l2_request_queue_decode(AVCodecContext *avctx, AVFrame *frame, struct v4l2_ext_control *control, int count, int first_slice, int last_slice) ++{ ++ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data; ++ V4L2RequestDescriptor *req = (V4L2RequestDescriptor*)frame->data[0]; ++ struct timeval tv = { 2, 0 }; ++ fd_set except_fds; ++ int ret; ++ ++ av_log(avctx, AV_LOG_DEBUG, "%s: avctx=%p used=%u controls=%d index=%d fd=%d request_fd=%d first_slice=%d last_slice=%d\n", __func__, avctx, req->output.used, count, req->capture.index, req->capture.fd, req->request_fd, first_slice, last_slice); ++ ++ if (first_slice) ++ ctx->timestamp++; ++ ++ ret = v4l2_request_set_controls(ctx, req->request_fd, control, count); ++ if (ret < 0) { ++ av_log(avctx, AV_LOG_ERROR, "%s: set controls failed for request %d, %s (%d)\n", __func__, req->request_fd, strerror(errno), errno); ++ return -1; ++ } ++ ++ memset(req->output.addr + req->output.used, 0, AV_INPUT_BUFFER_PADDING_SIZE * 4); ++ ++ ret = v4l2_request_queue_buffer(ctx, req->request_fd, &req->output, last_slice ? 0 : V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF); ++ if (ret < 0) { ++ av_log(avctx, AV_LOG_ERROR, "%s: queue output buffer %d failed for request %d, %s (%d)\n", __func__, req->output.index, req->request_fd, strerror(errno), errno); ++ return -1; ++ } ++ ++ if (first_slice) { ++ ret = v4l2_request_queue_buffer(ctx, -1, &req->capture, 0); ++ if (ret < 0) { ++ av_log(avctx, AV_LOG_ERROR, "%s: queue capture buffer %d failed for request %d, %s (%d)\n", __func__, req->capture.index, req->request_fd, strerror(errno), errno); ++ return -1; ++ } ++ } ++ ++ // NOTE: do we need to dequeue when request fails/timeout? ++ ++ // 4. 
queue request and wait ++ ret = ioctl(req->request_fd, MEDIA_REQUEST_IOC_QUEUE, NULL); ++ if (ret < 0) { ++ av_log(avctx, AV_LOG_ERROR, "%s: queue request %d failed, %s (%d)\n", __func__, req->request_fd, strerror(errno), errno); ++ goto fail; ++ } ++ ++ FD_ZERO(&except_fds); ++ FD_SET(req->request_fd, &except_fds); ++ ++ ret = select(req->request_fd + 1, NULL, NULL, &except_fds, &tv); ++ if (ret == 0) { ++ av_log(avctx, AV_LOG_ERROR, "%s: request %d timeout\n", __func__, req->request_fd); ++ goto fail; ++ } else if (ret < 0) { ++ av_log(avctx, AV_LOG_ERROR, "%s: select request %d failed, %s (%d)\n", __func__, req->request_fd, strerror(errno), errno); ++ goto fail; ++ } ++ ++ ret = v4l2_request_dequeue_buffer(ctx, &req->output); ++ if (ret < 0) { ++ av_log(avctx, AV_LOG_ERROR, "%s: dequeue output buffer %d failed for request %d, %s (%d)\n", __func__, req->output.index, req->request_fd, strerror(errno), errno); ++ return -1; ++ } ++ ++ if (last_slice) { ++ ret = v4l2_request_dequeue_buffer(ctx, &req->capture); ++ if (ret < 0) { ++ av_log(avctx, AV_LOG_ERROR, "%s: dequeue capture buffer %d failed for request %d, %s (%d)\n", __func__, req->capture.index, req->request_fd, strerror(errno), errno); ++ return -1; ++ } ++ } ++ ++ // TODO: check errors ++ // buffer.flags & V4L2_BUF_FLAG_ERROR ++ ++ ret = ioctl(req->request_fd, MEDIA_REQUEST_IOC_REINIT, NULL); ++ if (ret < 0) { ++ av_log(avctx, AV_LOG_ERROR, "%s: reinit request %d failed, %s (%d)\n", __func__, req->request_fd, strerror(errno), errno); ++ return -1; ++ } ++ ++ if (last_slice) ++ return v4l2_request_set_drm_descriptor(req, &ctx->format); ++ ++ return 0; ++ ++fail: ++ ret = v4l2_request_dequeue_buffer(ctx, &req->output); ++ if (ret < 0) ++ av_log(avctx, AV_LOG_ERROR, "%s: dequeue output buffer %d failed for request %d, %s (%d)\n", __func__, req->output.index, req->request_fd, strerror(errno), errno); ++ ++ ret = v4l2_request_dequeue_buffer(ctx, &req->capture); ++ if (ret < 0) ++ av_log(avctx, AV_LOG_ERROR, "%s: dequeue capture buffer %d failed for request %d, %s (%d)\n", __func__, req->capture.index, req->request_fd, strerror(errno), errno); ++ ++ ret = ioctl(req->request_fd, MEDIA_REQUEST_IOC_REINIT, NULL); ++ if (ret < 0) ++ av_log(avctx, AV_LOG_ERROR, "%s: reinit request %d failed, %s (%d)\n", __func__, req->request_fd, strerror(errno), errno); ++ ++ return -1; ++} ++ ++int ff_v4l2_request_decode_slice(AVCodecContext *avctx, AVFrame *frame, struct v4l2_ext_control *control, int count, int first_slice, int last_slice) ++{ ++ V4L2RequestDescriptor *req = (V4L2RequestDescriptor*)frame->data[0]; ++ ++ // fall back to queue each slice as a full frame ++ if ((req->output.capabilities & V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF) != V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF) ++ return v4l2_request_queue_decode(avctx, frame, control, count, 1, 1); ++ ++ return v4l2_request_queue_decode(avctx, frame, control, count, first_slice, last_slice); ++} ++ ++int ff_v4l2_request_decode_frame(AVCodecContext *avctx, AVFrame *frame, struct v4l2_ext_control *control, int count) ++{ ++ return v4l2_request_queue_decode(avctx, frame, control, count, 1, 1); ++} ++ ++static int v4l2_request_try_format(AVCodecContext *avctx, enum v4l2_buf_type type, uint32_t pixelformat) ++{ ++ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data; ++ struct v4l2_fmtdesc fmtdesc = { ++ .index = 0, ++ .type = type, ++ }; ++ ++ if (V4L2_TYPE_IS_OUTPUT(type)) { ++ struct v4l2_create_buffers buffers = { ++ .count = 0, ++ .memory = V4L2_MEMORY_MMAP, ++ .format.type = type, 
++ }; ++ ++ if (ioctl(ctx->video_fd, VIDIOC_CREATE_BUFS, &buffers) < 0) { ++ av_log(avctx, AV_LOG_ERROR, "%s: create buffers failed for type %u, %s (%d)\n", __func__, type, strerror(errno), errno); ++ return -1; ++ } ++ ++ if ((buffers.capabilities & V4L2_BUF_CAP_SUPPORTS_REQUESTS) != V4L2_BUF_CAP_SUPPORTS_REQUESTS) { ++ av_log(avctx, AV_LOG_INFO, "%s: output buffer type do not support requests, capabilities %u\n", __func__, buffers.capabilities); ++ return -1; ++ } ++ } ++ ++ while (ioctl(ctx->video_fd, VIDIOC_ENUM_FMT, &fmtdesc) >= 0) { ++ if (fmtdesc.pixelformat == pixelformat) ++ return 0; ++ ++ fmtdesc.index++; ++ } ++ ++ av_log(avctx, AV_LOG_INFO, "%s: pixelformat %u not supported for type %u\n", __func__, pixelformat, type); ++ return -1; ++} ++ ++static int v4l2_request_set_format(AVCodecContext *avctx, enum v4l2_buf_type type, uint32_t pixelformat, uint32_t buffersize) ++{ ++ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data; ++ struct v4l2_format format = { ++ .type = type, ++ }; ++ ++ if (V4L2_TYPE_IS_MULTIPLANAR(type)) { ++ format.fmt.pix_mp.width = avctx->coded_width; ++ format.fmt.pix_mp.height = avctx->coded_height; ++ format.fmt.pix_mp.pixelformat = pixelformat; ++ format.fmt.pix_mp.plane_fmt[0].sizeimage = buffersize; ++ format.fmt.pix_mp.num_planes = 1; ++ } else { ++ format.fmt.pix.width = avctx->coded_width; ++ format.fmt.pix.height = avctx->coded_height; ++ format.fmt.pix.pixelformat = pixelformat; ++ format.fmt.pix.sizeimage = buffersize; ++ } ++ ++ return ioctl(ctx->video_fd, VIDIOC_S_FMT, &format); ++} ++ ++static int v4l2_request_select_capture_format(AVCodecContext *avctx) ++{ ++ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data; ++ enum v4l2_buf_type type = ctx->format.type; ++ ++#if 0 ++ struct v4l2_format format = { ++ .type = type, ++ }; ++ struct v4l2_fmtdesc fmtdesc = { ++ .index = 0, ++ .type = type, ++ }; ++ uint32_t pixelformat; ++ int i; ++ ++ if (ioctl(ctx->video_fd, VIDIOC_G_FMT, &format) < 0) { ++ av_log(avctx, AV_LOG_ERROR, "%s: get capture format failed, %s (%d)\n", __func__, strerror(errno), errno); ++ return -1; ++ } ++ ++ pixelformat = V4L2_TYPE_IS_MULTIPLANAR(type) ? 
format.fmt.pix_mp.pixelformat : format.fmt.pix.pixelformat; ++ ++ for (i = 0; i < FF_ARRAY_ELEMS(v4l2_request_capture_pixelformats); i++) { ++ if (pixelformat == v4l2_request_capture_pixelformats[i]) ++ return v4l2_request_set_format(avctx, type, pixelformat, 0); ++ } ++ ++ while (ioctl(ctx->video_fd, VIDIOC_ENUM_FMT, &fmtdesc) >= 0) { ++ for (i = 0; i < FF_ARRAY_ELEMS(v4l2_request_capture_pixelformats); i++) { ++ if (fmtdesc.pixelformat == v4l2_request_capture_pixelformats[i]) ++ return v4l2_request_set_format(avctx, type, fmtdesc.pixelformat, 0); ++ } ++ ++ fmtdesc.index++; ++ } ++#else ++ for (int i = 0; i < FF_ARRAY_ELEMS(v4l2_request_capture_pixelformats); i++) { ++ uint32_t pixelformat = v4l2_request_capture_pixelformats[i]; ++ if (!v4l2_request_try_format(avctx, type, pixelformat)) ++ return v4l2_request_set_format(avctx, type, pixelformat, 0); ++ } ++#endif ++ ++ return -1; ++} ++ ++static int v4l2_request_probe_video_device(struct udev_device *device, AVCodecContext *avctx, uint32_t pixelformat, uint32_t buffersize, struct v4l2_ext_control *control, int count) ++{ ++ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data; ++ int ret = AVERROR(EINVAL); ++ struct v4l2_capability capability = {0}; ++ unsigned int capabilities = 0; ++ ++ const char *path = udev_device_get_devnode(device); ++ if (!path) { ++ av_log(avctx, AV_LOG_ERROR, "%s: get video device devnode failed\n", __func__); ++ ret = AVERROR(EINVAL); ++ goto fail; ++ } ++ ++ ctx->video_fd = open(path, O_RDWR | O_NONBLOCK, 0); ++ if (ctx->video_fd < 0) { ++ av_log(avctx, AV_LOG_ERROR, "%s: opening %s failed, %s (%d)\n", __func__, path, strerror(errno), errno); ++ ret = AVERROR(EINVAL); ++ goto fail; ++ } ++ ++ ret = ioctl(ctx->video_fd, VIDIOC_QUERYCAP, &capability); ++ if (ret < 0) { ++ av_log(avctx, AV_LOG_ERROR, "%s: get video capability failed, %s (%d)\n", __func__, strerror(errno), errno); ++ ret = AVERROR(EINVAL); ++ goto fail; ++ } ++ ++ if (capability.capabilities & V4L2_CAP_DEVICE_CAPS) ++ capabilities = capability.device_caps; ++ else ++ capabilities = capability.capabilities; ++ ++ av_log(avctx, AV_LOG_DEBUG, "%s: avctx=%p ctx=%p path=%s capabilities=%u\n", __func__, avctx, ctx, path, capabilities); ++ ++ if ((capabilities & V4L2_CAP_STREAMING) != V4L2_CAP_STREAMING) { ++ av_log(avctx, AV_LOG_ERROR, "%s: missing required streaming capability\n", __func__); ++ ret = AVERROR(EINVAL); ++ goto fail; ++ } ++ ++ if ((capabilities & V4L2_CAP_VIDEO_M2M_MPLANE) == V4L2_CAP_VIDEO_M2M_MPLANE) { ++ ctx->output_type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; ++ ctx->format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; ++ } else if ((capabilities & V4L2_CAP_VIDEO_M2M) == V4L2_CAP_VIDEO_M2M) { ++ ctx->output_type = V4L2_BUF_TYPE_VIDEO_OUTPUT; ++ ctx->format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; ++ } else { ++ av_log(avctx, AV_LOG_ERROR, "%s: missing required mem2mem capability\n", __func__); ++ ret = AVERROR(EINVAL); ++ goto fail; ++ } ++ ++ ret = v4l2_request_try_format(avctx, ctx->output_type, pixelformat); ++ if (ret < 0) { ++ av_log(avctx, AV_LOG_WARNING, "%s: try output format failed\n", __func__); ++ ret = AVERROR(EINVAL); ++ goto fail; ++ } ++ ++ ret = v4l2_request_set_controls(ctx, -1, control, count); ++ if (ret < 0) { ++ av_log(avctx, AV_LOG_ERROR, "%s: set controls failed, %s (%d)\n", __func__, strerror(errno), errno); ++ ret = AVERROR(EINVAL); ++ goto fail; ++ } ++ ++ ret = v4l2_request_set_format(avctx, ctx->output_type, pixelformat, buffersize); ++ if (ret < 0) { ++ av_log(avctx, AV_LOG_ERROR, "%s: set output format 
failed, %s (%d)\n", __func__, strerror(errno), errno); ++ ret = AVERROR(EINVAL); ++ goto fail; ++ } ++ ++ ret = v4l2_request_select_capture_format(avctx); ++ if (ret < 0) { ++ av_log(avctx, AV_LOG_WARNING, "%s: select capture format failed\n", __func__); ++ ret = AVERROR(EINVAL); ++ goto fail; ++ } ++ ++ return 0; ++ ++fail: ++ if (ctx->video_fd >= 0) { ++ close(ctx->video_fd); ++ ctx->video_fd = -1; ++ } ++ return ret; ++} ++ ++static int v4l2_request_init_context(AVCodecContext *avctx) ++{ ++ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data; ++ int ret; ++ ++ ret = ioctl(ctx->video_fd, VIDIOC_G_FMT, &ctx->format); ++ if (ret < 0) { ++ av_log(avctx, AV_LOG_ERROR, "%s: get capture format failed, %s (%d)\n", __func__, strerror(errno), errno); ++ ret = AVERROR(EINVAL); ++ goto fail; ++ } ++ ++ if (V4L2_TYPE_IS_MULTIPLANAR(ctx->format.type)) { ++ av_log(avctx, AV_LOG_DEBUG, "%s: pixelformat=%d width=%u height=%u bytesperline=%u sizeimage=%u num_planes=%u\n", __func__, ctx->format.fmt.pix_mp.pixelformat, ctx->format.fmt.pix_mp.width, ctx->format.fmt.pix_mp.height, ctx->format.fmt.pix_mp.plane_fmt[0].bytesperline, ctx->format.fmt.pix_mp.plane_fmt[0].sizeimage, ctx->format.fmt.pix_mp.num_planes); ++ } else { ++ av_log(avctx, AV_LOG_DEBUG, "%s: pixelformat=%d width=%u height=%u bytesperline=%u sizeimage=%u\n", __func__, ctx->format.fmt.pix.pixelformat, ctx->format.fmt.pix.width, ctx->format.fmt.pix.height, ctx->format.fmt.pix.bytesperline, ctx->format.fmt.pix.sizeimage); ++ } ++ ++ ret = ff_decode_get_hw_frames_ctx(avctx, AV_HWDEVICE_TYPE_DRM); ++ if (ret < 0) ++ goto fail; ++ ++ ret = ioctl(ctx->video_fd, VIDIOC_STREAMON, &ctx->output_type); ++ if (ret < 0) { ++ av_log(avctx, AV_LOG_ERROR, "%s: output stream on failed, %s (%d)\n", __func__, strerror(errno), errno); ++ ret = AVERROR(EINVAL); ++ goto fail; ++ } ++ ++ ret = ioctl(ctx->video_fd, VIDIOC_STREAMON, &ctx->format.type); ++ if (ret < 0) { ++ av_log(avctx, AV_LOG_ERROR, "%s: capture stream on failed, %s (%d)\n", __func__, strerror(errno), errno); ++ ret = AVERROR(EINVAL); ++ goto fail; ++ } ++ ++ return 0; ++ ++fail: ++ ff_v4l2_request_uninit(avctx); ++ return ret; ++} ++ ++static int v4l2_request_probe_media_device(struct udev_device *device, AVCodecContext *avctx, uint32_t pixelformat, uint32_t buffersize, struct v4l2_ext_control *control, int count) ++{ ++ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data; ++ int ret; ++ struct media_device_info device_info = {0}; ++ struct media_v2_topology topology = {0}; ++ struct media_v2_interface *interfaces = NULL; ++ struct udev *udev = udev_device_get_udev(device); ++ struct udev_device *video_device; ++ dev_t devnum; ++ ++ const char *path = udev_device_get_devnode(device); ++ if (!path) { ++ av_log(avctx, AV_LOG_ERROR, "%s: get media device devnode failed\n", __func__); ++ ret = AVERROR(EINVAL); ++ goto fail; ++ } ++ ++ ctx->media_fd = open(path, O_RDWR, 0); ++ if (ctx->media_fd < 0) { ++ av_log(avctx, AV_LOG_ERROR, "%s: opening %s failed, %s (%d)\n", __func__, path, strerror(errno), errno); ++ ret = AVERROR(EINVAL); ++ goto fail; ++ } ++ ++ ret = ioctl(ctx->media_fd, MEDIA_IOC_DEVICE_INFO, &device_info); ++ if (ret < 0) { ++ av_log(avctx, AV_LOG_ERROR, "%s: get media device info failed, %s (%d)\n", __func__, strerror(errno), errno); ++ ret = AVERROR(EINVAL); ++ goto fail; ++ } ++ ++ av_log(avctx, AV_LOG_DEBUG, "%s: avctx=%p ctx=%p path=%s driver=%s\n", __func__, avctx, ctx, path, device_info.driver); ++ ++ ret = ioctl(ctx->media_fd, MEDIA_IOC_G_TOPOLOGY, &topology); ++ 
if (ret < 0) { ++ av_log(avctx, AV_LOG_ERROR, "%s: get media topology failed, %s (%d)\n", __func__, strerror(errno), errno); ++ ret = AVERROR(EINVAL); ++ goto fail; ++ } ++ ++ if (topology.num_interfaces <= 0) { ++ av_log(avctx, AV_LOG_ERROR, "%s: media device has no interfaces\n", __func__); ++ ret = AVERROR(EINVAL); ++ goto fail; ++ } ++ ++ interfaces = av_mallocz(topology.num_interfaces * sizeof(struct media_v2_interface)); ++ if (!interfaces) { ++ av_log(avctx, AV_LOG_ERROR, "%s: allocating media interface struct failed\n", __func__); ++ ret = AVERROR(ENOMEM); ++ goto fail; ++ } ++ ++ topology.ptr_interfaces = (__u64)(uintptr_t)interfaces; ++ ret = ioctl(ctx->media_fd, MEDIA_IOC_G_TOPOLOGY, &topology); ++ if (ret < 0) { ++ av_log(avctx, AV_LOG_ERROR, "%s: get media topology failed, %s (%d)\n", __func__, strerror(errno), errno); ++ ret = AVERROR(EINVAL); ++ goto fail; ++ } ++ ++ ret = AVERROR(EINVAL); ++ for (int i = 0; i < topology.num_interfaces; i++) { ++ if (interfaces[i].intf_type != MEDIA_INTF_T_V4L_VIDEO) ++ continue; ++ ++ devnum = makedev(interfaces[i].devnode.major, interfaces[i].devnode.minor); ++ video_device = udev_device_new_from_devnum(udev, 'c', devnum); ++ if (!video_device) { ++ av_log(avctx, AV_LOG_ERROR, "%s: video_device=%p\n", __func__, video_device); ++ continue; ++ } ++ ++ ret = v4l2_request_probe_video_device(video_device, avctx, pixelformat, buffersize, control, count); ++ udev_device_unref(video_device); ++ ++ if (!ret) ++ break; ++ } ++ ++ av_freep(&interfaces); ++ return ret; ++ ++fail: ++ av_freep(&interfaces); ++ if (ctx->media_fd >= 0) { ++ close(ctx->media_fd); ++ ctx->media_fd = -1; ++ } ++ return ret; ++} ++ ++int ff_v4l2_request_init(AVCodecContext *avctx, uint32_t pixelformat, uint32_t buffersize, struct v4l2_ext_control *control, int count) ++{ ++ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data; ++ int ret = AVERROR(EINVAL); ++ struct udev *udev; ++ struct udev_enumerate *enumerate; ++ struct udev_list_entry *devices; ++ struct udev_list_entry *entry; ++ struct udev_device *device; ++ ++ av_log(avctx, AV_LOG_DEBUG, "%s: avctx=%p hw_device_ctx=%p hw_frames_ctx=%p\n", __func__, avctx, avctx->hw_device_ctx, avctx->hw_frames_ctx); ++ ++ ctx->media_fd = -1; ++ ctx->video_fd = -1; ++ ctx->timestamp = 0; ++ ++ udev = udev_new(); ++ if (!udev) { ++ av_log(avctx, AV_LOG_ERROR, "%s: allocating udev context failed\n", __func__); ++ ret = AVERROR(ENOMEM); ++ goto fail; ++ } ++ ++ enumerate = udev_enumerate_new(udev); ++ if (!enumerate) { ++ av_log(avctx, AV_LOG_ERROR, "%s: allocating udev enumerator failed\n", __func__); ++ ret = AVERROR(ENOMEM); ++ goto fail; ++ } ++ ++ udev_enumerate_add_match_subsystem(enumerate, "media"); ++ udev_enumerate_scan_devices(enumerate); ++ ++ devices = udev_enumerate_get_list_entry(enumerate); ++ udev_list_entry_foreach(entry, devices) { ++ const char *path = udev_list_entry_get_name(entry); ++ if (!path) ++ continue; ++ ++ device = udev_device_new_from_syspath(udev, path); ++ if (!device) ++ continue; ++ ++ ret = v4l2_request_probe_media_device(device, avctx, pixelformat, buffersize, control, count); ++ udev_device_unref(device); ++ ++ if (!ret) ++ break; ++ } ++ ++ udev_enumerate_unref(enumerate); ++ ++ if (!ret) ++ ret = v4l2_request_init_context(avctx); ++ ++fail: ++ udev_unref(udev); ++ return ret; ++} ++ ++int ff_v4l2_request_uninit(AVCodecContext *avctx) ++{ ++ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data; ++ int ret; ++ ++ av_log(avctx, AV_LOG_DEBUG, "%s: avctx=%p ctx=%p\n", __func__, avctx, 
ctx); ++ ++ if (ctx->video_fd >= 0) { ++ ret = ioctl(ctx->video_fd, VIDIOC_STREAMOFF, &ctx->output_type); ++ if (ret < 0) ++ av_log(avctx, AV_LOG_ERROR, "%s: output stream off failed, %s (%d)\n", __func__, strerror(errno), errno); ++ ++ ret = ioctl(ctx->video_fd, VIDIOC_STREAMOFF, &ctx->format.type); ++ if (ret < 0) ++ av_log(avctx, AV_LOG_ERROR, "%s: capture stream off failed, %s (%d)\n", __func__, strerror(errno), errno); ++ } ++ ++ if (avctx->hw_frames_ctx) { ++ AVHWFramesContext *hwfc = (AVHWFramesContext*)avctx->hw_frames_ctx->data; ++ av_buffer_pool_flush(hwfc->pool); ++ } ++ ++ if (ctx->video_fd >= 0) ++ close(ctx->video_fd); ++ ++ if (ctx->media_fd >= 0) ++ close(ctx->media_fd); ++ ++ return 0; ++} ++ ++static int v4l2_request_buffer_alloc(AVCodecContext *avctx, V4L2RequestBuffer *buf, enum v4l2_buf_type type) ++{ ++ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data; ++ int ret; ++ struct v4l2_plane planes[1] = {}; ++ struct v4l2_create_buffers buffers = { ++ .count = 1, ++ .memory = V4L2_MEMORY_MMAP, ++ .format.type = type, ++ }; ++ ++ av_log(avctx, AV_LOG_DEBUG, "%s: avctx=%p buf=%p type=%u\n", __func__, avctx, buf, type); ++ ++ ret = ioctl(ctx->video_fd, VIDIOC_G_FMT, &buffers.format); ++ if (ret < 0) { ++ av_log(avctx, AV_LOG_ERROR, "%s: get format failed for type %u, %s (%d)\n", __func__, type, strerror(errno), errno); ++ return ret; ++ } ++ ++ if (V4L2_TYPE_IS_MULTIPLANAR(buffers.format.type)) { ++ av_log(avctx, AV_LOG_DEBUG, "%s: pixelformat=%d width=%u height=%u bytesperline=%u sizeimage=%u num_planes=%u\n", __func__, buffers.format.fmt.pix_mp.pixelformat, buffers.format.fmt.pix_mp.width, buffers.format.fmt.pix_mp.height, buffers.format.fmt.pix_mp.plane_fmt[0].bytesperline, buffers.format.fmt.pix_mp.plane_fmt[0].sizeimage, buffers.format.fmt.pix_mp.num_planes); ++ } else { ++ av_log(avctx, AV_LOG_DEBUG, "%s: pixelformat=%d width=%u height=%u bytesperline=%u sizeimage=%u\n", __func__, buffers.format.fmt.pix.pixelformat, buffers.format.fmt.pix.width, buffers.format.fmt.pix.height, buffers.format.fmt.pix.bytesperline, buffers.format.fmt.pix.sizeimage); ++ } ++ ++ ret = ioctl(ctx->video_fd, VIDIOC_CREATE_BUFS, &buffers); ++ if (ret < 0) { ++ av_log(avctx, AV_LOG_ERROR, "%s: create buffers failed for type %u, %s (%d)\n", __func__, type, strerror(errno), errno); ++ return ret; ++ } ++ ++ if (V4L2_TYPE_IS_MULTIPLANAR(type)) { ++ buf->width = buffers.format.fmt.pix_mp.width; ++ buf->height = buffers.format.fmt.pix_mp.height; ++ buf->size = buffers.format.fmt.pix_mp.plane_fmt[0].sizeimage; ++ buf->buffer.length = 1; ++ buf->buffer.m.planes = planes; ++ } else { ++ buf->width = buffers.format.fmt.pix.width; ++ buf->height = buffers.format.fmt.pix.height; ++ buf->size = buffers.format.fmt.pix.sizeimage; ++ } ++ ++ buf->index = buffers.index; ++ buf->capabilities = buffers.capabilities; ++ buf->used = 0; ++ ++ buf->buffer.type = type; ++ buf->buffer.memory = V4L2_MEMORY_MMAP; ++ buf->buffer.index = buf->index; ++ ++ ret = ioctl(ctx->video_fd, VIDIOC_QUERYBUF, &buf->buffer); ++ if (ret < 0) { ++ av_log(avctx, AV_LOG_ERROR, "%s: query buffer %d failed, %s (%d)\n", __func__, buf->index, strerror(errno), errno); ++ return ret; ++ } ++ ++ if (V4L2_TYPE_IS_OUTPUT(type)) { ++ void *addr = mmap(NULL, buf->size, PROT_READ | PROT_WRITE, MAP_SHARED, ctx->video_fd, V4L2_TYPE_IS_MULTIPLANAR(type) ? 
buf->buffer.m.planes[0].m.mem_offset : buf->buffer.m.offset); ++ if (addr == MAP_FAILED) { ++ av_log(avctx, AV_LOG_ERROR, "%s: mmap failed, %s (%d)\n", __func__, strerror(errno), errno); ++ return -1; ++ } ++ ++ buf->addr = (uint8_t*)addr; ++ } else { ++ struct v4l2_exportbuffer exportbuffer = { ++ .type = type, ++ .index = buf->index, ++ .flags = O_RDONLY, ++ }; ++ ++ ret = ioctl(ctx->video_fd, VIDIOC_EXPBUF, &exportbuffer); ++ if (ret < 0) { ++ av_log(avctx, AV_LOG_ERROR, "%s: export buffer %d failed, %s (%d)\n", __func__, buf->index, strerror(errno), errno); ++ return ret; ++ } ++ ++ buf->fd = exportbuffer.fd; ++ } ++ ++ av_log(avctx, AV_LOG_DEBUG, "%s: buf=%p index=%d fd=%d addr=%p width=%u height=%u size=%u\n", __func__, buf, buf->index, buf->fd, buf->addr, buf->width, buf->height, buf->size); ++ return 0; ++} ++ ++static void v4l2_request_buffer_free(V4L2RequestBuffer *buf) ++{ ++ av_log(NULL, AV_LOG_DEBUG, "%s: buf=%p index=%d fd=%d addr=%p width=%u height=%u size=%u\n", __func__, buf, buf->index, buf->fd, buf->addr, buf->width, buf->height, buf->size); ++ ++ if (buf->addr) ++ munmap(buf->addr, buf->size); ++ ++ if (buf->fd >= 0) ++ close(buf->fd); ++} ++ ++static void v4l2_request_frame_free(void *opaque, uint8_t *data) ++{ ++ AVCodecContext *avctx = opaque; ++ V4L2RequestDescriptor *req = (V4L2RequestDescriptor*)data; ++ ++ av_log(NULL, AV_LOG_DEBUG, "%s: avctx=%p data=%p request_fd=%d\n", __func__, avctx, data, req->request_fd); ++ ++ if (req->request_fd >= 0) ++ close(req->request_fd); ++ ++ v4l2_request_buffer_free(&req->capture); ++ v4l2_request_buffer_free(&req->output); ++ ++ av_free(data); ++} ++ ++static AVBufferRef *v4l2_request_frame_alloc(void *opaque, int size) ++{ ++ AVCodecContext *avctx = opaque; ++ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data; ++ V4L2RequestDescriptor *req; ++ AVBufferRef *ref; ++ uint8_t *data; ++ int ret; ++ ++ data = av_mallocz(size); ++ if (!data) ++ return NULL; ++ ++ av_log(avctx, AV_LOG_DEBUG, "%s: avctx=%p size=%d data=%p\n", __func__, avctx, size, data); ++ ++ ref = av_buffer_create(data, size, v4l2_request_frame_free, avctx, 0); ++ if (!ref) { ++ av_freep(&data); ++ return NULL; ++ } ++ ++ req = (V4L2RequestDescriptor*)data; ++ req->request_fd = -1; ++ req->output.fd = -1; ++ req->capture.fd = -1; ++ ++ ret = v4l2_request_buffer_alloc(avctx, &req->output, ctx->output_type); ++ if (ret < 0) { ++ av_buffer_unref(&ref); ++ return NULL; ++ } ++ ++ ret = v4l2_request_buffer_alloc(avctx, &req->capture, ctx->format.type); ++ if (ret < 0) { ++ av_buffer_unref(&ref); ++ return NULL; ++ } ++ ++ ret = ioctl(ctx->media_fd, MEDIA_IOC_REQUEST_ALLOC, &req->request_fd); ++ if (ret < 0) { ++ av_log(avctx, AV_LOG_ERROR, "%s: request alloc failed, %s (%d)\n", __func__, strerror(errno), errno); ++ av_buffer_unref(&ref); ++ return NULL; ++ } ++ ++ av_log(avctx, AV_LOG_DEBUG, "%s: avctx=%p size=%d data=%p request_fd=%d\n", __func__, avctx, size, data, req->request_fd); ++ return ref; ++} ++ ++static void v4l2_request_pool_free(void *opaque) ++{ ++ av_log(NULL, AV_LOG_DEBUG, "%s: opaque=%p\n", __func__, opaque); ++} ++ ++static void v4l2_request_hwframe_ctx_free(AVHWFramesContext *hwfc) ++{ ++ av_log(NULL, AV_LOG_DEBUG, "%s: hwfc=%p pool=%p\n", __func__, hwfc, hwfc->pool); ++ ++ av_buffer_pool_flush(hwfc->pool); ++ av_buffer_pool_uninit(&hwfc->pool); ++} ++ ++int ff_v4l2_request_frame_params(AVCodecContext *avctx, AVBufferRef *hw_frames_ctx) ++{ ++ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data; ++ AVHWFramesContext *hwfc = 
(AVHWFramesContext*)hw_frames_ctx->data; ++ ++ hwfc->format = AV_PIX_FMT_DRM_PRIME; ++ hwfc->sw_format = AV_PIX_FMT_NV12; ++ if (V4L2_TYPE_IS_MULTIPLANAR(ctx->format.type)) { ++ hwfc->width = ctx->format.fmt.pix_mp.width; ++ hwfc->height = ctx->format.fmt.pix_mp.height; ++ } else { ++ hwfc->width = ctx->format.fmt.pix.width; ++ hwfc->height = ctx->format.fmt.pix.height; ++ } ++ ++ hwfc->pool = av_buffer_pool_init2(sizeof(V4L2RequestDescriptor), avctx, v4l2_request_frame_alloc, v4l2_request_pool_free); ++ if (!hwfc->pool) ++ return AVERROR(ENOMEM); ++ ++ hwfc->free = v4l2_request_hwframe_ctx_free; ++ ++ hwfc->initial_pool_size = 1; ++ ++ switch (avctx->codec_id) { ++ case AV_CODEC_ID_VP9: ++ hwfc->initial_pool_size += 8; ++ break; ++ case AV_CODEC_ID_VP8: ++ hwfc->initial_pool_size += 3; ++ break; ++ default: ++ hwfc->initial_pool_size += 2; ++ } ++ ++ av_log(avctx, AV_LOG_DEBUG, "%s: avctx=%p ctx=%p hw_frames_ctx=%p hwfc=%p pool=%p width=%d height=%d initial_pool_size=%d\n", __func__, avctx, ctx, hw_frames_ctx, hwfc, hwfc->pool, hwfc->width, hwfc->height, hwfc->initial_pool_size); ++ ++ return 0; ++} +diff --git a/libavcodec/v4l2_request.h b/libavcodec/v4l2_request.h +new file mode 100644 +index 0000000000..58d2aa70af +--- /dev/null ++++ b/libavcodec/v4l2_request.h +@@ -0,0 +1,77 @@ ++/* ++ * This file is part of FFmpeg. ++ * ++ * FFmpeg is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2.1 of the License, or (at your option) any later version. ++ * ++ * FFmpeg is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * Lesser General Public License for more details. 
++ * ++ * You should have received a copy of the GNU Lesser General Public ++ * License along with FFmpeg; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ++ */ ++ ++#ifndef AVCODEC_V4L2_REQUEST_H ++#define AVCODEC_V4L2_REQUEST_H ++ ++#include ++ ++#include "libavutil/hwcontext_drm.h" ++ ++typedef struct V4L2RequestContext { ++ int video_fd; ++ int media_fd; ++ enum v4l2_buf_type output_type; ++ struct v4l2_format format; ++ int timestamp; ++} V4L2RequestContext; ++ ++typedef struct V4L2RequestBuffer { ++ int index; ++ int fd; ++ uint8_t *addr; ++ uint32_t width; ++ uint32_t height; ++ uint32_t size; ++ uint32_t used; ++ uint32_t capabilities; ++ struct v4l2_buffer buffer; ++} V4L2RequestBuffer; ++ ++typedef struct V4L2RequestDescriptor { ++ AVDRMFrameDescriptor drm; ++ int request_fd; ++ V4L2RequestBuffer output; ++ V4L2RequestBuffer capture; ++} V4L2RequestDescriptor; ++ ++uint64_t ff_v4l2_request_get_capture_timestamp(AVFrame *frame); ++ ++int ff_v4l2_request_reset_frame(AVCodecContext *avctx, AVFrame *frame); ++ ++int ff_v4l2_request_append_output_buffer(AVCodecContext *avctx, AVFrame *frame, const uint8_t *data, uint32_t size); ++ ++int ff_v4l2_request_set_controls(AVCodecContext *avctx, struct v4l2_ext_control *control, int count); ++ ++int ff_v4l2_request_get_controls(AVCodecContext *avctx, struct v4l2_ext_control *control, int count); ++ ++int ff_v4l2_request_query_control(AVCodecContext *avctx, struct v4l2_query_ext_ctrl *control); ++ ++int ff_v4l2_request_query_control_default_value(AVCodecContext *avctx, uint32_t id); ++ ++int ff_v4l2_request_decode_slice(AVCodecContext *avctx, AVFrame *frame, struct v4l2_ext_control *control, int count, int first_slice, int last_slice); ++ ++int ff_v4l2_request_decode_frame(AVCodecContext *avctx, AVFrame *frame, struct v4l2_ext_control *control, int count); ++ ++int ff_v4l2_request_init(AVCodecContext *avctx, uint32_t pixelformat, uint32_t buffersize, struct v4l2_ext_control *control, int count); ++ ++int ff_v4l2_request_uninit(AVCodecContext *avctx); ++ ++int ff_v4l2_request_frame_params(AVCodecContext *avctx, AVBufferRef *hw_frames_ctx); ++ ++#endif /* AVCODEC_V4L2_REQUEST_H */ + +From ac7d0ac7775219d769deca3fb34c92b25411c947 Mon Sep 17 00:00:00 2001 +From: Jonas Karlman +Date: Sat, 15 Dec 2018 22:32:16 +0100 +Subject: [PATCH 03/18] Add V4L2 request API mpeg2 hwaccel + +Signed-off-by: Jonas Karlman +--- + configure | 3 + + libavcodec/Makefile | 1 + + libavcodec/hwaccels.h | 1 + + libavcodec/mpeg12dec.c | 6 ++ + libavcodec/v4l2_request_mpeg2.c | 154 ++++++++++++++++++++++++++++++++ + 5 files changed, 165 insertions(+) + create mode 100644 libavcodec/v4l2_request_mpeg2.c + +diff --git a/configure b/configure +index 9f9909a236..6b157d6d3e 100755 +--- a/configure ++++ b/configure +@@ -2967,6 +2967,8 @@ mpeg2_dxva2_hwaccel_deps="dxva2" + mpeg2_dxva2_hwaccel_select="mpeg2video_decoder" + mpeg2_nvdec_hwaccel_deps="nvdec" + mpeg2_nvdec_hwaccel_select="mpeg2video_decoder" ++mpeg2_v4l2request_hwaccel_deps="v4l2_request mpeg2_v4l2_request" ++mpeg2_v4l2request_hwaccel_select="mpeg2video_decoder" + mpeg2_vaapi_hwaccel_deps="vaapi" + mpeg2_vaapi_hwaccel_select="mpeg2video_decoder" + mpeg2_vdpau_hwaccel_deps="vdpau" +@@ -6567,6 +6569,7 @@ if enabled v4l2_m2m; then + fi + + check_func_headers "linux/media.h linux/videodev2.h" v4l2_timeval_to_ns ++check_cc mpeg2_v4l2_request linux/videodev2.h "int i = V4L2_PIX_FMT_MPEG2_SLICE;" + + check_headers sys/videoio.h + test_code cc sys/videoio.h 
"struct v4l2_frmsizeenum vfse; vfse.discrete.width = 0;" && enable_sanitized struct_v4l2_frmivalenum_discrete +diff --git a/libavcodec/Makefile b/libavcodec/Makefile +index d742205168..8963bd3e91 100644 +--- a/libavcodec/Makefile ++++ b/libavcodec/Makefile +@@ -922,6 +922,7 @@ OBJS-$(CONFIG_MPEG2_D3D11VA_HWACCEL) += dxva2_mpeg2.o + OBJS-$(CONFIG_MPEG2_DXVA2_HWACCEL) += dxva2_mpeg2.o + OBJS-$(CONFIG_MPEG2_NVDEC_HWACCEL) += nvdec_mpeg12.o + OBJS-$(CONFIG_MPEG2_QSV_HWACCEL) += qsvdec_other.o ++OBJS-$(CONFIG_MPEG2_V4L2REQUEST_HWACCEL) += v4l2_request_mpeg2.o + OBJS-$(CONFIG_MPEG2_VAAPI_HWACCEL) += vaapi_mpeg2.o + OBJS-$(CONFIG_MPEG2_VDPAU_HWACCEL) += vdpau_mpeg12.o + OBJS-$(CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL) += videotoolbox.o +diff --git a/libavcodec/hwaccels.h b/libavcodec/hwaccels.h +index 6109c89bd6..172a546bb2 100644 +--- a/libavcodec/hwaccels.h ++++ b/libavcodec/hwaccels.h +@@ -47,6 +47,7 @@ extern const AVHWAccel ff_mpeg2_d3d11va_hwaccel; + extern const AVHWAccel ff_mpeg2_d3d11va2_hwaccel; + extern const AVHWAccel ff_mpeg2_nvdec_hwaccel; + extern const AVHWAccel ff_mpeg2_dxva2_hwaccel; ++extern const AVHWAccel ff_mpeg2_v4l2request_hwaccel; + extern const AVHWAccel ff_mpeg2_vaapi_hwaccel; + extern const AVHWAccel ff_mpeg2_vdpau_hwaccel; + extern const AVHWAccel ff_mpeg2_videotoolbox_hwaccel; +diff --git a/libavcodec/mpeg12dec.c b/libavcodec/mpeg12dec.c +index 99e56532a5..15aaf97a34 100644 +--- a/libavcodec/mpeg12dec.c ++++ b/libavcodec/mpeg12dec.c +@@ -1154,6 +1154,9 @@ static const enum AVPixelFormat mpeg2_hwaccel_pixfmt_list_420[] = { + #endif + #if CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL + AV_PIX_FMT_VIDEOTOOLBOX, ++#endif ++#if CONFIG_MPEG2_V4L2REQUEST_HWACCEL ++ AV_PIX_FMT_DRM_PRIME, + #endif + AV_PIX_FMT_YUV420P, + AV_PIX_FMT_NONE +@@ -2952,6 +2955,9 @@ AVCodec ff_mpeg2video_decoder = { + #endif + #if CONFIG_MPEG2_XVMC_HWACCEL + HWACCEL_XVMC(mpeg2), ++#endif ++#if CONFIG_MPEG2_V4L2REQUEST_HWACCEL ++ HWACCEL_V4L2REQUEST(mpeg2), + #endif + NULL + }, +diff --git a/libavcodec/v4l2_request_mpeg2.c b/libavcodec/v4l2_request_mpeg2.c +new file mode 100644 +index 0000000000..88d86cc4c2 +--- /dev/null ++++ b/libavcodec/v4l2_request_mpeg2.c +@@ -0,0 +1,154 @@ ++/* ++ * This file is part of FFmpeg. ++ * ++ * FFmpeg is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2.1 of the License, or (at your option) any later version. ++ * ++ * FFmpeg is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * Lesser General Public License for more details. 
++ * ++ * You should have received a copy of the GNU Lesser General Public ++ * License along with FFmpeg; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ++ */ ++ ++#include "hwconfig.h" ++#include "mpegvideo.h" ++#include "v4l2_request.h" ++ ++typedef struct V4L2RequestControlsMPEG2 { ++ struct v4l2_ctrl_mpeg2_slice_params slice_params; ++ struct v4l2_ctrl_mpeg2_quantization quantization; ++} V4L2RequestControlsMPEG2; ++ ++static int v4l2_request_mpeg2_start_frame(AVCodecContext *avctx, ++ av_unused const uint8_t *buffer, ++ av_unused uint32_t size) ++{ ++ const MpegEncContext *s = avctx->priv_data; ++ V4L2RequestControlsMPEG2 *controls = s->current_picture_ptr->hwaccel_picture_private; ++ V4L2RequestDescriptor *req = (V4L2RequestDescriptor*)s->current_picture_ptr->f->data[0]; ++ ++ controls->slice_params = (struct v4l2_ctrl_mpeg2_slice_params) { ++ .bit_size = 0, ++ .data_bit_offset = 0, ++ ++ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Slice */ ++ .quantiser_scale_code = s->qscale >> 1, ++ ++ .sequence = { ++ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence header */ ++ .horizontal_size = s->width, ++ .vertical_size = s->height, ++ .vbv_buffer_size = req->output.size, ++ ++ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence extension */ ++ .profile_and_level_indication = 0, ++ .progressive_sequence = s->progressive_sequence, ++ .chroma_format = s->chroma_format, ++ }, ++ ++ .picture = { ++ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture header */ ++ .picture_coding_type = s->pict_type, ++ ++ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture coding extension */ ++ .f_code[0][0] = s->mpeg_f_code[0][0], ++ .f_code[0][1] = s->mpeg_f_code[0][1], ++ .f_code[1][0] = s->mpeg_f_code[1][0], ++ .f_code[1][1] = s->mpeg_f_code[1][1], ++ .intra_dc_precision = s->intra_dc_precision, ++ .picture_structure = s->picture_structure, ++ .top_field_first = s->top_field_first, ++ .frame_pred_frame_dct = s->frame_pred_frame_dct, ++ .concealment_motion_vectors = s->concealment_motion_vectors, ++ .q_scale_type = s->q_scale_type, ++ .intra_vlc_format = s->intra_vlc_format, ++ .alternate_scan = s->alternate_scan, ++ .repeat_first_field = s->repeat_first_field, ++ .progressive_frame = s->progressive_frame, ++ }, ++ }; ++ ++ switch (s->pict_type) { ++ case AV_PICTURE_TYPE_B: ++ controls->slice_params.backward_ref_ts = ff_v4l2_request_get_capture_timestamp(s->next_picture.f); ++ // fall-through ++ case AV_PICTURE_TYPE_P: ++ controls->slice_params.forward_ref_ts = ff_v4l2_request_get_capture_timestamp(s->last_picture.f); ++ } ++ ++ controls->quantization = (struct v4l2_ctrl_mpeg2_quantization) { ++ /* ISO/IEC 13818-2, ITU-T Rec. 
H.262: Quant matrix extension */ ++ .load_intra_quantiser_matrix = 1, ++ .load_non_intra_quantiser_matrix = 1, ++ .load_chroma_intra_quantiser_matrix = 1, ++ .load_chroma_non_intra_quantiser_matrix = 1, ++ }; ++ ++ for (int i = 0; i < 64; i++) { ++ int n = s->idsp.idct_permutation[ff_zigzag_direct[i]]; ++ controls->quantization.intra_quantiser_matrix[i] = s->intra_matrix[n]; ++ controls->quantization.non_intra_quantiser_matrix[i] = s->inter_matrix[n]; ++ controls->quantization.chroma_intra_quantiser_matrix[i] = s->chroma_intra_matrix[n]; ++ controls->quantization.chroma_non_intra_quantiser_matrix[i] = s->chroma_inter_matrix[n]; ++ } ++ ++ return ff_v4l2_request_reset_frame(avctx, s->current_picture_ptr->f); ++} ++ ++static int v4l2_request_mpeg2_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size) ++{ ++ const MpegEncContext *s = avctx->priv_data; ++ ++ return ff_v4l2_request_append_output_buffer(avctx, s->current_picture_ptr->f, buffer, size); ++} ++ ++static int v4l2_request_mpeg2_end_frame(AVCodecContext *avctx) ++{ ++ const MpegEncContext *s = avctx->priv_data; ++ V4L2RequestControlsMPEG2 *controls = s->current_picture_ptr->hwaccel_picture_private; ++ V4L2RequestDescriptor *req = (V4L2RequestDescriptor*)s->current_picture_ptr->f->data[0]; ++ ++ struct v4l2_ext_control control[] = { ++ { ++ .id = V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS, ++ .ptr = &controls->slice_params, ++ .size = sizeof(controls->slice_params), ++ }, ++ { ++ .id = V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION, ++ .ptr = &controls->quantization, ++ .size = sizeof(controls->quantization), ++ }, ++ }; ++ ++ controls->slice_params.bit_size = req->output.used * 8; ++ ++ return ff_v4l2_request_decode_frame(avctx, s->current_picture_ptr->f, control, FF_ARRAY_ELEMS(control)); ++} ++ ++static int v4l2_request_mpeg2_init(AVCodecContext *avctx) ++{ ++ return ff_v4l2_request_init(avctx, V4L2_PIX_FMT_MPEG2_SLICE, 1024 * 1024, NULL, 0); ++} ++ ++const AVHWAccel ff_mpeg2_v4l2request_hwaccel = { ++ .name = "mpeg2_v4l2request", ++ .type = AVMEDIA_TYPE_VIDEO, ++ .id = AV_CODEC_ID_MPEG2VIDEO, ++ .pix_fmt = AV_PIX_FMT_DRM_PRIME, ++ .start_frame = v4l2_request_mpeg2_start_frame, ++ .decode_slice = v4l2_request_mpeg2_decode_slice, ++ .end_frame = v4l2_request_mpeg2_end_frame, ++ .frame_priv_data_size = sizeof(V4L2RequestControlsMPEG2), ++ .init = v4l2_request_mpeg2_init, ++ .uninit = ff_v4l2_request_uninit, ++ .priv_data_size = sizeof(V4L2RequestContext), ++ .frame_params = ff_v4l2_request_frame_params, ++ .caps_internal = HWACCEL_CAP_ASYNC_SAFE, ++}; + +From b22e02f5933366677620bac2583e2edec5c3007c Mon Sep 17 00:00:00 2001 +From: Jernej Skrabec +Date: Sat, 15 Dec 2018 22:32:16 +0100 +Subject: [PATCH 04/18] Add V4L2 request API h264 hwaccel + +Signed-off-by: Jernej Skrabec +Signed-off-by: Jonas Karlman +--- + configure | 3 + + libavcodec/Makefile | 1 + + libavcodec/h264_slice.c | 4 + + libavcodec/h264dec.c | 3 + + libavcodec/hwaccels.h | 1 + + libavcodec/v4l2_request_h264.c | 460 +++++++++++++++++++++++++++++++++ + 6 files changed, 472 insertions(+) + create mode 100644 libavcodec/v4l2_request_h264.c + +diff --git a/configure b/configure +index 6b157d6d3e..1a7720ebe3 100755 +--- a/configure ++++ b/configure +@@ -2925,6 +2925,8 @@ h264_dxva2_hwaccel_deps="dxva2" + h264_dxva2_hwaccel_select="h264_decoder" + h264_nvdec_hwaccel_deps="nvdec" + h264_nvdec_hwaccel_select="h264_decoder" ++h264_v4l2request_hwaccel_deps="v4l2_request h264_v4l2_request" ++h264_v4l2request_hwaccel_select="h264_decoder" + h264_vaapi_hwaccel_deps="vaapi" + 
h264_vaapi_hwaccel_select="h264_decoder" + h264_vdpau_hwaccel_deps="vdpau" +@@ -6569,6 +6571,7 @@ if enabled v4l2_m2m; then + fi + + check_func_headers "linux/media.h linux/videodev2.h" v4l2_timeval_to_ns ++check_cc h264_v4l2_request linux/videodev2.h "int i = V4L2_PIX_FMT_H264_SLICE;" + check_cc mpeg2_v4l2_request linux/videodev2.h "int i = V4L2_PIX_FMT_MPEG2_SLICE;" + + check_headers sys/videoio.h +diff --git a/libavcodec/Makefile b/libavcodec/Makefile +index 8963bd3e91..9a10a292e3 100644 +--- a/libavcodec/Makefile ++++ b/libavcodec/Makefile +@@ -903,6 +903,7 @@ OBJS-$(CONFIG_H264_D3D11VA_HWACCEL) += dxva2_h264.o + OBJS-$(CONFIG_H264_DXVA2_HWACCEL) += dxva2_h264.o + OBJS-$(CONFIG_H264_NVDEC_HWACCEL) += nvdec_h264.o + OBJS-$(CONFIG_H264_QSV_HWACCEL) += qsvdec_h2645.o ++OBJS-$(CONFIG_H264_V4L2REQUEST_HWACCEL) += v4l2_request_h264.o + OBJS-$(CONFIG_H264_VAAPI_HWACCEL) += vaapi_h264.o + OBJS-$(CONFIG_H264_VDPAU_HWACCEL) += vdpau_h264.o + OBJS-$(CONFIG_H264_VIDEOTOOLBOX_HWACCEL) += videotoolbox.o +diff --git a/libavcodec/h264_slice.c b/libavcodec/h264_slice.c +index db8363e4cc..3ae11ac8a7 100644 +--- a/libavcodec/h264_slice.c ++++ b/libavcodec/h264_slice.c +@@ -759,6 +759,7 @@ static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback) + #define HWACCEL_MAX (CONFIG_H264_DXVA2_HWACCEL + \ + (CONFIG_H264_D3D11VA_HWACCEL * 2) + \ + CONFIG_H264_NVDEC_HWACCEL + \ ++ CONFIG_H264_V4L2REQUEST_HWACCEL + \ + CONFIG_H264_VAAPI_HWACCEL + \ + CONFIG_H264_VIDEOTOOLBOX_HWACCEL + \ + CONFIG_H264_VDPAU_HWACCEL) +@@ -843,6 +844,9 @@ static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback) + #endif + #if CONFIG_H264_VIDEOTOOLBOX_HWACCEL + *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX; ++#endif ++#if CONFIG_H264_V4L2REQUEST_HWACCEL ++ *fmt++ = AV_PIX_FMT_DRM_PRIME; + #endif + if (h->avctx->codec->pix_fmts) + choices = h->avctx->codec->pix_fmts; +diff --git a/libavcodec/h264dec.c b/libavcodec/h264dec.c +index 5eedeb3c27..a504c89565 100644 +--- a/libavcodec/h264dec.c ++++ b/libavcodec/h264dec.c +@@ -1102,6 +1102,9 @@ AVCodec ff_h264_decoder = { + #endif + #if CONFIG_H264_VIDEOTOOLBOX_HWACCEL + HWACCEL_VIDEOTOOLBOX(h264), ++#endif ++#if CONFIG_H264_V4L2REQUEST_HWACCEL ++ HWACCEL_V4L2REQUEST(h264), + #endif + NULL + }, +diff --git a/libavcodec/hwaccels.h b/libavcodec/hwaccels.h +index 172a546bb2..44e00e79b5 100644 +--- a/libavcodec/hwaccels.h ++++ b/libavcodec/hwaccels.h +@@ -27,6 +27,7 @@ extern const AVHWAccel ff_h264_d3d11va_hwaccel; + extern const AVHWAccel ff_h264_d3d11va2_hwaccel; + extern const AVHWAccel ff_h264_dxva2_hwaccel; + extern const AVHWAccel ff_h264_nvdec_hwaccel; ++extern const AVHWAccel ff_h264_v4l2request_hwaccel; + extern const AVHWAccel ff_h264_vaapi_hwaccel; + extern const AVHWAccel ff_h264_vdpau_hwaccel; + extern const AVHWAccel ff_h264_videotoolbox_hwaccel; +diff --git a/libavcodec/v4l2_request_h264.c b/libavcodec/v4l2_request_h264.c +new file mode 100644 +index 0000000000..94b9aca8ad +--- /dev/null ++++ b/libavcodec/v4l2_request_h264.c +@@ -0,0 +1,460 @@ ++/* ++ * This file is part of FFmpeg. ++ * ++ * FFmpeg is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2.1 of the License, or (at your option) any later version. ++ * ++ * FFmpeg is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU ++ * Lesser General Public License for more details. ++ * ++ * You should have received a copy of the GNU Lesser General Public ++ * License along with FFmpeg; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ++ */ ++ ++#include "h264dec.h" ++#include "hwconfig.h" ++#include "v4l2_request.h" ++ ++typedef struct V4L2RequestControlsH264 { ++ struct v4l2_ctrl_h264_sps sps; ++ struct v4l2_ctrl_h264_pps pps; ++ struct v4l2_ctrl_h264_scaling_matrix scaling_matrix; ++ struct v4l2_ctrl_h264_decode_params decode_params; ++ struct v4l2_ctrl_h264_slice_params slice_params[MAX_SLICES]; ++ int first_slice; ++} V4L2RequestControlsH264; ++ ++typedef struct V4L2RequestContextH264 { ++ V4L2RequestContext base; ++ int decode_mode; ++ int start_code; ++ int max_slices; ++} V4L2RequestContextH264; ++ ++static uint8_t nalu_slice_start_code[] = { 0x00, 0x00, 0x01 }; ++ ++static void fill_weight_factors(struct v4l2_h264_weight_factors *factors, int list, const H264SliceContext *sl) ++{ ++ for (int i = 0; i < sl->ref_count[list]; i++) { ++ if (sl->pwt.luma_weight_flag[list]) { ++ factors->luma_weight[i] = sl->pwt.luma_weight[i][list][0]; ++ factors->luma_offset[i] = sl->pwt.luma_weight[i][list][1]; ++ } else { ++ factors->luma_weight[i] = 1 << sl->pwt.luma_log2_weight_denom; ++ factors->luma_offset[i] = 0; ++ } ++ for (int j = 0; j < 2; j++) { ++ if (sl->pwt.chroma_weight_flag[list]) { ++ factors->chroma_weight[i][j] = sl->pwt.chroma_weight[i][list][j][0]; ++ factors->chroma_offset[i][j] = sl->pwt.chroma_weight[i][list][j][1]; ++ } else { ++ factors->chroma_weight[i][j] = 1 << sl->pwt.chroma_log2_weight_denom; ++ factors->chroma_offset[i][j] = 0; ++ } ++ } ++ } ++} ++ ++static void fill_dpb_entry(struct v4l2_h264_dpb_entry *entry, const H264Picture *pic) ++{ ++ entry->reference_ts = ff_v4l2_request_get_capture_timestamp(pic->f); ++ entry->frame_num = pic->frame_num; ++ entry->pic_num = pic->pic_id; ++ entry->flags = V4L2_H264_DPB_ENTRY_FLAG_VALID; ++ if (pic->reference) ++ entry->flags |= V4L2_H264_DPB_ENTRY_FLAG_ACTIVE; ++ if (pic->long_ref) ++ entry->flags |= V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM; ++ if (pic->field_poc[0] != INT_MAX) ++ entry->top_field_order_cnt = pic->field_poc[0]; ++ if (pic->field_poc[1] != INT_MAX) ++ entry->bottom_field_order_cnt = pic->field_poc[1]; ++} ++ ++static void fill_dpb(struct v4l2_ctrl_h264_decode_params *decode, const H264Context *h) ++{ ++ int entries = 0; ++ ++ for (int i = 0; i < h->short_ref_count; i++) { ++ const H264Picture *pic = h->short_ref[i]; ++ if (pic && (pic->field_poc[0] != INT_MAX || pic->field_poc[1] != INT_MAX)) ++ fill_dpb_entry(&decode->dpb[entries++], pic); ++ } ++ ++ if (!h->long_ref_count) ++ return; ++ ++ for (int i = 0; i < FF_ARRAY_ELEMS(h->long_ref); i++) { ++ const H264Picture *pic = h->long_ref[i]; ++ if (pic && (pic->field_poc[0] != INT_MAX || pic->field_poc[1] != INT_MAX)) ++ fill_dpb_entry(&decode->dpb[entries++], pic); ++ } ++} ++ ++static uint8_t get_dpb_index(struct v4l2_ctrl_h264_decode_params *decode, const H264Ref *ref) ++{ ++ uint64_t timestamp; ++ ++ if (!ref->parent) ++ return 0; ++ ++ timestamp = ff_v4l2_request_get_capture_timestamp(ref->parent->f); ++ ++ for (uint8_t i = 0; i < FF_ARRAY_ELEMS(decode->dpb); i++) { ++ struct v4l2_h264_dpb_entry *entry = &decode->dpb[i]; ++ if ((entry->flags & V4L2_H264_DPB_ENTRY_FLAG_VALID) && ++ entry->reference_ts == timestamp) ++ return i; ++ } ++ ++ return 0; ++} ++ ++static void fill_sps(struct v4l2_ctrl_h264_sps *ctrl, 
const H264Context *h) ++{ ++ const SPS *sps = h->ps.sps; ++ ++ *ctrl = (struct v4l2_ctrl_h264_sps) { ++ .profile_idc = sps->profile_idc, ++ .constraint_set_flags = sps->constraint_set_flags, ++ .level_idc = sps->level_idc, ++ .seq_parameter_set_id = sps->sps_id, ++ .chroma_format_idc = sps->chroma_format_idc, ++ .bit_depth_luma_minus8 = sps->bit_depth_luma - 8, ++ .bit_depth_chroma_minus8 = sps->bit_depth_chroma - 8, ++ .log2_max_frame_num_minus4 = sps->log2_max_frame_num - 4, ++ .pic_order_cnt_type = sps->poc_type, ++ .log2_max_pic_order_cnt_lsb_minus4 = sps->log2_max_poc_lsb - 4, ++ .max_num_ref_frames = sps->ref_frame_count, ++ .num_ref_frames_in_pic_order_cnt_cycle = sps->poc_cycle_length, ++ //.offset_for_ref_frame[255] - not required? not set by libva-v4l2-request - copy sps->offset_for_ref_frame ++ .offset_for_non_ref_pic = sps->offset_for_non_ref_pic, ++ .offset_for_top_to_bottom_field = sps->offset_for_top_to_bottom_field, ++ .pic_width_in_mbs_minus1 = h->mb_width - 1, ++ .pic_height_in_map_units_minus1 = sps->frame_mbs_only_flag ? h->mb_height - 1 : h->mb_height / 2 - 1, ++ }; ++ ++ if (sps->residual_color_transform_flag) ++ ctrl->flags |= V4L2_H264_SPS_FLAG_SEPARATE_COLOUR_PLANE; ++ if (sps->transform_bypass) ++ ctrl->flags |= V4L2_H264_SPS_FLAG_QPPRIME_Y_ZERO_TRANSFORM_BYPASS; ++ if (sps->delta_pic_order_always_zero_flag) ++ ctrl->flags |= V4L2_H264_SPS_FLAG_DELTA_PIC_ORDER_ALWAYS_ZERO; ++ if (sps->gaps_in_frame_num_allowed_flag) ++ ctrl->flags |= V4L2_H264_SPS_FLAG_GAPS_IN_FRAME_NUM_VALUE_ALLOWED; ++ if (sps->frame_mbs_only_flag) ++ ctrl->flags |= V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY; ++ if (sps->mb_aff) ++ ctrl->flags |= V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD; ++ if (sps->direct_8x8_inference_flag) ++ ctrl->flags |= V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE; ++} ++ ++static void fill_pps(struct v4l2_ctrl_h264_pps *ctrl, const H264Context *h) ++{ ++ const SPS *sps = h->ps.sps; ++ const PPS *pps = h->ps.pps; ++ const H264SliceContext *sl = &h->slice_ctx[0]; ++ int qp_bd_offset = 6 * (sps->bit_depth_luma - 8); ++ ++ *ctrl = (struct v4l2_ctrl_h264_pps) { ++ .pic_parameter_set_id = sl->pps_id, ++ .seq_parameter_set_id = pps->sps_id, ++ .num_slice_groups_minus1 = pps->slice_group_count - 1, ++ .num_ref_idx_l0_default_active_minus1 = pps->ref_count[0] - 1, ++ .num_ref_idx_l1_default_active_minus1 = pps->ref_count[1] - 1, ++ .weighted_bipred_idc = pps->weighted_bipred_idc, ++ .pic_init_qp_minus26 = pps->init_qp - 26 - qp_bd_offset, ++ .pic_init_qs_minus26 = pps->init_qs - 26 - qp_bd_offset, ++ .chroma_qp_index_offset = pps->chroma_qp_index_offset[0], ++ .second_chroma_qp_index_offset = pps->chroma_qp_index_offset[1], ++ }; ++ ++ if (pps->cabac) ++ ctrl->flags |= V4L2_H264_PPS_FLAG_ENTROPY_CODING_MODE; ++ if (pps->pic_order_present) ++ ctrl->flags |= V4L2_H264_PPS_FLAG_BOTTOM_FIELD_PIC_ORDER_IN_FRAME_PRESENT; ++ if (pps->weighted_pred) ++ ctrl->flags |= V4L2_H264_PPS_FLAG_WEIGHTED_PRED; ++ if (pps->deblocking_filter_parameters_present) ++ ctrl->flags |= V4L2_H264_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT; ++ if (pps->constrained_intra_pred) ++ ctrl->flags |= V4L2_H264_PPS_FLAG_CONSTRAINED_INTRA_PRED; ++ if (pps->redundant_pic_cnt_present) ++ ctrl->flags |= V4L2_H264_PPS_FLAG_REDUNDANT_PIC_CNT_PRESENT; ++ if (pps->transform_8x8_mode) ++ ctrl->flags |= V4L2_H264_PPS_FLAG_TRANSFORM_8X8_MODE; ++} ++ ++static int v4l2_request_h264_start_frame(AVCodecContext *avctx, ++ av_unused const uint8_t *buffer, ++ av_unused uint32_t size) ++{ ++ const H264Context *h = avctx->priv_data; ++ const PPS 
*pps = h->ps.pps; ++ const SPS *sps = h->ps.sps; ++ V4L2RequestControlsH264 *controls = h->cur_pic_ptr->hwaccel_picture_private; ++ ++ fill_sps(&controls->sps, h); ++ fill_pps(&controls->pps, h); ++ ++ memcpy(controls->scaling_matrix.scaling_list_4x4, pps->scaling_matrix4, sizeof(controls->scaling_matrix.scaling_list_4x4)); ++ memcpy(controls->scaling_matrix.scaling_list_8x8[0], pps->scaling_matrix8[0], sizeof(controls->scaling_matrix.scaling_list_8x8[0])); ++ memcpy(controls->scaling_matrix.scaling_list_8x8[1], pps->scaling_matrix8[3], sizeof(controls->scaling_matrix.scaling_list_8x8[1])); ++ ++ if (sps->chroma_format_idc == 3) { ++ memcpy(controls->scaling_matrix.scaling_list_8x8[2], pps->scaling_matrix8[1], sizeof(controls->scaling_matrix.scaling_list_8x8[2])); ++ memcpy(controls->scaling_matrix.scaling_list_8x8[3], pps->scaling_matrix8[4], sizeof(controls->scaling_matrix.scaling_list_8x8[3])); ++ memcpy(controls->scaling_matrix.scaling_list_8x8[4], pps->scaling_matrix8[2], sizeof(controls->scaling_matrix.scaling_list_8x8[4])); ++ memcpy(controls->scaling_matrix.scaling_list_8x8[5], pps->scaling_matrix8[5], sizeof(controls->scaling_matrix.scaling_list_8x8[5])); ++ } ++ ++ controls->decode_params = (struct v4l2_ctrl_h264_decode_params) { ++ .num_slices = 0, ++ .nal_ref_idc = h->nal_ref_idc, ++ .top_field_order_cnt = h->cur_pic_ptr->field_poc[0] != INT_MAX ? h->cur_pic_ptr->field_poc[0] : 0, ++ .bottom_field_order_cnt = h->cur_pic_ptr->field_poc[1] != INT_MAX ? h->cur_pic_ptr->field_poc[1] : 0, ++ }; ++ ++ if (h->picture_idr) ++ controls->decode_params.flags |= V4L2_H264_DECODE_PARAM_FLAG_IDR_PIC; ++ ++ fill_dpb(&controls->decode_params, h); ++ ++ controls->first_slice = !FIELD_PICTURE(h) || h->first_field; ++ ++ return ff_v4l2_request_reset_frame(avctx, h->cur_pic_ptr->f); ++} ++ ++static int v4l2_request_h264_queue_decode(AVCodecContext *avctx, int last_slice) ++{ ++ const H264Context *h = avctx->priv_data; ++ V4L2RequestControlsH264 *controls = h->cur_pic_ptr->hwaccel_picture_private; ++ V4L2RequestContextH264 *ctx = avctx->internal->hwaccel_priv_data; ++ ++ struct v4l2_ext_control control[] = { ++ { ++ .id = V4L2_CID_MPEG_VIDEO_H264_SPS, ++ .ptr = &controls->sps, ++ .size = sizeof(controls->sps), ++ }, ++ { ++ .id = V4L2_CID_MPEG_VIDEO_H264_PPS, ++ .ptr = &controls->pps, ++ .size = sizeof(controls->pps), ++ }, ++ { ++ .id = V4L2_CID_MPEG_VIDEO_H264_SCALING_MATRIX, ++ .ptr = &controls->scaling_matrix, ++ .size = sizeof(controls->scaling_matrix), ++ }, ++ { ++ .id = V4L2_CID_MPEG_VIDEO_H264_SLICE_PARAMS, ++ .ptr = &controls->slice_params, ++ .size = sizeof(controls->slice_params[0]) * FFMAX(FFMIN(controls->decode_params.num_slices, MAX_SLICES), ctx->max_slices), ++ }, ++ { ++ .id = V4L2_CID_MPEG_VIDEO_H264_DECODE_PARAMS, ++ .ptr = &controls->decode_params, ++ .size = sizeof(controls->decode_params), ++ }, ++ }; ++ ++ if (ctx->decode_mode == V4L2_MPEG_VIDEO_H264_DECODE_MODE_SLICE_BASED) ++ return ff_v4l2_request_decode_slice(avctx, h->cur_pic_ptr->f, control, FF_ARRAY_ELEMS(control), controls->first_slice, last_slice); ++ ++ return ff_v4l2_request_decode_frame(avctx, h->cur_pic_ptr->f, control, FF_ARRAY_ELEMS(control)); ++} ++ ++static int v4l2_request_h264_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size) ++{ ++ const H264Context *h = avctx->priv_data; ++ const PPS *pps = h->ps.pps; ++ const H264SliceContext *sl = &h->slice_ctx[0]; ++ V4L2RequestControlsH264 *controls = h->cur_pic_ptr->hwaccel_picture_private; ++ V4L2RequestContextH264 *ctx = 
avctx->internal->hwaccel_priv_data; ++ V4L2RequestDescriptor *req = (V4L2RequestDescriptor*)h->cur_pic_ptr->f->data[0]; ++ int i, ret, count, slice = FFMIN(controls->decode_params.num_slices, MAX_SLICES - 1); ++ ++ if (ctx->decode_mode == V4L2_MPEG_VIDEO_H264_DECODE_MODE_SLICE_BASED && slice) { ++ ret = v4l2_request_h264_queue_decode(avctx, 0); ++ if (ret) ++ return ret; ++ ++ ff_v4l2_request_reset_frame(avctx, h->cur_pic_ptr->f); ++ slice = controls->decode_params.num_slices = 0; ++ controls->first_slice = 0; ++ } ++ ++ controls->slice_params[slice] = (struct v4l2_ctrl_h264_slice_params) { ++ /* Size in bytes, including header */ ++ .size = 0, ++ .start_byte_offset = req->output.used, ++ /* Offset in bits to slice_data() from the beginning of this slice. */ ++ .header_bit_size = get_bits_count(&sl->gb), ++ ++ .first_mb_in_slice = sl->first_mb_addr, ++ .slice_type = ff_h264_get_slice_type(sl), ++ .pic_parameter_set_id = sl->pps_id, ++ .colour_plane_id = 0, /* what is this? */ ++ .frame_num = h->poc.frame_num, ++ .idr_pic_id = 0, /* what is this? */ ++ .pic_order_cnt_lsb = sl->poc_lsb, ++ .delta_pic_order_cnt_bottom = sl->delta_poc_bottom, ++ .delta_pic_order_cnt0 = sl->delta_poc[0], ++ .delta_pic_order_cnt1 = sl->delta_poc[1], ++ .redundant_pic_cnt = sl->redundant_pic_count, ++ ++ /* Size in bits of dec_ref_pic_marking() syntax element. */ ++ .dec_ref_pic_marking_bit_size = 0, ++ /* Size in bits of pic order count syntax. */ ++ .pic_order_cnt_bit_size = 0, ++ ++ .cabac_init_idc = sl->cabac_init_idc, ++ .slice_qp_delta = sl->qscale - pps->init_qp, ++ .slice_qs_delta = 0, /* XXX not implemented by FFmpeg */ ++ .disable_deblocking_filter_idc = sl->deblocking_filter < 2 ? !sl->deblocking_filter : sl->deblocking_filter, ++ .slice_alpha_c0_offset_div2 = sl->slice_alpha_c0_offset / 2, ++ .slice_beta_offset_div2 = sl->slice_beta_offset / 2, ++ .slice_group_change_cycle = 0, /* what is this? */ ++ ++ .num_ref_idx_l0_active_minus1 = sl->list_count > 0 ? sl->ref_count[0] - 1 : 0, ++ .num_ref_idx_l1_active_minus1 = sl->list_count > 1 ? sl->ref_count[1] - 1 : 0, ++ }; ++ ++ if (FIELD_PICTURE(h)) ++ controls->slice_params[slice].flags |= V4L2_H264_SLICE_FLAG_FIELD_PIC; ++ if (h->picture_structure == PICT_BOTTOM_FIELD) ++ controls->slice_params[slice].flags |= V4L2_H264_SLICE_FLAG_BOTTOM_FIELD; ++ if (sl->slice_type == AV_PICTURE_TYPE_B && sl->direct_spatial_mv_pred) ++ controls->slice_params[slice].flags |= V4L2_H264_SLICE_FLAG_DIRECT_SPATIAL_MV_PRED; ++ ++ controls->slice_params[slice].pred_weight_table.chroma_log2_weight_denom = sl->pwt.chroma_log2_weight_denom; ++ controls->slice_params[slice].pred_weight_table.luma_log2_weight_denom = sl->pwt.luma_log2_weight_denom; ++ ++ count = sl->list_count > 0 ? sl->ref_count[0] : 0; ++ for (i = 0; i < count; i++) ++ controls->slice_params[slice].ref_pic_list0[i] = get_dpb_index(&controls->decode_params, &sl->ref_list[0][i]); ++ if (count) ++ fill_weight_factors(&controls->slice_params[slice].pred_weight_table.weight_factors[0], 0, sl); ++ ++ count = sl->list_count > 1 ? 
sl->ref_count[1] : 0; ++ for (i = 0; i < count; i++) ++ controls->slice_params[slice].ref_pic_list1[i] = get_dpb_index(&controls->decode_params, &sl->ref_list[1][i]); ++ if (count) ++ fill_weight_factors(&controls->slice_params[slice].pred_weight_table.weight_factors[1], 1, sl); ++ ++ if (ctx->start_code == V4L2_MPEG_VIDEO_H264_START_CODE_ANNEX_B) { ++ ret = ff_v4l2_request_append_output_buffer(avctx, h->cur_pic_ptr->f, nalu_slice_start_code, 3); ++ if (ret) ++ return ret; ++ } ++ ++ ret = ff_v4l2_request_append_output_buffer(avctx, h->cur_pic_ptr->f, buffer, size); ++ if (ret) ++ return ret; ++ ++ controls->slice_params[slice].size = req->output.used - controls->slice_params[slice].start_byte_offset; ++ controls->decode_params.num_slices++; ++ return 0; ++} ++ ++static int v4l2_request_h264_end_frame(AVCodecContext *avctx) ++{ ++ const H264Context *h = avctx->priv_data; ++ return v4l2_request_h264_queue_decode(avctx, !FIELD_PICTURE(h) || !h->first_field); ++} ++ ++static int v4l2_request_h264_set_controls(AVCodecContext *avctx) ++{ ++ V4L2RequestContextH264 *ctx = avctx->internal->hwaccel_priv_data; ++ int ret; ++ ++ struct v4l2_ext_control control[] = { ++ { .id = V4L2_CID_MPEG_VIDEO_H264_DECODE_MODE, }, ++ { .id = V4L2_CID_MPEG_VIDEO_H264_START_CODE, }, ++ }; ++ struct v4l2_query_ext_ctrl slice_params = { ++ .id = V4L2_CID_MPEG_VIDEO_H264_SLICE_PARAMS, ++ }; ++ ++ ctx->decode_mode = ff_v4l2_request_query_control_default_value(avctx, V4L2_CID_MPEG_VIDEO_H264_DECODE_MODE); ++ if (ctx->decode_mode != V4L2_MPEG_VIDEO_H264_DECODE_MODE_SLICE_BASED && ++ ctx->decode_mode != V4L2_MPEG_VIDEO_H264_DECODE_MODE_FRAME_BASED) { ++ av_log(avctx, AV_LOG_ERROR, "%s: unsupported decode mode, %d\n", __func__, ctx->decode_mode); ++ return AVERROR(EINVAL); ++ } ++ ++ ctx->start_code = ff_v4l2_request_query_control_default_value(avctx, V4L2_CID_MPEG_VIDEO_H264_START_CODE); ++ if (ctx->start_code != V4L2_MPEG_VIDEO_H264_START_CODE_NONE && ++ ctx->start_code != V4L2_MPEG_VIDEO_H264_START_CODE_ANNEX_B) { ++ av_log(avctx, AV_LOG_ERROR, "%s: unsupported start code, %d\n", __func__, ctx->start_code); ++ return AVERROR(EINVAL); ++ } ++ ++ ret = ff_v4l2_request_query_control(avctx, &slice_params); ++ if (ret) ++ return ret; ++ ++ ctx->max_slices = slice_params.elems; ++ if (ctx->max_slices > MAX_SLICES) { ++ av_log(avctx, AV_LOG_ERROR, "%s: unsupported max slices, %d\n", __func__, ctx->max_slices); ++ return AVERROR(EINVAL); ++ } ++ ++ control[0].value = ctx->decode_mode; ++ control[1].value = ctx->start_code; ++ ++ return ff_v4l2_request_set_controls(avctx, control, FF_ARRAY_ELEMS(control)); ++} ++ ++static int v4l2_request_h264_init(AVCodecContext *avctx) ++{ ++ const H264Context *h = avctx->priv_data; ++ struct v4l2_ctrl_h264_sps sps; ++ struct v4l2_ctrl_h264_pps pps; ++ int ret; ++ ++ struct v4l2_ext_control control[] = { ++ { ++ .id = V4L2_CID_MPEG_VIDEO_H264_SPS, ++ .ptr = &sps, ++ .size = sizeof(sps), ++ }, ++ { ++ .id = V4L2_CID_MPEG_VIDEO_H264_PPS, ++ .ptr = &pps, ++ .size = sizeof(pps), ++ }, ++ }; ++ ++ fill_sps(&sps, h); ++ fill_pps(&pps, h); ++ ++ ret = ff_v4l2_request_init(avctx, V4L2_PIX_FMT_H264_SLICE, 4 * 1024 * 1024, control, FF_ARRAY_ELEMS(control)); ++ if (ret) ++ return ret; ++ ++ return v4l2_request_h264_set_controls(avctx); ++} ++ ++const AVHWAccel ff_h264_v4l2request_hwaccel = { ++ .name = "h264_v4l2request", ++ .type = AVMEDIA_TYPE_VIDEO, ++ .id = AV_CODEC_ID_H264, ++ .pix_fmt = AV_PIX_FMT_DRM_PRIME, ++ .start_frame = v4l2_request_h264_start_frame, ++ .decode_slice = 
v4l2_request_h264_decode_slice, ++ .end_frame = v4l2_request_h264_end_frame, ++ .frame_priv_data_size = sizeof(V4L2RequestControlsH264), ++ .init = v4l2_request_h264_init, ++ .uninit = ff_v4l2_request_uninit, ++ .priv_data_size = sizeof(V4L2RequestContextH264), ++ .frame_params = ff_v4l2_request_frame_params, ++ .caps_internal = HWACCEL_CAP_ASYNC_SAFE, ++}; + +From c804445e166d743ce41831556c968ed9e3a414f5 Mon Sep 17 00:00:00 2001 +From: Jernej Skrabec +Date: Sat, 15 Dec 2018 22:32:16 +0100 +Subject: [PATCH 05/18] Add V4L2 request API hevc hwaccel + +Signed-off-by: Jernej Skrabec +Signed-off-by: Jonas Karlman +--- + configure | 3 + + libavcodec/Makefile | 1 + + libavcodec/hevcdec.c | 10 + + libavcodec/hwaccels.h | 1 + + libavcodec/v4l2_request_hevc.c | 533 +++++++++++++++++++++++++++++++++ + 5 files changed, 548 insertions(+) + create mode 100644 libavcodec/v4l2_request_hevc.c + +diff --git a/configure b/configure +index 1a7720ebe3..58abd99335 100755 +--- a/configure ++++ b/configure +@@ -2941,6 +2941,8 @@ hevc_dxva2_hwaccel_deps="dxva2 DXVA_PicParams_HEVC" + hevc_dxva2_hwaccel_select="hevc_decoder" + hevc_nvdec_hwaccel_deps="nvdec" + hevc_nvdec_hwaccel_select="hevc_decoder" ++hevc_v4l2request_hwaccel_deps="v4l2_request hevc_v4l2_request" ++hevc_v4l2request_hwaccel_select="hevc_decoder" + hevc_vaapi_hwaccel_deps="vaapi VAPictureParameterBufferHEVC" + hevc_vaapi_hwaccel_select="hevc_decoder" + hevc_vdpau_hwaccel_deps="vdpau VdpPictureInfoHEVC" +@@ -6572,6 +6574,7 @@ fi + + check_func_headers "linux/media.h linux/videodev2.h" v4l2_timeval_to_ns + check_cc h264_v4l2_request linux/videodev2.h "int i = V4L2_PIX_FMT_H264_SLICE;" ++check_cc hevc_v4l2_request linux/videodev2.h "int i = V4L2_PIX_FMT_HEVC_SLICE;" + check_cc mpeg2_v4l2_request linux/videodev2.h "int i = V4L2_PIX_FMT_MPEG2_SLICE;" + + check_headers sys/videoio.h +diff --git a/libavcodec/Makefile b/libavcodec/Makefile +index 9a10a292e3..5d0e1d7dae 100644 +--- a/libavcodec/Makefile ++++ b/libavcodec/Makefile +@@ -911,6 +911,7 @@ OBJS-$(CONFIG_HEVC_D3D11VA_HWACCEL) += dxva2_hevc.o + OBJS-$(CONFIG_HEVC_DXVA2_HWACCEL) += dxva2_hevc.o + OBJS-$(CONFIG_HEVC_NVDEC_HWACCEL) += nvdec_hevc.o + OBJS-$(CONFIG_HEVC_QSV_HWACCEL) += qsvdec_h2645.o ++OBJS-$(CONFIG_HEVC_V4L2REQUEST_HWACCEL) += v4l2_request_hevc.o + OBJS-$(CONFIG_HEVC_VAAPI_HWACCEL) += vaapi_hevc.o h265_profile_level.o + OBJS-$(CONFIG_HEVC_VDPAU_HWACCEL) += vdpau_hevc.o + OBJS-$(CONFIG_MJPEG_NVDEC_HWACCEL) += nvdec_mjpeg.o +diff --git a/libavcodec/hevcdec.c b/libavcodec/hevcdec.c +index 0772608a30..d01b7b34bc 100644 +--- a/libavcodec/hevcdec.c ++++ b/libavcodec/hevcdec.c +@@ -372,6 +372,7 @@ static enum AVPixelFormat get_format(HEVCContext *s, const HEVCSPS *sps) + #define HWACCEL_MAX (CONFIG_HEVC_DXVA2_HWACCEL + \ + CONFIG_HEVC_D3D11VA_HWACCEL * 2 + \ + CONFIG_HEVC_NVDEC_HWACCEL + \ ++ CONFIG_HEVC_V4L2REQUEST_HWACCEL + \ + CONFIG_HEVC_VAAPI_HWACCEL + \ + CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL + \ + CONFIG_HEVC_VDPAU_HWACCEL) +@@ -398,6 +399,9 @@ static enum AVPixelFormat get_format(HEVCContext *s, const HEVCSPS *sps) + #endif + #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL + *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX; ++#endif ++#if CONFIG_HEVC_V4L2REQUEST_HWACCEL ++ *fmt++ = AV_PIX_FMT_DRM_PRIME; + #endif + break; + case AV_PIX_FMT_YUV420P10: +@@ -416,6 +420,9 @@ static enum AVPixelFormat get_format(HEVCContext *s, const HEVCSPS *sps) + #endif + #if CONFIG_HEVC_NVDEC_HWACCEL + *fmt++ = AV_PIX_FMT_CUDA; ++#endif ++#if CONFIG_HEVC_V4L2REQUEST_HWACCEL ++ *fmt++ = AV_PIX_FMT_DRM_PRIME; + #endif + break; + case 
AV_PIX_FMT_YUV444P: +@@ -3588,6 +3595,9 @@ AVCodec ff_hevc_decoder = { + #endif + #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL + HWACCEL_VIDEOTOOLBOX(hevc), ++#endif ++#if CONFIG_HEVC_V4L2REQUEST_HWACCEL ++ HWACCEL_V4L2REQUEST(hevc), + #endif + NULL + }, +diff --git a/libavcodec/hwaccels.h b/libavcodec/hwaccels.h +index 44e00e79b5..e2f90a5fdd 100644 +--- a/libavcodec/hwaccels.h ++++ b/libavcodec/hwaccels.h +@@ -35,6 +35,7 @@ extern const AVHWAccel ff_hevc_d3d11va_hwaccel; + extern const AVHWAccel ff_hevc_d3d11va2_hwaccel; + extern const AVHWAccel ff_hevc_dxva2_hwaccel; + extern const AVHWAccel ff_hevc_nvdec_hwaccel; ++extern const AVHWAccel ff_hevc_v4l2request_hwaccel; + extern const AVHWAccel ff_hevc_vaapi_hwaccel; + extern const AVHWAccel ff_hevc_vdpau_hwaccel; + extern const AVHWAccel ff_hevc_videotoolbox_hwaccel; +diff --git a/libavcodec/v4l2_request_hevc.c b/libavcodec/v4l2_request_hevc.c +new file mode 100644 +index 0000000000..f724909546 +--- /dev/null ++++ b/libavcodec/v4l2_request_hevc.c +@@ -0,0 +1,533 @@ ++/* ++ * This file is part of FFmpeg. ++ * ++ * FFmpeg is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2.1 of the License, or (at your option) any later version. ++ * ++ * FFmpeg is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * Lesser General Public License for more details. ++ * ++ * You should have received a copy of the GNU Lesser General Public ++ * License along with FFmpeg; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ++ */ ++ ++#include "hevcdec.h" ++#include "hwconfig.h" ++#include "v4l2_request.h" ++ ++#define MAX_SLICES 16 ++ ++typedef struct V4L2RequestControlsHEVC { ++ struct v4l2_ctrl_hevc_sps sps; ++ struct v4l2_ctrl_hevc_pps pps; ++ struct v4l2_ctrl_hevc_slice_params slice_params[MAX_SLICES]; ++ int first_slice; ++ int num_slices; //TODO: this should be in control ++} V4L2RequestControlsHEVC; ++ ++typedef struct V4L2RequestContextHEVC { ++ V4L2RequestContext base; ++ int decode_mode; ++ int start_code; ++ int max_slices; ++} V4L2RequestContextHEVC; ++ ++static uint8_t nalu_slice_start_code[] = { 0x00, 0x00, 0x01 }; ++ ++static void v4l2_request_hevc_fill_pred_table(const HEVCContext *h, struct v4l2_hevc_pred_weight_table *table) ++{ ++ int32_t luma_weight_denom, chroma_weight_denom; ++ const SliceHeader *sh = &h->sh; ++ ++ if (sh->slice_type == HEVC_SLICE_I || ++ (sh->slice_type == HEVC_SLICE_P && !h->ps.pps->weighted_pred_flag) || ++ (sh->slice_type == HEVC_SLICE_B && !h->ps.pps->weighted_bipred_flag)) ++ return; ++ ++ table->luma_log2_weight_denom = sh->luma_log2_weight_denom; ++ ++ if (h->ps.sps->chroma_format_idc) ++ table->delta_chroma_log2_weight_denom = sh->chroma_log2_weight_denom - sh->luma_log2_weight_denom; ++ ++ luma_weight_denom = (1 << sh->luma_log2_weight_denom); ++ chroma_weight_denom = (1 << sh->chroma_log2_weight_denom); ++ ++ for (int i = 0; i < 15 && i < sh->nb_refs[L0]; i++) { ++ table->delta_luma_weight_l0[i] = sh->luma_weight_l0[i] - luma_weight_denom; ++ table->luma_offset_l0[i] = sh->luma_offset_l0[i]; ++ table->delta_chroma_weight_l0[i][0] = sh->chroma_weight_l0[i][0] - chroma_weight_denom; ++ table->delta_chroma_weight_l0[i][1] = sh->chroma_weight_l0[i][1] - chroma_weight_denom; ++ 
table->chroma_offset_l0[i][0] = sh->chroma_offset_l0[i][0]; ++ table->chroma_offset_l0[i][1] = sh->chroma_offset_l0[i][1]; ++ } ++ ++ if (sh->slice_type != HEVC_SLICE_B) ++ return; ++ ++ for (int i = 0; i < 15 && i < sh->nb_refs[L1]; i++) { ++ table->delta_luma_weight_l1[i] = sh->luma_weight_l1[i] - luma_weight_denom; ++ table->luma_offset_l1[i] = sh->luma_offset_l1[i]; ++ table->delta_chroma_weight_l1[i][0] = sh->chroma_weight_l1[i][0] - chroma_weight_denom; ++ table->delta_chroma_weight_l1[i][1] = sh->chroma_weight_l1[i][1] - chroma_weight_denom; ++ table->chroma_offset_l1[i][0] = sh->chroma_offset_l1[i][0]; ++ table->chroma_offset_l1[i][1] = sh->chroma_offset_l1[i][1]; ++ } ++} ++ ++static int find_frame_rps_type(const HEVCContext *h, uint64_t timestamp) ++{ ++ const HEVCFrame *frame; ++ int i; ++ ++ for (i = 0; i < h->rps[ST_CURR_BEF].nb_refs; i++) { ++ frame = h->rps[ST_CURR_BEF].ref[i]; ++ if (frame && timestamp == ff_v4l2_request_get_capture_timestamp(frame->frame)) ++ return V4L2_HEVC_DPB_ENTRY_RPS_ST_CURR_BEFORE; ++ } ++ ++ for (i = 0; i < h->rps[ST_CURR_AFT].nb_refs; i++) { ++ frame = h->rps[ST_CURR_AFT].ref[i]; ++ if (frame && timestamp == ff_v4l2_request_get_capture_timestamp(frame->frame)) ++ return V4L2_HEVC_DPB_ENTRY_RPS_ST_CURR_AFTER; ++ } ++ ++ for (i = 0; i < h->rps[LT_CURR].nb_refs; i++) { ++ frame = h->rps[LT_CURR].ref[i]; ++ if (frame && timestamp == ff_v4l2_request_get_capture_timestamp(frame->frame)) ++ return V4L2_HEVC_DPB_ENTRY_RPS_LT_CURR; ++ } ++ ++ return 0; ++} ++ ++static uint8_t get_ref_pic_index(const HEVCContext *h, const HEVCFrame *frame, ++ struct v4l2_ctrl_hevc_slice_params *slice_params) ++{ ++ uint64_t timestamp; ++ ++ if (!frame) ++ return 0; ++ ++ timestamp = ff_v4l2_request_get_capture_timestamp(frame->frame); ++ ++ for (uint8_t i = 0; i < slice_params->num_active_dpb_entries; i++) { ++ struct v4l2_hevc_dpb_entry *entry = &slice_params->dpb[i]; ++ if (entry->timestamp == timestamp) ++ return i; ++ } ++ ++ return 0; ++} ++ ++static void v4l2_request_hevc_fill_slice_params(const HEVCContext *h, ++ struct v4l2_ctrl_hevc_slice_params *slice_params) ++{ ++ const HEVCFrame *pic = h->ref; ++ const SliceHeader *sh = &h->sh; ++ int i, entries = 0; ++ RefPicList *rpl; ++ ++ *slice_params = (struct v4l2_ctrl_hevc_slice_params) { ++ .bit_size = 0, ++ .data_bit_offset = get_bits_count(&h->HEVClc->gb), ++ ++ /* ISO/IEC 23008-2, ITU-T Rec. H.265: NAL unit header */ ++ .nal_unit_type = h->nal_unit_type, ++ .nuh_temporal_id_plus1 = h->temporal_id + 1, ++ ++ /* ISO/IEC 23008-2, ITU-T Rec. H.265: General slice segment header */ ++ .slice_type = sh->slice_type, ++ .colour_plane_id = sh->colour_plane_id, ++ .slice_pic_order_cnt = pic->poc, ++ .num_ref_idx_l0_active_minus1 = sh->nb_refs[L0] ? sh->nb_refs[L0] - 1 : 0, ++ .num_ref_idx_l1_active_minus1 = sh->nb_refs[L1] ? sh->nb_refs[L1] - 1 : 0, ++ .collocated_ref_idx = sh->slice_temporal_mvp_enabled_flag ? sh->collocated_ref_idx : 0, ++ .five_minus_max_num_merge_cand = sh->slice_type == HEVC_SLICE_I ? 0 : 5 - sh->max_num_merge_cand, ++ .slice_qp_delta = sh->slice_qp_delta, ++ .slice_cb_qp_offset = sh->slice_cb_qp_offset, ++ .slice_cr_qp_offset = sh->slice_cr_qp_offset, ++ .slice_act_y_qp_offset = 0, ++ .slice_act_cb_qp_offset = 0, ++ .slice_act_cr_qp_offset = 0, ++ .slice_beta_offset_div2 = sh->beta_offset / 2, ++ .slice_tc_offset_div2 = sh->tc_offset / 2, ++ ++ /* ISO/IEC 23008-2, ITU-T Rec. H.265: Picture timing SEI message */ ++ .pic_struct = h->sei.picture_timing.picture_struct, ++ ++ /* ISO/IEC 23008-2, ITU-T Rec. 
H.265: General slice segment header */ ++ .num_rps_poc_st_curr_before = h->rps[ST_CURR_BEF].nb_refs, ++ .num_rps_poc_st_curr_after = h->rps[ST_CURR_AFT].nb_refs, ++ .num_rps_poc_lt_curr = h->rps[LT_CURR].nb_refs, ++ }; ++ ++ if (sh->slice_sample_adaptive_offset_flag[0]) ++ slice_params->flags |= V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_LUMA; ++ ++ if (sh->slice_sample_adaptive_offset_flag[1]) ++ slice_params->flags |= V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_CHROMA; ++ ++ if (sh->slice_temporal_mvp_enabled_flag) ++ slice_params->flags |= V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_TEMPORAL_MVP_ENABLED; ++ ++ if (sh->mvd_l1_zero_flag) ++ slice_params->flags |= V4L2_HEVC_SLICE_PARAMS_FLAG_MVD_L1_ZERO; ++ ++ if (sh->cabac_init_flag) ++ slice_params->flags |= V4L2_HEVC_SLICE_PARAMS_FLAG_CABAC_INIT; ++ ++ if (sh->collocated_list == L0) ++ slice_params->flags |= V4L2_HEVC_SLICE_PARAMS_FLAG_COLLOCATED_FROM_L0; ++ ++ if (sh->disable_deblocking_filter_flag) ++ slice_params->flags |= V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_DEBLOCKING_FILTER_DISABLED; ++ ++ if (sh->slice_loop_filter_across_slices_enabled_flag) ++ slice_params->flags |= V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED; ++ ++ for (i = 0; i < FF_ARRAY_ELEMS(h->DPB); i++) { ++ const HEVCFrame *frame = &h->DPB[i]; ++ if (frame != pic && (frame->flags & (HEVC_FRAME_FLAG_LONG_REF | HEVC_FRAME_FLAG_SHORT_REF))) { ++ struct v4l2_hevc_dpb_entry *entry = &slice_params->dpb[entries++]; ++ ++ entry->timestamp = ff_v4l2_request_get_capture_timestamp(frame->frame); ++ entry->rps = find_frame_rps_type(h, entry->timestamp); ++ entry->field_pic = frame->frame->interlaced_frame; ++ ++ /* TODO: Interleaved: Get the POC for each field. */ ++ entry->pic_order_cnt[0] = frame->poc; ++ entry->pic_order_cnt[1] = frame->poc; ++ } ++ } ++ ++ slice_params->num_active_dpb_entries = entries; ++ ++ if (sh->slice_type != HEVC_SLICE_I) { ++ rpl = &h->ref->refPicList[0]; ++ for (i = 0; i < rpl->nb_refs; i++) ++ slice_params->ref_idx_l0[i] = get_ref_pic_index(h, rpl->ref[i], slice_params); ++ } ++ ++ if (sh->slice_type == HEVC_SLICE_B) { ++ rpl = &h->ref->refPicList[1]; ++ for (i = 0; i < rpl->nb_refs; i++) ++ slice_params->ref_idx_l1[i] = get_ref_pic_index(h, rpl->ref[i], slice_params); ++ } ++ ++ v4l2_request_hevc_fill_pred_table(h, &slice_params->pred_weight_table); ++} ++ ++static void fill_sps(struct v4l2_ctrl_hevc_sps *ctrl, const HEVCContext *h) ++{ ++ const HEVCSPS *sps = h->ps.sps; ++ ++ /* ISO/IEC 23008-2, ITU-T Rec. 
H.265: Sequence parameter set */ ++ *ctrl = (struct v4l2_ctrl_hevc_sps) { ++ .chroma_format_idc = sps->chroma_format_idc, ++ .pic_width_in_luma_samples = sps->width, ++ .pic_height_in_luma_samples = sps->height, ++ .bit_depth_luma_minus8 = sps->bit_depth - 8, ++ .bit_depth_chroma_minus8 = sps->bit_depth - 8, ++ .log2_max_pic_order_cnt_lsb_minus4 = sps->log2_max_poc_lsb - 4, ++ .sps_max_dec_pic_buffering_minus1 = sps->temporal_layer[sps->max_sub_layers - 1].max_dec_pic_buffering - 1, ++ .sps_max_num_reorder_pics = sps->temporal_layer[sps->max_sub_layers - 1].num_reorder_pics, ++ .sps_max_latency_increase_plus1 = sps->temporal_layer[sps->max_sub_layers - 1].max_latency_increase + 1, ++ .log2_min_luma_coding_block_size_minus3 = sps->log2_min_cb_size - 3, ++ .log2_diff_max_min_luma_coding_block_size = sps->log2_diff_max_min_coding_block_size, ++ .log2_min_luma_transform_block_size_minus2 = sps->log2_min_tb_size - 2, ++ .log2_diff_max_min_luma_transform_block_size = sps->log2_max_trafo_size - sps->log2_min_tb_size, ++ .max_transform_hierarchy_depth_inter = sps->max_transform_hierarchy_depth_inter, ++ .max_transform_hierarchy_depth_intra = sps->max_transform_hierarchy_depth_intra, ++ .pcm_sample_bit_depth_luma_minus1 = sps->pcm.bit_depth - 1, ++ .pcm_sample_bit_depth_chroma_minus1 = sps->pcm.bit_depth_chroma - 1, ++ .log2_min_pcm_luma_coding_block_size_minus3 = sps->pcm.log2_min_pcm_cb_size - 3, ++ .log2_diff_max_min_pcm_luma_coding_block_size = sps->pcm.log2_max_pcm_cb_size - sps->pcm.log2_min_pcm_cb_size, ++ .num_short_term_ref_pic_sets = sps->nb_st_rps, ++ .num_long_term_ref_pics_sps = sps->num_long_term_ref_pics_sps, ++ }; ++ ++ if (sps->separate_colour_plane_flag) ++ ctrl->flags |= V4L2_HEVC_SPS_FLAG_SEPARATE_COLOUR_PLANE; ++ ++ if (sps->scaling_list_enable_flag) ++ ctrl->flags |= V4L2_HEVC_SPS_FLAG_SCALING_LIST_ENABLED; ++ ++ if (sps->amp_enabled_flag) ++ ctrl->flags |= V4L2_HEVC_SPS_FLAG_AMP_ENABLED; ++ ++ if (sps->sao_enabled) ++ ctrl->flags |= V4L2_HEVC_SPS_FLAG_SAMPLE_ADAPTIVE_OFFSET; ++ ++ if (sps->pcm_enabled_flag) ++ ctrl->flags |= V4L2_HEVC_SPS_FLAG_PCM_ENABLED; ++ ++ if (sps->pcm.loop_filter_disable_flag) ++ ctrl->flags |= V4L2_HEVC_SPS_FLAG_PCM_LOOP_FILTER_DISABLED; ++ ++ if (sps->long_term_ref_pics_present_flag) ++ ctrl->flags |= V4L2_HEVC_SPS_FLAG_LONG_TERM_REF_PICS_PRESENT; ++ ++ if (sps->sps_temporal_mvp_enabled_flag) ++ ctrl->flags |= V4L2_HEVC_SPS_FLAG_SPS_TEMPORAL_MVP_ENABLED; ++ ++ if (sps->sps_strong_intra_smoothing_enable_flag) ++ ctrl->flags |= V4L2_HEVC_SPS_FLAG_STRONG_INTRA_SMOOTHING_ENABLED; ++} ++ ++static int v4l2_request_hevc_start_frame(AVCodecContext *avctx, ++ av_unused const uint8_t *buffer, ++ av_unused uint32_t size) ++{ ++ const HEVCContext *h = avctx->priv_data; ++ const HEVCSPS *sps = h->ps.sps; ++ const HEVCPPS *pps = h->ps.pps; ++ const ScalingList *sl = pps->scaling_list_data_present_flag ? ++ &pps->scaling_list : ++ sps->scaling_list_enable_flag ? ++ &sps->scaling_list : NULL; ++ V4L2RequestControlsHEVC *controls = h->ref->hwaccel_picture_private; ++ ++ fill_sps(&controls->sps, h); ++ ++ /* ISO/IEC 23008-2, ITU-T Rec. 
H.265: Picture parameter set */ ++ controls->pps = (struct v4l2_ctrl_hevc_pps) { ++ .num_extra_slice_header_bits = pps->num_extra_slice_header_bits, ++ .init_qp_minus26 = pps->pic_init_qp_minus26, ++ .diff_cu_qp_delta_depth = pps->diff_cu_qp_delta_depth, ++ .pps_cb_qp_offset = pps->cb_qp_offset, ++ .pps_cr_qp_offset = pps->cr_qp_offset, ++ .pps_beta_offset_div2 = pps->beta_offset / 2, ++ .pps_tc_offset_div2 = pps->tc_offset / 2, ++ .log2_parallel_merge_level_minus2 = pps->log2_parallel_merge_level - 2, ++ }; ++ ++ if (pps->dependent_slice_segments_enabled_flag) ++ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT; ++ ++ if (pps->output_flag_present_flag) ++ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_OUTPUT_FLAG_PRESENT; ++ ++ if (pps->sign_data_hiding_flag) ++ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_SIGN_DATA_HIDING_ENABLED; ++ ++ if (pps->cabac_init_present_flag) ++ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_CABAC_INIT_PRESENT; ++ ++ if (pps->constrained_intra_pred_flag) ++ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_CONSTRAINED_INTRA_PRED; ++ ++ if (pps->transform_skip_enabled_flag) ++ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_TRANSFORM_SKIP_ENABLED; ++ ++ if (pps->cu_qp_delta_enabled_flag) ++ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_CU_QP_DELTA_ENABLED; ++ ++ if (pps->pic_slice_level_chroma_qp_offsets_present_flag) ++ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_PPS_SLICE_CHROMA_QP_OFFSETS_PRESENT; ++ ++ if (pps->weighted_pred_flag) ++ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_WEIGHTED_PRED; ++ ++ if (pps->weighted_bipred_flag) ++ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_WEIGHTED_BIPRED; ++ ++ if (pps->transquant_bypass_enable_flag) ++ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_TRANSQUANT_BYPASS_ENABLED; ++ ++ if (pps->tiles_enabled_flag) ++ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_TILES_ENABLED; ++ ++ if (pps->entropy_coding_sync_enabled_flag) ++ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_ENTROPY_CODING_SYNC_ENABLED; ++ ++ if (pps->loop_filter_across_tiles_enabled_flag) ++ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED; ++ ++ if (pps->seq_loop_filter_across_slices_enabled_flag) ++ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED; ++ ++ if (pps->deblocking_filter_override_enabled_flag) ++ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_DEBLOCKING_FILTER_OVERRIDE_ENABLED; ++ ++ if (pps->disable_dbf) ++ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_PPS_DISABLE_DEBLOCKING_FILTER; ++ ++ if (pps->lists_modification_present_flag) ++ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_LISTS_MODIFICATION_PRESENT; ++ ++ if (pps->slice_header_extension_present_flag) ++ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_SLICE_SEGMENT_HEADER_EXTENSION_PRESENT; ++ ++ if (pps->tiles_enabled_flag) { ++ controls->pps.num_tile_columns_minus1 = pps->num_tile_columns - 1; ++ controls->pps.num_tile_rows_minus1 = pps->num_tile_rows - 1; ++ ++ for (int i = 0; i < pps->num_tile_columns; i++) ++ controls->pps.column_width_minus1[i] = pps->column_width[i] - 1; ++ ++ for (int i = 0; i < pps->num_tile_rows; i++) ++ controls->pps.row_height_minus1[i] = pps->row_height[i] - 1; ++ } ++ ++ controls->first_slice = 1; ++ controls->num_slices = 0; ++ ++ return ff_v4l2_request_reset_frame(avctx, h->ref->frame); ++} ++ ++static int v4l2_request_hevc_queue_decode(AVCodecContext *avctx, int last_slice) ++{ ++ const HEVCContext *h = avctx->priv_data; ++ V4L2RequestControlsHEVC *controls = h->ref->hwaccel_picture_private; ++ V4L2RequestContextHEVC *ctx = 
avctx->internal->hwaccel_priv_data; ++ ++ struct v4l2_ext_control control[] = { ++ { ++ .id = V4L2_CID_MPEG_VIDEO_HEVC_SPS, ++ .ptr = &controls->sps, ++ .size = sizeof(controls->sps), ++ }, ++ { ++ .id = V4L2_CID_MPEG_VIDEO_HEVC_PPS, ++ .ptr = &controls->pps, ++ .size = sizeof(controls->pps), ++ }, ++ { ++ .id = V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS, ++ .ptr = &controls->slice_params, ++ .size = sizeof(controls->slice_params[0]) * FFMAX(FFMIN(controls->num_slices, MAX_SLICES), ctx->max_slices), ++ }, ++ }; ++ ++ if (ctx->decode_mode == V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_SLICE_BASED) ++ return ff_v4l2_request_decode_slice(avctx, h->ref->frame, control, FF_ARRAY_ELEMS(control), controls->first_slice, last_slice); ++ ++ return ff_v4l2_request_decode_frame(avctx, h->ref->frame, control, FF_ARRAY_ELEMS(control)); ++} ++ ++static int v4l2_request_hevc_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size) ++{ ++ const HEVCContext *h = avctx->priv_data; ++ V4L2RequestControlsHEVC *controls = h->ref->hwaccel_picture_private; ++ V4L2RequestContextHEVC *ctx = avctx->internal->hwaccel_priv_data; ++ V4L2RequestDescriptor *req = (V4L2RequestDescriptor*)h->ref->frame->data[0]; ++ int ret, slice = FFMIN(controls->num_slices, MAX_SLICES - 1); ++ ++ if (ctx->decode_mode == V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_SLICE_BASED && slice) { ++ ret = v4l2_request_hevc_queue_decode(avctx, 0); ++ if (ret) ++ return ret; ++ ++ ff_v4l2_request_reset_frame(avctx, h->ref->frame); ++ slice = controls->num_slices = 0; ++ controls->first_slice = 0; ++ } ++ ++ v4l2_request_hevc_fill_slice_params(h, &controls->slice_params[slice]); ++ ++ if (ctx->start_code == V4L2_MPEG_VIDEO_HEVC_START_CODE_ANNEX_B) { ++ ret = ff_v4l2_request_append_output_buffer(avctx, h->ref->frame, nalu_slice_start_code, 3); ++ if (ret) ++ return ret; ++ } ++ ++ ret = ff_v4l2_request_append_output_buffer(avctx, h->ref->frame, buffer, size); ++ if (ret) ++ return ret; ++ ++ controls->slice_params[slice].bit_size = req->output.used * 8; //FIXME ++ controls->num_slices++; ++ return 0; ++} ++ ++static int v4l2_request_hevc_end_frame(AVCodecContext *avctx) ++{ ++ return v4l2_request_hevc_queue_decode(avctx, 1); ++} ++ ++static int v4l2_request_hevc_set_controls(AVCodecContext *avctx) ++{ ++ V4L2RequestContextHEVC *ctx = avctx->internal->hwaccel_priv_data; ++ int ret; ++ ++ struct v4l2_ext_control control[] = { ++ { .id = V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE, }, ++ { .id = V4L2_CID_MPEG_VIDEO_HEVC_START_CODE, }, ++ }; ++ struct v4l2_query_ext_ctrl slice_params = { ++ .id = V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS, ++ }; ++ ++ ctx->decode_mode = ff_v4l2_request_query_control_default_value(avctx, V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE); ++ if (ctx->decode_mode != V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_SLICE_BASED && ++ ctx->decode_mode != V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_FRAME_BASED) { ++ av_log(avctx, AV_LOG_ERROR, "%s: unsupported decode mode, %d\n", __func__, ctx->decode_mode); ++ return AVERROR(EINVAL); ++ } ++ ++ ctx->start_code = ff_v4l2_request_query_control_default_value(avctx, V4L2_CID_MPEG_VIDEO_HEVC_START_CODE); ++ if (ctx->start_code != V4L2_MPEG_VIDEO_HEVC_START_CODE_NONE && ++ ctx->start_code != V4L2_MPEG_VIDEO_HEVC_START_CODE_ANNEX_B) { ++ av_log(avctx, AV_LOG_ERROR, "%s: unsupported start code, %d\n", __func__, ctx->start_code); ++ return AVERROR(EINVAL); ++ } ++ ++ ret = ff_v4l2_request_query_control(avctx, &slice_params); ++ if (ret) ++ return ret; ++ ++ ctx->max_slices = slice_params.elems; ++ if (ctx->max_slices > MAX_SLICES) { ++ 
av_log(avctx, AV_LOG_ERROR, "%s: unsupported max slices, %d\n", __func__, ctx->max_slices); ++ return AVERROR(EINVAL); ++ } ++ ++ control[0].value = ctx->decode_mode; ++ control[1].value = ctx->start_code; ++ ++ return ff_v4l2_request_set_controls(avctx, control, FF_ARRAY_ELEMS(control)); ++} ++ ++static int v4l2_request_hevc_init(AVCodecContext *avctx) ++{ ++ const HEVCContext *h = avctx->priv_data; ++ struct v4l2_ctrl_hevc_sps sps; ++ int ret; ++ ++ struct v4l2_ext_control control[] = { ++ { ++ .id = V4L2_CID_MPEG_VIDEO_HEVC_SPS, ++ .ptr = &sps, ++ .size = sizeof(sps), ++ }, ++ }; ++ ++ fill_sps(&sps, h); ++ ++ ret = ff_v4l2_request_init(avctx, V4L2_PIX_FMT_HEVC_SLICE, 4 * 1024 * 1024, control, FF_ARRAY_ELEMS(control)); ++ if (ret) ++ return ret; ++ ++ return v4l2_request_hevc_set_controls(avctx); ++} ++ ++const AVHWAccel ff_hevc_v4l2request_hwaccel = { ++ .name = "hevc_v4l2request", ++ .type = AVMEDIA_TYPE_VIDEO, ++ .id = AV_CODEC_ID_HEVC, ++ .pix_fmt = AV_PIX_FMT_DRM_PRIME, ++ .start_frame = v4l2_request_hevc_start_frame, ++ .decode_slice = v4l2_request_hevc_decode_slice, ++ .end_frame = v4l2_request_hevc_end_frame, ++ .frame_priv_data_size = sizeof(V4L2RequestControlsHEVC), ++ .init = v4l2_request_hevc_init, ++ .uninit = ff_v4l2_request_uninit, ++ .priv_data_size = sizeof(V4L2RequestContextHEVC), ++ .frame_params = ff_v4l2_request_frame_params, ++ .caps_internal = HWACCEL_CAP_ASYNC_SAFE, ++}; + +From 11f2c3d30ce6085ef1510f51481852bab1380ecd Mon Sep 17 00:00:00 2001 +From: Boris Brezillon +Date: Wed, 22 May 2019 14:46:58 +0200 +Subject: [PATCH 06/18] Add V4L2 request API vp8 hwaccel + +Need to fix the STREAMOFF/STREAMON issue in a proper way. + +Signed-off-by: Boris Brezillon +Signed-off-by: Ezequiel Garcia +--- + configure | 3 + + libavcodec/Makefile | 1 + + libavcodec/hwaccels.h | 1 + + libavcodec/v4l2_request_vp8.c | 180 ++++++++++++++++++++++++++++++++++ + libavcodec/vp8.c | 8 +- + 5 files changed, 192 insertions(+), 1 deletion(-) + create mode 100644 libavcodec/v4l2_request_vp8.c + +diff --git a/configure b/configure +index 58abd99335..cbb91c2bca 100755 +--- a/configure ++++ b/configure +@@ -3003,6 +3003,8 @@ vc1_vdpau_hwaccel_deps="vdpau" + vc1_vdpau_hwaccel_select="vc1_decoder" + vp8_nvdec_hwaccel_deps="nvdec" + vp8_nvdec_hwaccel_select="vp8_decoder" ++vp8_v4l2request_hwaccel_deps="v4l2_request vp8_v4l2_request" ++vp8_v4l2request_hwaccel_select="vp8_decoder" + vp8_vaapi_hwaccel_deps="vaapi" + vp8_vaapi_hwaccel_select="vp8_decoder" + vp9_d3d11va_hwaccel_deps="d3d11va DXVA_PicParams_VP9" +@@ -6576,6 +6578,7 @@ check_func_headers "linux/media.h linux/videodev2.h" v4l2_timeval_to_ns + check_cc h264_v4l2_request linux/videodev2.h "int i = V4L2_PIX_FMT_H264_SLICE;" + check_cc hevc_v4l2_request linux/videodev2.h "int i = V4L2_PIX_FMT_HEVC_SLICE;" + check_cc mpeg2_v4l2_request linux/videodev2.h "int i = V4L2_PIX_FMT_MPEG2_SLICE;" ++check_cc vp8_v4l2_request linux/videodev2.h "int i = V4L2_PIX_FMT_VP8_FRAME;" + + check_headers sys/videoio.h + test_code cc sys/videoio.h "struct v4l2_frmsizeenum vfse; vfse.discrete.width = 0;" && enable_sanitized struct_v4l2_frmivalenum_discrete +diff --git a/libavcodec/Makefile b/libavcodec/Makefile +index 5d0e1d7dae..d6af854daa 100644 +--- a/libavcodec/Makefile ++++ b/libavcodec/Makefile +@@ -940,6 +940,7 @@ OBJS-$(CONFIG_VC1_QSV_HWACCEL) += qsvdec_other.o + OBJS-$(CONFIG_VC1_VAAPI_HWACCEL) += vaapi_vc1.o + OBJS-$(CONFIG_VC1_VDPAU_HWACCEL) += vdpau_vc1.o + OBJS-$(CONFIG_VP8_NVDEC_HWACCEL) += nvdec_vp8.o ++OBJS-$(CONFIG_VP8_V4L2REQUEST_HWACCEL) += 
v4l2_request_vp8.o + OBJS-$(CONFIG_VP8_VAAPI_HWACCEL) += vaapi_vp8.o + OBJS-$(CONFIG_VP9_D3D11VA_HWACCEL) += dxva2_vp9.o + OBJS-$(CONFIG_VP9_DXVA2_HWACCEL) += dxva2_vp9.o +diff --git a/libavcodec/hwaccels.h b/libavcodec/hwaccels.h +index e2f90a5fdd..bd75e94f4c 100644 +--- a/libavcodec/hwaccels.h ++++ b/libavcodec/hwaccels.h +@@ -65,6 +65,7 @@ extern const AVHWAccel ff_vc1_nvdec_hwaccel; + extern const AVHWAccel ff_vc1_vaapi_hwaccel; + extern const AVHWAccel ff_vc1_vdpau_hwaccel; + extern const AVHWAccel ff_vp8_nvdec_hwaccel; ++extern const AVHWAccel ff_vp8_v4l2request_hwaccel; + extern const AVHWAccel ff_vp8_vaapi_hwaccel; + extern const AVHWAccel ff_vp9_d3d11va_hwaccel; + extern const AVHWAccel ff_vp9_d3d11va2_hwaccel; +diff --git a/libavcodec/v4l2_request_vp8.c b/libavcodec/v4l2_request_vp8.c +new file mode 100644 +index 0000000000..7e75ee398a +--- /dev/null ++++ b/libavcodec/v4l2_request_vp8.c +@@ -0,0 +1,180 @@ ++/* ++ * This file is part of FFmpeg. ++ * ++ * FFmpeg is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2.1 of the License, or (at your option) any later version. ++ * ++ * FFmpeg is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * Lesser General Public License for more details. ++ * ++ * You should have received a copy of the GNU Lesser General Public ++ * License along with FFmpeg; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ++ */ ++ ++#include "hwconfig.h" ++#include "v4l2_request.h" ++#include "vp8.h" ++ ++typedef struct V4L2RequestControlsVP8 { ++ struct v4l2_ctrl_vp8_frame_header ctrl; ++} V4L2RequestControlsVP8; ++ ++static int v4l2_request_vp8_start_frame(AVCodecContext *avctx, ++ av_unused const uint8_t *buffer, ++ av_unused uint32_t size) ++{ ++ const VP8Context *s = avctx->priv_data; ++ V4L2RequestControlsVP8 *controls = s->framep[VP56_FRAME_CURRENT]->hwaccel_picture_private; ++ ++ memset(&controls->ctrl, 0, sizeof(controls->ctrl)); ++ return ff_v4l2_request_reset_frame(avctx, s->framep[VP56_FRAME_CURRENT]->tf.f); ++} ++ ++static int v4l2_request_vp8_end_frame(AVCodecContext *avctx) ++{ ++ const VP8Context *s = avctx->priv_data; ++ V4L2RequestControlsVP8 *controls = s->framep[VP56_FRAME_CURRENT]->hwaccel_picture_private; ++ struct v4l2_ext_control control[] = { ++ { ++ .id = V4L2_CID_MPEG_VIDEO_VP8_FRAME_HEADER, ++ .ptr = &controls->ctrl, ++ .size = sizeof(controls->ctrl), ++ }, ++ }; ++ ++ return ff_v4l2_request_decode_frame(avctx, s->framep[VP56_FRAME_CURRENT]->tf.f, ++ control, FF_ARRAY_ELEMS(control)); ++} ++ ++static int v4l2_request_vp8_decode_slice(AVCodecContext *avctx, ++ const uint8_t *buffer, ++ uint32_t size) ++{ ++ const VP8Context *s = avctx->priv_data; ++ V4L2RequestControlsVP8 *controls = s->framep[VP56_FRAME_CURRENT]->hwaccel_picture_private; ++ struct v4l2_ctrl_vp8_frame_header *hdr = &controls->ctrl; ++ const uint8_t *data = buffer + 3 + 7 * s->keyframe; ++ unsigned int i, j, k; ++ ++ hdr->version = s->profile & 0x3; ++ hdr->width = avctx->width; ++ hdr->height = avctx->height; ++ /* FIXME: set ->xx_scale */ ++ hdr->prob_skip_false = s->prob->mbskip; ++ hdr->prob_intra = s->prob->intra; ++ hdr->prob_gf = s->prob->golden; ++ hdr->prob_last = s->prob->last; ++ hdr->first_part_size = 
s->header_partition_size; ++ hdr->first_part_header_bits = (8 * (s->coder_state_at_header_end.input - data) - ++ s->coder_state_at_header_end.bit_count - 8); ++ hdr->num_dct_parts = s->num_coeff_partitions; ++ for (i = 0; i < 8; i++) ++ hdr->dct_part_sizes[i] = s->coeff_partition_size[i]; ++ ++ hdr->coder_state.range = s->coder_state_at_header_end.range; ++ hdr->coder_state.value = s->coder_state_at_header_end.value; ++ hdr->coder_state.bit_count = s->coder_state_at_header_end.bit_count; ++ if (s->framep[VP56_FRAME_PREVIOUS]) ++ hdr->last_frame_ts = ff_v4l2_request_get_capture_timestamp(s->framep[VP56_FRAME_PREVIOUS]->tf.f); ++ if (s->framep[VP56_FRAME_GOLDEN]) ++ hdr->golden_frame_ts = ff_v4l2_request_get_capture_timestamp(s->framep[VP56_FRAME_GOLDEN]->tf.f); ++ if (s->framep[VP56_FRAME_GOLDEN2]) ++ hdr->alt_frame_ts = ff_v4l2_request_get_capture_timestamp(s->framep[VP56_FRAME_GOLDEN2]->tf.f); ++ hdr->flags |= s->invisible ? 0 : V4L2_VP8_FRAME_HEADER_FLAG_SHOW_FRAME; ++ hdr->flags |= s->mbskip_enabled ? V4L2_VP8_FRAME_HEADER_FLAG_MB_NO_SKIP_COEFF : 0; ++ hdr->flags |= (s->profile & 0x4) ? V4L2_VP8_FRAME_HEADER_FLAG_EXPERIMENTAL : 0; ++ hdr->flags |= s->keyframe ? V4L2_VP8_FRAME_HEADER_FLAG_KEY_FRAME : 0; ++ hdr->flags |= s->sign_bias[VP56_FRAME_GOLDEN] ? V4L2_VP8_FRAME_HEADER_FLAG_SIGN_BIAS_GOLDEN : 0; ++ hdr->flags |= s->sign_bias[VP56_FRAME_GOLDEN2] ? V4L2_VP8_FRAME_HEADER_FLAG_SIGN_BIAS_ALT : 0; ++ hdr->segment_header.flags |= s->segmentation.enabled ? V4L2_VP8_SEGMENT_HEADER_FLAG_ENABLED : 0; ++ hdr->segment_header.flags |= s->segmentation.update_map ? V4L2_VP8_SEGMENT_HEADER_FLAG_UPDATE_MAP : 0; ++ hdr->segment_header.flags |= s->segmentation.update_feature_data ? V4L2_VP8_SEGMENT_HEADER_FLAG_UPDATE_FEATURE_DATA : 0; ++ hdr->segment_header.flags |= s->segmentation.absolute_vals ? 0 : V4L2_VP8_SEGMENT_HEADER_FLAG_DELTA_VALUE_MODE; ++ for (i = 0; i < 4; i++) { ++ hdr->segment_header.quant_update[i] = s->segmentation.base_quant[i]; ++ hdr->segment_header.lf_update[i] = s->segmentation.filter_level[i]; ++ } ++ ++ for (i = 0; i < 3; i++) ++ hdr->segment_header.segment_probs[i] = s->prob->segmentid[i]; ++ ++ hdr->lf_header.level = s->filter.level; ++ hdr->lf_header.sharpness_level = s->filter.sharpness; ++ hdr->lf_header.flags |= s->lf_delta.enabled ? V4L2_VP8_LF_HEADER_ADJ_ENABLE : 0; ++ hdr->lf_header.flags |= s->lf_delta.update ? V4L2_VP8_LF_HEADER_DELTA_UPDATE : 0; ++ hdr->lf_header.flags |= s->filter.simple ? 
V4L2_VP8_LF_FILTER_TYPE_SIMPLE : 0; ++ for (i = 0; i < 4; i++) { ++ hdr->lf_header.ref_frm_delta[i] = s->lf_delta.ref[i]; ++ hdr->lf_header.mb_mode_delta[i] = s->lf_delta.mode[i + MODE_I4x4]; ++ } ++ ++ // Probabilites ++ if (s->keyframe) { ++ static const uint8_t keyframe_y_mode_probs[4] = { ++ 145, 156, 163, 128 ++ }; ++ static const uint8_t keyframe_uv_mode_probs[3] = { ++ 142, 114, 183 ++ }; ++ ++ memcpy(hdr->entropy_header.y_mode_probs, keyframe_y_mode_probs, 4); ++ memcpy(hdr->entropy_header.uv_mode_probs, keyframe_uv_mode_probs, 3); ++ } else { ++ for (i = 0; i < 4; i++) ++ hdr->entropy_header.y_mode_probs[i] = s->prob->pred16x16[i]; ++ for (i = 0; i < 3; i++) ++ hdr->entropy_header.uv_mode_probs[i] = s->prob->pred8x8c[i]; ++ } ++ for (i = 0; i < 2; i++) ++ for (j = 0; j < 19; j++) ++ hdr->entropy_header.mv_probs[i][j] = s->prob->mvc[i][j]; ++ ++ for (i = 0; i < 4; i++) { ++ for (j = 0; j < 8; j++) { ++ static const int coeff_bands_inverse[8] = { ++ 0, 1, 2, 3, 5, 6, 4, 15 ++ }; ++ int coeff_pos = coeff_bands_inverse[j]; ++ ++ for (k = 0; k < 3; k++) { ++ memcpy(hdr->entropy_header.coeff_probs[i][j][k], ++ s->prob->token[i][coeff_pos][k], 11); ++ } ++ } ++ } ++ ++ hdr->quant_header.y_ac_qi = s->quant.yac_qi; ++ hdr->quant_header.y_dc_delta = s->quant.ydc_delta; ++ hdr->quant_header.y2_dc_delta = s->quant.y2dc_delta; ++ hdr->quant_header.y2_ac_delta = s->quant.y2ac_delta; ++ hdr->quant_header.uv_dc_delta = s->quant.uvdc_delta; ++ hdr->quant_header.uv_ac_delta = s->quant.uvac_delta; ++ ++ return ff_v4l2_request_append_output_buffer(avctx, s->framep[VP56_FRAME_CURRENT]->tf.f, buffer, size); ++} ++ ++static int v4l2_request_vp8_init(AVCodecContext *avctx) ++{ ++ return ff_v4l2_request_init(avctx, V4L2_PIX_FMT_VP8_FRAME, 2 * 1024 * 1024, NULL, 0); ++} ++ ++const AVHWAccel ff_vp8_v4l2request_hwaccel = { ++ .name = "vp8_v4l2request", ++ .type = AVMEDIA_TYPE_VIDEO, ++ .id = AV_CODEC_ID_VP8, ++ .pix_fmt = AV_PIX_FMT_DRM_PRIME, ++ .start_frame = v4l2_request_vp8_start_frame, ++ .decode_slice = v4l2_request_vp8_decode_slice, ++ .end_frame = v4l2_request_vp8_end_frame, ++ .frame_priv_data_size = sizeof(V4L2RequestControlsVP8), ++ .init = v4l2_request_vp8_init, ++ .uninit = ff_v4l2_request_uninit, ++ .priv_data_size = sizeof(V4L2RequestContext), ++ .frame_params = ff_v4l2_request_frame_params, ++ .caps_internal = HWACCEL_CAP_ASYNC_SAFE, ++}; +diff --git a/libavcodec/vp8.c b/libavcodec/vp8.c +index bab4223aca..0e1edb46fb 100644 +--- a/libavcodec/vp8.c ++++ b/libavcodec/vp8.c +@@ -175,6 +175,9 @@ static enum AVPixelFormat get_pixel_format(VP8Context *s) + #endif + #if CONFIG_VP8_NVDEC_HWACCEL + AV_PIX_FMT_CUDA, ++#endif ++#if CONFIG_VP8_V4L2REQUEST_HWACCEL ++ AV_PIX_FMT_DRM_PRIME, + #endif + AV_PIX_FMT_YUV420P, + AV_PIX_FMT_NONE, +@@ -198,7 +201,7 @@ int update_dimensions(VP8Context *s, int width, int height, int is_vp7) + return ret; + } + +- if (!s->actually_webp && !is_vp7) { ++ if (!s->actually_webp && !is_vp7 && s->pix_fmt == AV_PIX_FMT_NONE) { + s->pix_fmt = get_pixel_format(s); + if (s->pix_fmt < 0) + return AVERROR(EINVAL); +@@ -2968,6 +2971,9 @@ AVCodec ff_vp8_decoder = { + #endif + #if CONFIG_VP8_NVDEC_HWACCEL + HWACCEL_NVDEC(vp8), ++#endif ++#if CONFIG_VP8_V4L2REQUEST_HWACCEL ++ HWACCEL_V4L2REQUEST(vp8), + #endif + NULL + }, + +From d1cbb6de7dd7462fb696160612ae45623c61265c Mon Sep 17 00:00:00 2001 +From: Ezequiel Garcia +Date: Wed, 20 Feb 2019 11:18:00 -0300 +Subject: [PATCH 07/18] avcodec/h264: parse idr_pic_id + +Signed-off-by: Ezequiel Garcia +--- + libavcodec/h264_slice.c | 2 +- + 
libavcodec/h264dec.h | 2 ++ + libavcodec/v4l2_request_h264.c | 2 +- + 3 files changed, 4 insertions(+), 2 deletions(-) + +diff --git a/libavcodec/h264_slice.c b/libavcodec/h264_slice.c +index 3ae11ac8a7..96e8edd102 100644 +--- a/libavcodec/h264_slice.c ++++ b/libavcodec/h264_slice.c +@@ -1822,7 +1822,7 @@ static int h264_slice_header_parse(const H264Context *h, H264SliceContext *sl, + } + + if (nal->type == H264_NAL_IDR_SLICE) +- get_ue_golomb_long(&sl->gb); /* idr_pic_id */ ++ sl->idr_pic_id = get_ue_golomb_long(&sl->gb); + + if (sps->poc_type == 0) { + sl->poc_lsb = get_bits(&sl->gb, sps->log2_max_poc_lsb); +diff --git a/libavcodec/h264dec.h b/libavcodec/h264dec.h +index a419615124..aebc5ed2f6 100644 +--- a/libavcodec/h264dec.h ++++ b/libavcodec/h264dec.h +@@ -190,6 +190,8 @@ typedef struct H264SliceContext { + int slice_type_nos; ///< S free slice type (SI/SP are remapped to I/P) + int slice_type_fixed; + ++ int idr_pic_id; ++ + int qscale; + int chroma_qp[2]; // QPc + int qp_thresh; ///< QP threshold to skip loopfilter +diff --git a/libavcodec/v4l2_request_h264.c b/libavcodec/v4l2_request_h264.c +index 94b9aca8ad..9382e573b4 100644 +--- a/libavcodec/v4l2_request_h264.c ++++ b/libavcodec/v4l2_request_h264.c +@@ -303,7 +303,7 @@ static int v4l2_request_h264_decode_slice(AVCodecContext *avctx, const uint8_t * + .pic_parameter_set_id = sl->pps_id, + .colour_plane_id = 0, /* what is this? */ + .frame_num = h->poc.frame_num, +- .idr_pic_id = 0, /* what is this? */ ++ .idr_pic_id = sl->idr_pic_id, + .pic_order_cnt_lsb = sl->poc_lsb, + .delta_pic_order_cnt_bottom = sl->delta_poc_bottom, + .delta_pic_order_cnt0 = sl->delta_poc[0], + +From 88da95ec126bdf5ffbc4399c7e453a2248e79119 Mon Sep 17 00:00:00 2001 +From: Boris Brezillon +Date: Wed, 22 May 2019 14:44:22 +0200 +Subject: [PATCH 08/18] avcodec/h264: parse ref_pic_marking_size_in_bits and + pic_order_cnt_bit_size + +Signed-off-by: Boris Brezillon +--- + libavcodec/h264_slice.c | 6 +++++- + libavcodec/h264dec.h | 2 ++ + libavcodec/v4l2_request_h264.c | 4 ++-- + 3 files changed, 9 insertions(+), 3 deletions(-) + +diff --git a/libavcodec/h264_slice.c b/libavcodec/h264_slice.c +index 96e8edd102..c3896cfd90 100644 +--- a/libavcodec/h264_slice.c ++++ b/libavcodec/h264_slice.c +@@ -1740,7 +1740,7 @@ static int h264_slice_header_parse(const H264Context *h, H264SliceContext *sl, + unsigned int slice_type, tmp, i; + int field_pic_flag, bottom_field_flag; + int first_slice = sl == h->slice_ctx && !h->current_slice; +- int picture_structure; ++ int picture_structure, pos; + + if (first_slice) + av_assert0(!h->setup_finished); +@@ -1824,6 +1824,7 @@ static int h264_slice_header_parse(const H264Context *h, H264SliceContext *sl, + if (nal->type == H264_NAL_IDR_SLICE) + sl->idr_pic_id = get_ue_golomb_long(&sl->gb); + ++ pos = sl->gb.index; + if (sps->poc_type == 0) { + sl->poc_lsb = get_bits(&sl->gb, sps->log2_max_poc_lsb); + +@@ -1837,6 +1838,7 @@ static int h264_slice_header_parse(const H264Context *h, H264SliceContext *sl, + if (pps->pic_order_present == 1 && picture_structure == PICT_FRAME) + sl->delta_poc[1] = get_se_golomb(&sl->gb); + } ++ sl->pic_order_cnt_bit_size = sl->gb.index - pos; + + sl->redundant_pic_count = 0; + if (pps->redundant_pic_cnt_present) +@@ -1876,9 +1878,11 @@ static int h264_slice_header_parse(const H264Context *h, H264SliceContext *sl, + + sl->explicit_ref_marking = 0; + if (nal->ref_idc) { ++ int bit_pos = sl->gb.index; + ret = ff_h264_decode_ref_pic_marking(sl, &sl->gb, nal, h->avctx); + if (ret < 0 && (h->avctx->err_recognition 
& AV_EF_EXPLODE)) + return AVERROR_INVALIDDATA; ++ sl->ref_pic_marking_size_in_bits = sl->gb.index - bit_pos; + } + + if (sl->slice_type_nos != AV_PICTURE_TYPE_I && pps->cabac) { +diff --git a/libavcodec/h264dec.h b/libavcodec/h264dec.h +index aebc5ed2f6..b3dcd6e7da 100644 +--- a/libavcodec/h264dec.h ++++ b/libavcodec/h264dec.h +@@ -330,11 +330,13 @@ typedef struct H264SliceContext { + MMCO mmco[MAX_MMCO_COUNT]; + int nb_mmco; + int explicit_ref_marking; ++ int ref_pic_marking_size_in_bits; + + int frame_num; + int poc_lsb; + int delta_poc_bottom; + int delta_poc[2]; ++ int pic_order_cnt_bit_size; + int curr_pic_num; + int max_pic_num; + } H264SliceContext; +diff --git a/libavcodec/v4l2_request_h264.c b/libavcodec/v4l2_request_h264.c +index 9382e573b4..bdaeb67d26 100644 +--- a/libavcodec/v4l2_request_h264.c ++++ b/libavcodec/v4l2_request_h264.c +@@ -311,9 +311,9 @@ static int v4l2_request_h264_decode_slice(AVCodecContext *avctx, const uint8_t * + .redundant_pic_cnt = sl->redundant_pic_count, + + /* Size in bits of dec_ref_pic_marking() syntax element. */ +- .dec_ref_pic_marking_bit_size = 0, ++ .dec_ref_pic_marking_bit_size = sl->ref_pic_marking_size_in_bits, + /* Size in bits of pic order count syntax. */ +- .pic_order_cnt_bit_size = 0, ++ .pic_order_cnt_bit_size = sl->pic_order_cnt_bit_size, + + .cabac_init_idc = sl->cabac_init_idc, + .slice_qp_delta = sl->qscale - pps->init_qp, + +From afd9b1ffb7a2805423d888c49ea13ee5ffc95994 Mon Sep 17 00:00:00 2001 +From: Jernej Skrabec +Date: Thu, 14 Feb 2019 23:20:05 +0100 +Subject: [PATCH 09/18] Add and use private linux headers for V4L2 request API + ctrls + +Signed-off-by: Jernej Skrabec +--- + configure | 6 +- + libavcodec/h264-ctrls.h | 210 +++++++++++++++++++++++++++++++ + libavcodec/hevc-ctrls.h | 212 ++++++++++++++++++++++++++++++++ + libavcodec/mpeg2-ctrls.h | 82 ++++++++++++ + libavcodec/v4l2_request_h264.c | 1 + + libavcodec/v4l2_request_hevc.c | 1 + + libavcodec/v4l2_request_mpeg2.c | 1 + + libavcodec/v4l2_request_vp8.c | 1 + + libavcodec/vp8-ctrls.h | 112 +++++++++++++++++ + 9 files changed, 623 insertions(+), 3 deletions(-) + create mode 100644 libavcodec/h264-ctrls.h + create mode 100644 libavcodec/hevc-ctrls.h + create mode 100644 libavcodec/mpeg2-ctrls.h + create mode 100644 libavcodec/vp8-ctrls.h + +diff --git a/configure b/configure +index cbb91c2bca..623012757c 100755 +--- a/configure ++++ b/configure +@@ -2925,7 +2925,7 @@ h264_dxva2_hwaccel_deps="dxva2" + h264_dxva2_hwaccel_select="h264_decoder" + h264_nvdec_hwaccel_deps="nvdec" + h264_nvdec_hwaccel_select="h264_decoder" +-h264_v4l2request_hwaccel_deps="v4l2_request h264_v4l2_request" ++h264_v4l2request_hwaccel_deps="v4l2_request" + h264_v4l2request_hwaccel_select="h264_decoder" + h264_vaapi_hwaccel_deps="vaapi" + h264_vaapi_hwaccel_select="h264_decoder" +@@ -2941,7 +2941,7 @@ hevc_dxva2_hwaccel_deps="dxva2 DXVA_PicParams_HEVC" + hevc_dxva2_hwaccel_select="hevc_decoder" + hevc_nvdec_hwaccel_deps="nvdec" + hevc_nvdec_hwaccel_select="hevc_decoder" +-hevc_v4l2request_hwaccel_deps="v4l2_request hevc_v4l2_request" ++hevc_v4l2request_hwaccel_deps="v4l2_request" + hevc_v4l2request_hwaccel_select="hevc_decoder" + hevc_vaapi_hwaccel_deps="vaapi VAPictureParameterBufferHEVC" + hevc_vaapi_hwaccel_select="hevc_decoder" +@@ -3003,7 +3003,7 @@ vc1_vdpau_hwaccel_deps="vdpau" + vc1_vdpau_hwaccel_select="vc1_decoder" + vp8_nvdec_hwaccel_deps="nvdec" + vp8_nvdec_hwaccel_select="vp8_decoder" +-vp8_v4l2request_hwaccel_deps="v4l2_request vp8_v4l2_request" 
++vp8_v4l2request_hwaccel_deps="v4l2_request" + vp8_v4l2request_hwaccel_select="vp8_decoder" + vp8_vaapi_hwaccel_deps="vaapi" + vp8_vaapi_hwaccel_select="vp8_decoder" +diff --git a/libavcodec/h264-ctrls.h b/libavcodec/h264-ctrls.h +new file mode 100644 +index 0000000000..e877bf1d53 +--- /dev/null ++++ b/libavcodec/h264-ctrls.h +@@ -0,0 +1,210 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * These are the H.264 state controls for use with stateless H.264 ++ * codec drivers. ++ * ++ * It turns out that these structs are not stable yet and will undergo ++ * more changes. So keep them private until they are stable and ready to ++ * become part of the official public API. ++ */ ++ ++#ifndef _H264_CTRLS_H_ ++#define _H264_CTRLS_H_ ++ ++#include ++ ++/* Our pixel format isn't stable at the moment */ ++#define V4L2_PIX_FMT_H264_SLICE v4l2_fourcc('S', '2', '6', '4') /* H264 parsed slices */ ++ ++/* ++ * This is put insanely high to avoid conflicting with controls that ++ * would be added during the phase where those controls are not ++ * stable. It should be fixed eventually. ++ */ ++#define V4L2_CID_MPEG_VIDEO_H264_SPS (V4L2_CID_MPEG_BASE+1000) ++#define V4L2_CID_MPEG_VIDEO_H264_PPS (V4L2_CID_MPEG_BASE+1001) ++#define V4L2_CID_MPEG_VIDEO_H264_SCALING_MATRIX (V4L2_CID_MPEG_BASE+1002) ++#define V4L2_CID_MPEG_VIDEO_H264_SLICE_PARAMS (V4L2_CID_MPEG_BASE+1003) ++#define V4L2_CID_MPEG_VIDEO_H264_DECODE_PARAMS (V4L2_CID_MPEG_BASE+1004) ++#define V4L2_CID_MPEG_VIDEO_H264_DECODE_MODE (V4L2_CID_MPEG_BASE+1005) ++#define V4L2_CID_MPEG_VIDEO_H264_START_CODE (V4L2_CID_MPEG_BASE+1006) ++ ++/* enum v4l2_ctrl_type type values */ ++#define V4L2_CTRL_TYPE_H264_SPS 0x0110 ++#define V4L2_CTRL_TYPE_H264_PPS 0x0111 ++#define V4L2_CTRL_TYPE_H264_SCALING_MATRIX 0x0112 ++#define V4L2_CTRL_TYPE_H264_SLICE_PARAMS 0x0113 ++#define V4L2_CTRL_TYPE_H264_DECODE_PARAMS 0x0114 ++ ++enum v4l2_mpeg_video_h264_decode_mode { ++ V4L2_MPEG_VIDEO_H264_DECODE_MODE_SLICE_BASED, ++ V4L2_MPEG_VIDEO_H264_DECODE_MODE_FRAME_BASED, ++}; ++ ++enum v4l2_mpeg_video_h264_start_code { ++ V4L2_MPEG_VIDEO_H264_START_CODE_NONE, ++ V4L2_MPEG_VIDEO_H264_START_CODE_ANNEX_B, ++}; ++ ++#define V4L2_H264_SPS_CONSTRAINT_SET0_FLAG 0x01 ++#define V4L2_H264_SPS_CONSTRAINT_SET1_FLAG 0x02 ++#define V4L2_H264_SPS_CONSTRAINT_SET2_FLAG 0x04 ++#define V4L2_H264_SPS_CONSTRAINT_SET3_FLAG 0x08 ++#define V4L2_H264_SPS_CONSTRAINT_SET4_FLAG 0x10 ++#define V4L2_H264_SPS_CONSTRAINT_SET5_FLAG 0x20 ++ ++#define V4L2_H264_SPS_FLAG_SEPARATE_COLOUR_PLANE 0x01 ++#define V4L2_H264_SPS_FLAG_QPPRIME_Y_ZERO_TRANSFORM_BYPASS 0x02 ++#define V4L2_H264_SPS_FLAG_DELTA_PIC_ORDER_ALWAYS_ZERO 0x04 ++#define V4L2_H264_SPS_FLAG_GAPS_IN_FRAME_NUM_VALUE_ALLOWED 0x08 ++#define V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY 0x10 ++#define V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD 0x20 ++#define V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE 0x40 ++ ++struct v4l2_ctrl_h264_sps { ++ __u8 profile_idc; ++ __u8 constraint_set_flags; ++ __u8 level_idc; ++ __u8 seq_parameter_set_id; ++ __u8 chroma_format_idc; ++ __u8 bit_depth_luma_minus8; ++ __u8 bit_depth_chroma_minus8; ++ __u8 log2_max_frame_num_minus4; ++ __u8 pic_order_cnt_type; ++ __u8 log2_max_pic_order_cnt_lsb_minus4; ++ __u8 max_num_ref_frames; ++ __u8 num_ref_frames_in_pic_order_cnt_cycle; ++ __s32 offset_for_ref_frame[255]; ++ __s32 offset_for_non_ref_pic; ++ __s32 offset_for_top_to_bottom_field; ++ __u16 pic_width_in_mbs_minus1; ++ __u16 pic_height_in_map_units_minus1; ++ __u32 flags; ++}; ++ ++#define V4L2_H264_PPS_FLAG_ENTROPY_CODING_MODE 0x0001 ++#define 
V4L2_H264_PPS_FLAG_BOTTOM_FIELD_PIC_ORDER_IN_FRAME_PRESENT 0x0002 ++#define V4L2_H264_PPS_FLAG_WEIGHTED_PRED 0x0004 ++#define V4L2_H264_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT 0x0008 ++#define V4L2_H264_PPS_FLAG_CONSTRAINED_INTRA_PRED 0x0010 ++#define V4L2_H264_PPS_FLAG_REDUNDANT_PIC_CNT_PRESENT 0x0020 ++#define V4L2_H264_PPS_FLAG_TRANSFORM_8X8_MODE 0x0040 ++#define V4L2_H264_PPS_FLAG_PIC_SCALING_MATRIX_PRESENT 0x0080 ++ ++struct v4l2_ctrl_h264_pps { ++ __u8 pic_parameter_set_id; ++ __u8 seq_parameter_set_id; ++ __u8 num_slice_groups_minus1; ++ __u8 num_ref_idx_l0_default_active_minus1; ++ __u8 num_ref_idx_l1_default_active_minus1; ++ __u8 weighted_bipred_idc; ++ __s8 pic_init_qp_minus26; ++ __s8 pic_init_qs_minus26; ++ __s8 chroma_qp_index_offset; ++ __s8 second_chroma_qp_index_offset; ++ __u16 flags; ++}; ++ ++struct v4l2_ctrl_h264_scaling_matrix { ++ __u8 scaling_list_4x4[6][16]; ++ __u8 scaling_list_8x8[6][64]; ++}; ++ ++struct v4l2_h264_weight_factors { ++ __s16 luma_weight[32]; ++ __s16 luma_offset[32]; ++ __s16 chroma_weight[32][2]; ++ __s16 chroma_offset[32][2]; ++}; ++ ++struct v4l2_h264_pred_weight_table { ++ __u16 luma_log2_weight_denom; ++ __u16 chroma_log2_weight_denom; ++ struct v4l2_h264_weight_factors weight_factors[2]; ++}; ++ ++#define V4L2_H264_SLICE_TYPE_P 0 ++#define V4L2_H264_SLICE_TYPE_B 1 ++#define V4L2_H264_SLICE_TYPE_I 2 ++#define V4L2_H264_SLICE_TYPE_SP 3 ++#define V4L2_H264_SLICE_TYPE_SI 4 ++ ++#define V4L2_H264_SLICE_FLAG_FIELD_PIC 0x01 ++#define V4L2_H264_SLICE_FLAG_BOTTOM_FIELD 0x02 ++#define V4L2_H264_SLICE_FLAG_DIRECT_SPATIAL_MV_PRED 0x04 ++#define V4L2_H264_SLICE_FLAG_SP_FOR_SWITCH 0x08 ++ ++struct v4l2_ctrl_h264_slice_params { ++ /* Size in bytes, including header */ ++ __u32 size; ++ ++ /* Offset in bytes to the start of slice in the OUTPUT buffer. */ ++ __u32 start_byte_offset; ++ ++ /* Offset in bits to slice_data() from the beginning of this slice. */ ++ __u32 header_bit_size; ++ ++ __u16 first_mb_in_slice; ++ __u8 slice_type; ++ __u8 pic_parameter_set_id; ++ __u8 colour_plane_id; ++ __u8 redundant_pic_cnt; ++ __u16 frame_num; ++ __u16 idr_pic_id; ++ __u16 pic_order_cnt_lsb; ++ __s32 delta_pic_order_cnt_bottom; ++ __s32 delta_pic_order_cnt0; ++ __s32 delta_pic_order_cnt1; ++ ++ struct v4l2_h264_pred_weight_table pred_weight_table; ++ /* Size in bits of dec_ref_pic_marking() syntax element. */ ++ __u32 dec_ref_pic_marking_bit_size; ++ /* Size in bits of pic order count syntax. */ ++ __u32 pic_order_cnt_bit_size; ++ ++ __u8 cabac_init_idc; ++ __s8 slice_qp_delta; ++ __s8 slice_qs_delta; ++ __u8 disable_deblocking_filter_idc; ++ __s8 slice_alpha_c0_offset_div2; ++ __s8 slice_beta_offset_div2; ++ __u8 num_ref_idx_l0_active_minus1; ++ __u8 num_ref_idx_l1_active_minus1; ++ __u32 slice_group_change_cycle; ++ ++ /* ++ * Entries on each list are indices into ++ * v4l2_ctrl_h264_decode_params.dpb[]. 
++ */ ++ __u8 ref_pic_list0[32]; ++ __u8 ref_pic_list1[32]; ++ ++ __u32 flags; ++}; ++ ++#define V4L2_H264_DPB_ENTRY_FLAG_VALID 0x01 ++#define V4L2_H264_DPB_ENTRY_FLAG_ACTIVE 0x02 ++#define V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM 0x04 ++ ++struct v4l2_h264_dpb_entry { ++ __u64 reference_ts; ++ __u16 frame_num; ++ __u16 pic_num; ++ /* Note that field is indicated by v4l2_buffer.field */ ++ __s32 top_field_order_cnt; ++ __s32 bottom_field_order_cnt; ++ __u32 flags; /* V4L2_H264_DPB_ENTRY_FLAG_* */ ++}; ++ ++#define V4L2_H264_DECODE_PARAM_FLAG_IDR_PIC 0x01 ++ ++struct v4l2_ctrl_h264_decode_params { ++ struct v4l2_h264_dpb_entry dpb[16]; ++ __u16 num_slices; ++ __u16 nal_ref_idc; ++ __s32 top_field_order_cnt; ++ __s32 bottom_field_order_cnt; ++ __u32 flags; /* V4L2_H264_DECODE_PARAM_FLAG_* */ ++}; ++ ++#endif +diff --git a/libavcodec/hevc-ctrls.h b/libavcodec/hevc-ctrls.h +new file mode 100644 +index 0000000000..1009cf0891 +--- /dev/null ++++ b/libavcodec/hevc-ctrls.h +@@ -0,0 +1,212 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * These are the HEVC state controls for use with stateless HEVC ++ * codec drivers. ++ * ++ * It turns out that these structs are not stable yet and will undergo ++ * more changes. So keep them private until they are stable and ready to ++ * become part of the official public API. ++ */ ++ ++#ifndef _HEVC_CTRLS_H_ ++#define _HEVC_CTRLS_H_ ++ ++#include ++ ++/* The pixel format isn't stable at the moment and will likely be renamed. */ ++#define V4L2_PIX_FMT_HEVC_SLICE v4l2_fourcc('S', '2', '6', '5') /* HEVC parsed slices */ ++ ++#define V4L2_CID_MPEG_VIDEO_HEVC_SPS (V4L2_CID_MPEG_BASE + 1008) ++#define V4L2_CID_MPEG_VIDEO_HEVC_PPS (V4L2_CID_MPEG_BASE + 1009) ++#define V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS (V4L2_CID_MPEG_BASE + 1010) ++#define V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE (V4L2_CID_MPEG_BASE + 1015) ++#define V4L2_CID_MPEG_VIDEO_HEVC_START_CODE (V4L2_CID_MPEG_BASE + 1016) ++ ++/* enum v4l2_ctrl_type type values */ ++#define V4L2_CTRL_TYPE_HEVC_SPS 0x0120 ++#define V4L2_CTRL_TYPE_HEVC_PPS 0x0121 ++#define V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS 0x0122 ++ ++enum v4l2_mpeg_video_hevc_decode_mode { ++ V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_SLICE_BASED, ++ V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_FRAME_BASED, ++}; ++ ++enum v4l2_mpeg_video_hevc_start_code { ++ V4L2_MPEG_VIDEO_HEVC_START_CODE_NONE, ++ V4L2_MPEG_VIDEO_HEVC_START_CODE_ANNEX_B, ++}; ++ ++#define V4L2_HEVC_SLICE_TYPE_B 0 ++#define V4L2_HEVC_SLICE_TYPE_P 1 ++#define V4L2_HEVC_SLICE_TYPE_I 2 ++ ++#define V4L2_HEVC_SPS_FLAG_SEPARATE_COLOUR_PLANE (1ULL << 0) ++#define V4L2_HEVC_SPS_FLAG_SCALING_LIST_ENABLED (1ULL << 1) ++#define V4L2_HEVC_SPS_FLAG_AMP_ENABLED (1ULL << 2) ++#define V4L2_HEVC_SPS_FLAG_SAMPLE_ADAPTIVE_OFFSET (1ULL << 3) ++#define V4L2_HEVC_SPS_FLAG_PCM_ENABLED (1ULL << 4) ++#define V4L2_HEVC_SPS_FLAG_PCM_LOOP_FILTER_DISABLED (1ULL << 5) ++#define V4L2_HEVC_SPS_FLAG_LONG_TERM_REF_PICS_PRESENT (1ULL << 6) ++#define V4L2_HEVC_SPS_FLAG_SPS_TEMPORAL_MVP_ENABLED (1ULL << 7) ++#define V4L2_HEVC_SPS_FLAG_STRONG_INTRA_SMOOTHING_ENABLED (1ULL << 8) ++ ++/* The controls are not stable at the moment and will likely be reworked. */ ++struct v4l2_ctrl_hevc_sps { ++ /* ISO/IEC 23008-2, ITU-T Rec. 
H.265: Sequence parameter set */ ++ __u16 pic_width_in_luma_samples; ++ __u16 pic_height_in_luma_samples; ++ __u8 bit_depth_luma_minus8; ++ __u8 bit_depth_chroma_minus8; ++ __u8 log2_max_pic_order_cnt_lsb_minus4; ++ __u8 sps_max_dec_pic_buffering_minus1; ++ __u8 sps_max_num_reorder_pics; ++ __u8 sps_max_latency_increase_plus1; ++ __u8 log2_min_luma_coding_block_size_minus3; ++ __u8 log2_diff_max_min_luma_coding_block_size; ++ __u8 log2_min_luma_transform_block_size_minus2; ++ __u8 log2_diff_max_min_luma_transform_block_size; ++ __u8 max_transform_hierarchy_depth_inter; ++ __u8 max_transform_hierarchy_depth_intra; ++ __u8 pcm_sample_bit_depth_luma_minus1; ++ __u8 pcm_sample_bit_depth_chroma_minus1; ++ __u8 log2_min_pcm_luma_coding_block_size_minus3; ++ __u8 log2_diff_max_min_pcm_luma_coding_block_size; ++ __u8 num_short_term_ref_pic_sets; ++ __u8 num_long_term_ref_pics_sps; ++ __u8 chroma_format_idc; ++ ++ __u8 padding; ++ ++ __u64 flags; ++}; ++ ++#define V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT (1ULL << 0) ++#define V4L2_HEVC_PPS_FLAG_OUTPUT_FLAG_PRESENT (1ULL << 1) ++#define V4L2_HEVC_PPS_FLAG_SIGN_DATA_HIDING_ENABLED (1ULL << 2) ++#define V4L2_HEVC_PPS_FLAG_CABAC_INIT_PRESENT (1ULL << 3) ++#define V4L2_HEVC_PPS_FLAG_CONSTRAINED_INTRA_PRED (1ULL << 4) ++#define V4L2_HEVC_PPS_FLAG_TRANSFORM_SKIP_ENABLED (1ULL << 5) ++#define V4L2_HEVC_PPS_FLAG_CU_QP_DELTA_ENABLED (1ULL << 6) ++#define V4L2_HEVC_PPS_FLAG_PPS_SLICE_CHROMA_QP_OFFSETS_PRESENT (1ULL << 7) ++#define V4L2_HEVC_PPS_FLAG_WEIGHTED_PRED (1ULL << 8) ++#define V4L2_HEVC_PPS_FLAG_WEIGHTED_BIPRED (1ULL << 9) ++#define V4L2_HEVC_PPS_FLAG_TRANSQUANT_BYPASS_ENABLED (1ULL << 10) ++#define V4L2_HEVC_PPS_FLAG_TILES_ENABLED (1ULL << 11) ++#define V4L2_HEVC_PPS_FLAG_ENTROPY_CODING_SYNC_ENABLED (1ULL << 12) ++#define V4L2_HEVC_PPS_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED (1ULL << 13) ++#define V4L2_HEVC_PPS_FLAG_PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED (1ULL << 14) ++#define V4L2_HEVC_PPS_FLAG_DEBLOCKING_FILTER_OVERRIDE_ENABLED (1ULL << 15) ++#define V4L2_HEVC_PPS_FLAG_PPS_DISABLE_DEBLOCKING_FILTER (1ULL << 16) ++#define V4L2_HEVC_PPS_FLAG_LISTS_MODIFICATION_PRESENT (1ULL << 17) ++#define V4L2_HEVC_PPS_FLAG_SLICE_SEGMENT_HEADER_EXTENSION_PRESENT (1ULL << 18) ++ ++struct v4l2_ctrl_hevc_pps { ++ /* ISO/IEC 23008-2, ITU-T Rec. 
H.265: Picture parameter set */ ++ __u8 num_extra_slice_header_bits; ++ __s8 init_qp_minus26; ++ __u8 diff_cu_qp_delta_depth; ++ __s8 pps_cb_qp_offset; ++ __s8 pps_cr_qp_offset; ++ __u8 num_tile_columns_minus1; ++ __u8 num_tile_rows_minus1; ++ __u8 column_width_minus1[20]; ++ __u8 row_height_minus1[22]; ++ __s8 pps_beta_offset_div2; ++ __s8 pps_tc_offset_div2; ++ __u8 log2_parallel_merge_level_minus2; ++ ++ __u8 padding[4]; ++ __u64 flags; ++}; ++ ++#define V4L2_HEVC_DPB_ENTRY_RPS_ST_CURR_BEFORE 0x01 ++#define V4L2_HEVC_DPB_ENTRY_RPS_ST_CURR_AFTER 0x02 ++#define V4L2_HEVC_DPB_ENTRY_RPS_LT_CURR 0x03 ++ ++#define V4L2_HEVC_DPB_ENTRIES_NUM_MAX 16 ++ ++struct v4l2_hevc_dpb_entry { ++ __u64 timestamp; ++ __u8 rps; ++ __u8 field_pic; ++ __u16 pic_order_cnt[2]; ++ __u8 padding[2]; ++}; ++ ++struct v4l2_hevc_pred_weight_table { ++ __s8 delta_luma_weight_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; ++ __s8 luma_offset_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; ++ __s8 delta_chroma_weight_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2]; ++ __s8 chroma_offset_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2]; ++ ++ __s8 delta_luma_weight_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; ++ __s8 luma_offset_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; ++ __s8 delta_chroma_weight_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2]; ++ __s8 chroma_offset_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2]; ++ ++ __u8 padding[6]; ++ ++ __u8 luma_log2_weight_denom; ++ __s8 delta_chroma_log2_weight_denom; ++}; ++ ++#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_LUMA (1ULL << 0) ++#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_CHROMA (1ULL << 1) ++#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_TEMPORAL_MVP_ENABLED (1ULL << 2) ++#define V4L2_HEVC_SLICE_PARAMS_FLAG_MVD_L1_ZERO (1ULL << 3) ++#define V4L2_HEVC_SLICE_PARAMS_FLAG_CABAC_INIT (1ULL << 4) ++#define V4L2_HEVC_SLICE_PARAMS_FLAG_COLLOCATED_FROM_L0 (1ULL << 5) ++#define V4L2_HEVC_SLICE_PARAMS_FLAG_USE_INTEGER_MV (1ULL << 6) ++#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_DEBLOCKING_FILTER_DISABLED (1ULL << 7) ++#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED (1ULL << 8) ++ ++struct v4l2_ctrl_hevc_slice_params { ++ __u32 bit_size; ++ __u32 data_bit_offset; ++ ++ /* ISO/IEC 23008-2, ITU-T Rec. H.265: NAL unit header */ ++ __u8 nal_unit_type; ++ __u8 nuh_temporal_id_plus1; ++ ++ /* ISO/IEC 23008-2, ITU-T Rec. H.265: General slice segment header */ ++ __u8 slice_type; ++ __u8 colour_plane_id; ++ __u16 slice_pic_order_cnt; ++ __u8 num_ref_idx_l0_active_minus1; ++ __u8 num_ref_idx_l1_active_minus1; ++ __u8 collocated_ref_idx; ++ __u8 five_minus_max_num_merge_cand; ++ __s8 slice_qp_delta; ++ __s8 slice_cb_qp_offset; ++ __s8 slice_cr_qp_offset; ++ __s8 slice_act_y_qp_offset; ++ __s8 slice_act_cb_qp_offset; ++ __s8 slice_act_cr_qp_offset; ++ __s8 slice_beta_offset_div2; ++ __s8 slice_tc_offset_div2; ++ ++ /* ISO/IEC 23008-2, ITU-T Rec. H.265: Picture timing SEI message */ ++ __u8 pic_struct; ++ ++ /* ISO/IEC 23008-2, ITU-T Rec. H.265: General slice segment header */ ++ __u8 num_active_dpb_entries; ++ __u8 ref_idx_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; ++ __u8 ref_idx_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; ++ ++ __u8 num_rps_poc_st_curr_before; ++ __u8 num_rps_poc_st_curr_after; ++ __u8 num_rps_poc_lt_curr; ++ ++ __u8 padding; ++ ++ /* ISO/IEC 23008-2, ITU-T Rec. H.265: General slice segment header */ ++ struct v4l2_hevc_dpb_entry dpb[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; ++ ++ /* ISO/IEC 23008-2, ITU-T Rec. 
H.265: Weighted prediction parameter */ ++ struct v4l2_hevc_pred_weight_table pred_weight_table; ++ ++ __u64 flags; ++}; ++ ++#endif +diff --git a/libavcodec/mpeg2-ctrls.h b/libavcodec/mpeg2-ctrls.h +new file mode 100644 +index 0000000000..6601455b3d +--- /dev/null ++++ b/libavcodec/mpeg2-ctrls.h +@@ -0,0 +1,82 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * These are the MPEG2 state controls for use with stateless MPEG-2 ++ * codec drivers. ++ * ++ * It turns out that these structs are not stable yet and will undergo ++ * more changes. So keep them private until they are stable and ready to ++ * become part of the official public API. ++ */ ++ ++#ifndef _MPEG2_CTRLS_H_ ++#define _MPEG2_CTRLS_H_ ++ ++#define V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS (V4L2_CID_MPEG_BASE+250) ++#define V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION (V4L2_CID_MPEG_BASE+251) ++ ++/* enum v4l2_ctrl_type type values */ ++#define V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS 0x0103 ++#define V4L2_CTRL_TYPE_MPEG2_QUANTIZATION 0x0104 ++ ++#define V4L2_MPEG2_PICTURE_CODING_TYPE_I 1 ++#define V4L2_MPEG2_PICTURE_CODING_TYPE_P 2 ++#define V4L2_MPEG2_PICTURE_CODING_TYPE_B 3 ++#define V4L2_MPEG2_PICTURE_CODING_TYPE_D 4 ++ ++struct v4l2_mpeg2_sequence { ++ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence header */ ++ __u16 horizontal_size; ++ __u16 vertical_size; ++ __u32 vbv_buffer_size; ++ ++ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence extension */ ++ __u16 profile_and_level_indication; ++ __u8 progressive_sequence; ++ __u8 chroma_format; ++}; ++ ++struct v4l2_mpeg2_picture { ++ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture header */ ++ __u8 picture_coding_type; ++ ++ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture coding extension */ ++ __u8 f_code[2][2]; ++ __u8 intra_dc_precision; ++ __u8 picture_structure; ++ __u8 top_field_first; ++ __u8 frame_pred_frame_dct; ++ __u8 concealment_motion_vectors; ++ __u8 q_scale_type; ++ __u8 intra_vlc_format; ++ __u8 alternate_scan; ++ __u8 repeat_first_field; ++ __u16 progressive_frame; ++}; ++ ++struct v4l2_ctrl_mpeg2_slice_params { ++ __u32 bit_size; ++ __u32 data_bit_offset; ++ __u64 backward_ref_ts; ++ __u64 forward_ref_ts; ++ ++ struct v4l2_mpeg2_sequence sequence; ++ struct v4l2_mpeg2_picture picture; ++ ++ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Slice */ ++ __u32 quantiser_scale_code; ++}; ++ ++struct v4l2_ctrl_mpeg2_quantization { ++ /* ISO/IEC 13818-2, ITU-T Rec. 
H.262: Quant matrix extension */ ++ __u8 load_intra_quantiser_matrix; ++ __u8 load_non_intra_quantiser_matrix; ++ __u8 load_chroma_intra_quantiser_matrix; ++ __u8 load_chroma_non_intra_quantiser_matrix; ++ ++ __u8 intra_quantiser_matrix[64]; ++ __u8 non_intra_quantiser_matrix[64]; ++ __u8 chroma_intra_quantiser_matrix[64]; ++ __u8 chroma_non_intra_quantiser_matrix[64]; ++}; ++ ++#endif +diff --git a/libavcodec/v4l2_request_h264.c b/libavcodec/v4l2_request_h264.c +index bdaeb67d26..0254716e52 100644 +--- a/libavcodec/v4l2_request_h264.c ++++ b/libavcodec/v4l2_request_h264.c +@@ -19,6 +19,7 @@ + #include "h264dec.h" + #include "hwconfig.h" + #include "v4l2_request.h" ++#include "h264-ctrls.h" + + typedef struct V4L2RequestControlsH264 { + struct v4l2_ctrl_h264_sps sps; +diff --git a/libavcodec/v4l2_request_hevc.c b/libavcodec/v4l2_request_hevc.c +index f724909546..c16f8a868e 100644 +--- a/libavcodec/v4l2_request_hevc.c ++++ b/libavcodec/v4l2_request_hevc.c +@@ -19,6 +19,7 @@ + #include "hevcdec.h" + #include "hwconfig.h" + #include "v4l2_request.h" ++#include "hevc-ctrls.h" + + #define MAX_SLICES 16 + +diff --git a/libavcodec/v4l2_request_mpeg2.c b/libavcodec/v4l2_request_mpeg2.c +index 88d86cc4c2..bc251a6fd2 100644 +--- a/libavcodec/v4l2_request_mpeg2.c ++++ b/libavcodec/v4l2_request_mpeg2.c +@@ -19,6 +19,7 @@ + #include "hwconfig.h" + #include "mpegvideo.h" + #include "v4l2_request.h" ++#include "mpeg2-ctrls.h" + + typedef struct V4L2RequestControlsMPEG2 { + struct v4l2_ctrl_mpeg2_slice_params slice_params; +diff --git a/libavcodec/v4l2_request_vp8.c b/libavcodec/v4l2_request_vp8.c +index 7e75ee398a..ea2c55fa2f 100644 +--- a/libavcodec/v4l2_request_vp8.c ++++ b/libavcodec/v4l2_request_vp8.c +@@ -19,6 +19,7 @@ + #include "hwconfig.h" + #include "v4l2_request.h" + #include "vp8.h" ++#include "vp8-ctrls.h" + + typedef struct V4L2RequestControlsVP8 { + struct v4l2_ctrl_vp8_frame_header ctrl; +diff --git a/libavcodec/vp8-ctrls.h b/libavcodec/vp8-ctrls.h +new file mode 100644 +index 0000000000..53cba826e4 +--- /dev/null ++++ b/libavcodec/vp8-ctrls.h +@@ -0,0 +1,112 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * These are the VP8 state controls for use with stateless VP8 ++ * codec drivers. ++ * ++ * It turns out that these structs are not stable yet and will undergo ++ * more changes. So keep them private until they are stable and ready to ++ * become part of the official public API. 
++ */ ++ ++#ifndef _VP8_CTRLS_H_ ++#define _VP8_CTRLS_H_ ++ ++#include ++ ++#define V4L2_PIX_FMT_VP8_FRAME v4l2_fourcc('V', 'P', '8', 'F') ++ ++#define V4L2_CID_MPEG_VIDEO_VP8_FRAME_HEADER (V4L2_CID_MPEG_BASE + 2000) ++#define V4L2_CTRL_TYPE_VP8_FRAME_HEADER 0x301 ++ ++#define V4L2_VP8_SEGMENT_HEADER_FLAG_ENABLED 0x01 ++#define V4L2_VP8_SEGMENT_HEADER_FLAG_UPDATE_MAP 0x02 ++#define V4L2_VP8_SEGMENT_HEADER_FLAG_UPDATE_FEATURE_DATA 0x04 ++#define V4L2_VP8_SEGMENT_HEADER_FLAG_DELTA_VALUE_MODE 0x08 ++ ++struct v4l2_vp8_segment_header { ++ __s8 quant_update[4]; ++ __s8 lf_update[4]; ++ __u8 segment_probs[3]; ++ __u8 padding; ++ __u32 flags; ++}; ++ ++#define V4L2_VP8_LF_HEADER_ADJ_ENABLE 0x01 ++#define V4L2_VP8_LF_HEADER_DELTA_UPDATE 0x02 ++#define V4L2_VP8_LF_FILTER_TYPE_SIMPLE 0x04 ++struct v4l2_vp8_loopfilter_header { ++ __s8 ref_frm_delta[4]; ++ __s8 mb_mode_delta[4]; ++ __u8 sharpness_level; ++ __u8 level; ++ __u16 padding; ++ __u32 flags; ++}; ++ ++struct v4l2_vp8_quantization_header { ++ __u8 y_ac_qi; ++ __s8 y_dc_delta; ++ __s8 y2_dc_delta; ++ __s8 y2_ac_delta; ++ __s8 uv_dc_delta; ++ __s8 uv_ac_delta; ++ __u16 padding; ++}; ++ ++struct v4l2_vp8_entropy_header { ++ __u8 coeff_probs[4][8][3][11]; ++ __u8 y_mode_probs[4]; ++ __u8 uv_mode_probs[3]; ++ __u8 mv_probs[2][19]; ++ __u8 padding[3]; ++}; ++ ++struct v4l2_vp8_entropy_coder_state { ++ __u8 range; ++ __u8 value; ++ __u8 bit_count; ++ __u8 padding; ++}; ++ ++#define V4L2_VP8_FRAME_HEADER_FLAG_KEY_FRAME 0x01 ++#define V4L2_VP8_FRAME_HEADER_FLAG_EXPERIMENTAL 0x02 ++#define V4L2_VP8_FRAME_HEADER_FLAG_SHOW_FRAME 0x04 ++#define V4L2_VP8_FRAME_HEADER_FLAG_MB_NO_SKIP_COEFF 0x08 ++#define V4L2_VP8_FRAME_HEADER_FLAG_SIGN_BIAS_GOLDEN 0x10 ++#define V4L2_VP8_FRAME_HEADER_FLAG_SIGN_BIAS_ALT 0x20 ++ ++#define VP8_FRAME_IS_KEY_FRAME(hdr) \ ++ (!!((hdr)->flags & V4L2_VP8_FRAME_HEADER_FLAG_KEY_FRAME)) ++ ++struct v4l2_ctrl_vp8_frame_header { ++ struct v4l2_vp8_segment_header segment_header; ++ struct v4l2_vp8_loopfilter_header lf_header; ++ struct v4l2_vp8_quantization_header quant_header; ++ struct v4l2_vp8_entropy_header entropy_header; ++ struct v4l2_vp8_entropy_coder_state coder_state; ++ ++ __u16 width; ++ __u16 height; ++ ++ __u8 horizontal_scale; ++ __u8 vertical_scale; ++ ++ __u8 version; ++ __u8 prob_skip_false; ++ __u8 prob_intra; ++ __u8 prob_last; ++ __u8 prob_gf; ++ __u8 num_dct_parts; ++ ++ __u32 first_part_size; ++ __u32 first_part_header_bits; ++ __u32 dct_part_sizes[8]; ++ ++ __u64 last_frame_ts; ++ __u64 golden_frame_ts; ++ __u64 alt_frame_ts; ++ ++ __u64 flags; ++}; ++ ++#endif + +From 4b5474250e1adb4931afb6418403def0d914aaea Mon Sep 17 00:00:00 2001 +From: Jonas Karlman +Date: Sat, 2 May 2020 11:00:26 +0000 +Subject: [PATCH 10/18] Update to v5.7 private linux headers + +--- + libavcodec/h264-ctrls.h | 2 ++ + libavcodec/v4l2_request_h264.c | 8 +++++++- + 2 files changed, 9 insertions(+), 1 deletion(-) + +diff --git a/libavcodec/h264-ctrls.h b/libavcodec/h264-ctrls.h +index e877bf1d53..1c6ff7d63b 100644 +--- a/libavcodec/h264-ctrls.h ++++ b/libavcodec/h264-ctrls.h +@@ -185,6 +185,8 @@ struct v4l2_ctrl_h264_slice_params { + #define V4L2_H264_DPB_ENTRY_FLAG_VALID 0x01 + #define V4L2_H264_DPB_ENTRY_FLAG_ACTIVE 0x02 + #define V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM 0x04 ++#define V4L2_H264_DPB_ENTRY_FLAG_FIELD 0x08 ++#define V4L2_H264_DPB_ENTRY_FLAG_BOTTOM_FIELD 0x10 + + struct v4l2_h264_dpb_entry { + __u64 reference_ts; +diff --git a/libavcodec/v4l2_request_h264.c b/libavcodec/v4l2_request_h264.c +index 0254716e52..d28ed07da3 100644 +--- 
a/libavcodec/v4l2_request_h264.c ++++ b/libavcodec/v4l2_request_h264.c +@@ -67,8 +67,14 @@ static void fill_dpb_entry(struct v4l2_h264_dpb_entry *entry, const H264Picture + entry->frame_num = pic->frame_num; + entry->pic_num = pic->pic_id; + entry->flags = V4L2_H264_DPB_ENTRY_FLAG_VALID; +- if (pic->reference) ++ if (pic->reference) { + entry->flags |= V4L2_H264_DPB_ENTRY_FLAG_ACTIVE; ++ if (pic->reference != PICT_FRAME) { ++ entry->flags |= V4L2_H264_DPB_ENTRY_FLAG_FIELD; ++ if (pic->reference == PICT_BOTTOM_FIELD) ++ entry->flags |= V4L2_H264_DPB_ENTRY_FLAG_BOTTOM_FIELD; ++ } ++ } + if (pic->long_ref) + entry->flags |= V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM; + if (pic->field_poc[0] != INT_MAX) + +From 5044b279b44f20c58cb92600d751984de6c8111c Mon Sep 17 00:00:00 2001 +From: Jonas Karlman +Date: Sat, 2 May 2020 22:03:42 +0000 +Subject: [PATCH 11/18] Update to v5.8 private linux headers + +--- + libavcodec/h264-ctrls.h | 8 +++++++- + 1 file changed, 7 insertions(+), 1 deletion(-) + +diff --git a/libavcodec/h264-ctrls.h b/libavcodec/h264-ctrls.h +index 1c6ff7d63b..080fd1293c 100644 +--- a/libavcodec/h264-ctrls.h ++++ b/libavcodec/h264-ctrls.h +@@ -13,6 +13,12 @@ + + #include + ++/* ++ * Maximum DPB size, as specified by section 'A.3.1 Level limits ++ * common to the Baseline, Main, and Extended profiles'. ++ */ ++#define V4L2_H264_NUM_DPB_ENTRIES 16 ++ + /* Our pixel format isn't stable at the moment */ + #define V4L2_PIX_FMT_H264_SLICE v4l2_fourcc('S', '2', '6', '4') /* H264 parsed slices */ + +@@ -201,7 +207,7 @@ struct v4l2_h264_dpb_entry { + #define V4L2_H264_DECODE_PARAM_FLAG_IDR_PIC 0x01 + + struct v4l2_ctrl_h264_decode_params { +- struct v4l2_h264_dpb_entry dpb[16]; ++ struct v4l2_h264_dpb_entry dpb[V4L2_H264_NUM_DPB_ENTRIES]; + __u16 num_slices; + __u16 nal_ref_idc; + __s32 top_field_order_cnt; + +From 3503a60af592944589b2beae7ce004884dfe04e2 Mon Sep 17 00:00:00 2001 +From: Jonas Karlman +Date: Mon, 29 Apr 2019 22:08:59 +0000 +Subject: [PATCH 12/18] HACK: hwcontext_drm: do not require drm device + +Signed-off-by: Jonas Karlman +--- + libavutil/hwcontext_drm.c | 5 +++++ + 1 file changed, 5 insertions(+) + +diff --git a/libavutil/hwcontext_drm.c b/libavutil/hwcontext_drm.c +index 32cbde82eb..aa4794c5e6 100644 +--- a/libavutil/hwcontext_drm.c ++++ b/libavutil/hwcontext_drm.c +@@ -43,6 +43,11 @@ static int drm_device_create(AVHWDeviceContext *hwdev, const char *device, + AVDRMDeviceContext *hwctx = hwdev->hwctx; + drmVersionPtr version; + ++ if (device == NULL) { ++ hwctx->fd = -1; ++ return 0; ++ } ++ + hwctx->fd = open(device, O_RDWR); + if (hwctx->fd < 0) + return AVERROR(errno); + +From 9e5907d59c23f5ccd4c48cfe37775411ce308107 Mon Sep 17 00:00:00 2001 +From: Jernej Skrabec +Date: Sat, 15 Dec 2018 22:32:16 +0100 +Subject: [PATCH 13/18] WIP: h264 field reference + +Signed-off-by: Jernej Skrabec +Signed-off-by: Jonas Karlman +--- + libavcodec/v4l2_request_h264.c | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/libavcodec/v4l2_request_h264.c b/libavcodec/v4l2_request_h264.c +index d28ed07da3..5b0f21a60d 100644 +--- a/libavcodec/v4l2_request_h264.c ++++ b/libavcodec/v4l2_request_h264.c +@@ -116,7 +116,8 @@ static uint8_t get_dpb_index(struct v4l2_ctrl_h264_decode_params *decode, const + struct v4l2_h264_dpb_entry *entry = &decode->dpb[i]; + if ((entry->flags & V4L2_H264_DPB_ENTRY_FLAG_VALID) && + entry->reference_ts == timestamp) +- return i; ++ // TODO: signal reference type, possible using top 2 bits ++ return i | ((ref->reference & 3) << 6); + } + + return 0; + 
+From 9cf9b825c33690656331a9693b3132d1d82b75a8 Mon Sep 17 00:00:00 2001 +From: Jernej Skrabec +Date: Sat, 15 Dec 2018 22:32:16 +0100 +Subject: [PATCH 14/18] WIP: hevc scaling matrix + +Signed-off-by: Jernej Skrabec +--- + libavcodec/hevc-ctrls.h | 11 +++++++++++ + libavcodec/v4l2_request_hevc.c | 22 ++++++++++++++++++++++ + 2 files changed, 33 insertions(+) + +diff --git a/libavcodec/hevc-ctrls.h b/libavcodec/hevc-ctrls.h +index 1009cf0891..1592e52c36 100644 +--- a/libavcodec/hevc-ctrls.h ++++ b/libavcodec/hevc-ctrls.h +@@ -19,6 +19,7 @@ + #define V4L2_CID_MPEG_VIDEO_HEVC_SPS (V4L2_CID_MPEG_BASE + 1008) + #define V4L2_CID_MPEG_VIDEO_HEVC_PPS (V4L2_CID_MPEG_BASE + 1009) + #define V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS (V4L2_CID_MPEG_BASE + 1010) ++#define V4L2_CID_MPEG_VIDEO_HEVC_SCALING_MATRIX (V4L2_CID_MPEG_BASE + 1011) + #define V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE (V4L2_CID_MPEG_BASE + 1015) + #define V4L2_CID_MPEG_VIDEO_HEVC_START_CODE (V4L2_CID_MPEG_BASE + 1016) + +@@ -26,6 +27,7 @@ + #define V4L2_CTRL_TYPE_HEVC_SPS 0x0120 + #define V4L2_CTRL_TYPE_HEVC_PPS 0x0121 + #define V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS 0x0122 ++#define V4L2_CTRL_TYPE_HEVC_SCALING_MATRIX 0x0123 + + enum v4l2_mpeg_video_hevc_decode_mode { + V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_SLICE_BASED, +@@ -209,4 +211,13 @@ struct v4l2_ctrl_hevc_slice_params { + __u64 flags; + }; + ++struct v4l2_ctrl_hevc_scaling_matrix { ++ __u8 scaling_list_4x4[6][16]; ++ __u8 scaling_list_8x8[6][64]; ++ __u8 scaling_list_16x16[6][64]; ++ __u8 scaling_list_32x32[2][64]; ++ __u8 scaling_list_dc_coef_16x16[6]; ++ __u8 scaling_list_dc_coef_32x32[2]; ++}; ++ + #endif +diff --git a/libavcodec/v4l2_request_hevc.c b/libavcodec/v4l2_request_hevc.c +index c16f8a868e..f400bf4f3c 100644 +--- a/libavcodec/v4l2_request_hevc.c ++++ b/libavcodec/v4l2_request_hevc.c +@@ -26,6 +26,7 @@ + typedef struct V4L2RequestControlsHEVC { + struct v4l2_ctrl_hevc_sps sps; + struct v4l2_ctrl_hevc_pps pps; ++ struct v4l2_ctrl_hevc_scaling_matrix scaling_matrix; + struct v4l2_ctrl_hevc_slice_params slice_params[MAX_SLICES]; + int first_slice; + int num_slices; //TODO: this should be in control +@@ -295,6 +296,22 @@ static int v4l2_request_hevc_start_frame(AVCodecContext *avctx, + + fill_sps(&controls->sps, h); + ++ if (sl) { ++ for (int i = 0; i < 6; i++) { ++ for (int j = 0; j < 16; j++) ++ controls->scaling_matrix.scaling_list_4x4[i][j] = sl->sl[0][i][j]; ++ for (int j = 0; j < 64; j++) { ++ controls->scaling_matrix.scaling_list_8x8[i][j] = sl->sl[1][i][j]; ++ controls->scaling_matrix.scaling_list_16x16[i][j] = sl->sl[2][i][j]; ++ if (i < 2) ++ controls->scaling_matrix.scaling_list_32x32[i][j] = sl->sl[3][i * 3][j]; ++ } ++ controls->scaling_matrix.scaling_list_dc_coef_16x16[i] = sl->sl_dc[0][i]; ++ if (i < 2) ++ controls->scaling_matrix.scaling_list_dc_coef_32x32[i] = sl->sl_dc[1][i * 3]; ++ } ++ } ++ + /* ISO/IEC 23008-2, ITU-T Rec. 
H.265: Picture parameter set */ + controls->pps = (struct v4l2_ctrl_hevc_pps) { + .num_extra_slice_header_bits = pps->num_extra_slice_header_bits, +@@ -398,6 +415,11 @@ static int v4l2_request_hevc_queue_decode(AVCodecContext *avctx, int last_slice) + .ptr = &controls->pps, + .size = sizeof(controls->pps), + }, ++ { ++ .id = V4L2_CID_MPEG_VIDEO_HEVC_SCALING_MATRIX, ++ .ptr = &controls->scaling_matrix, ++ .size = sizeof(controls->scaling_matrix), ++ }, + { + .id = V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS, + .ptr = &controls->slice_params, + +From 572a336f459070be3ac0ecc15a11057c6be6cb61 Mon Sep 17 00:00:00 2001 +From: Jernej Skrabec +Date: Sat, 15 Dec 2018 22:32:16 +0100 +Subject: [PATCH 15/18] WIP: hevc segment address + +Signed-off-by: Jernej Skrabec +--- + libavcodec/hevc-ctrls.h | 5 ++++- + libavcodec/v4l2_request_hevc.c | 3 +++ + 2 files changed, 7 insertions(+), 1 deletion(-) + +diff --git a/libavcodec/hevc-ctrls.h b/libavcodec/hevc-ctrls.h +index 1592e52c36..3e2e320983 100644 +--- a/libavcodec/hevc-ctrls.h ++++ b/libavcodec/hevc-ctrls.h +@@ -167,6 +167,9 @@ struct v4l2_ctrl_hevc_slice_params { + __u32 bit_size; + __u32 data_bit_offset; + ++ /* ISO/IEC 23008-2, ITU-T Rec. H.265: General slice segment header */ ++ __u32 slice_segment_addr; ++ + /* ISO/IEC 23008-2, ITU-T Rec. H.265: NAL unit header */ + __u8 nal_unit_type; + __u8 nuh_temporal_id_plus1; +@@ -200,7 +203,7 @@ struct v4l2_ctrl_hevc_slice_params { + __u8 num_rps_poc_st_curr_after; + __u8 num_rps_poc_lt_curr; + +- __u8 padding; ++ __u8 padding[5]; + + /* ISO/IEC 23008-2, ITU-T Rec. H.265: General slice segment header */ + struct v4l2_hevc_dpb_entry dpb[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; +diff --git a/libavcodec/v4l2_request_hevc.c b/libavcodec/v4l2_request_hevc.c +index f400bf4f3c..98222fc74c 100644 +--- a/libavcodec/v4l2_request_hevc.c ++++ b/libavcodec/v4l2_request_hevc.c +@@ -138,6 +138,9 @@ static void v4l2_request_hevc_fill_slice_params(const HEVCContext *h, + .bit_size = 0, + .data_bit_offset = get_bits_count(&h->HEVClc->gb), + ++ /* ISO/IEC 23008-2, ITU-T Rec. H.265: General slice segment header */ ++ .slice_segment_addr = sh->slice_segment_addr, ++ + /* ISO/IEC 23008-2, ITU-T Rec. H.265: NAL unit header */ + .nal_unit_type = h->nal_unit_type, + .nuh_temporal_id_plus1 = h->temporal_id + 1, + +From 8777f29fef8e41f305ecf54dbc9908fca2e5d98a Mon Sep 17 00:00:00 2001 +From: Jernej Skrabec +Date: Sat, 15 Dec 2018 22:32:16 +0100 +Subject: [PATCH 16/18] WIP: hevc entry point offsets + +Signed-off-by: Jernej Skrabec +--- + libavcodec/hevc-ctrls.h | 5 ++++- + libavcodec/v4l2_request_hevc.c | 9 +++++++++ + 2 files changed, 13 insertions(+), 1 deletion(-) + +diff --git a/libavcodec/hevc-ctrls.h b/libavcodec/hevc-ctrls.h +index 3e2e320983..d1b094c8aa 100644 +--- a/libavcodec/hevc-ctrls.h ++++ b/libavcodec/hevc-ctrls.h +@@ -169,6 +169,7 @@ struct v4l2_ctrl_hevc_slice_params { + + /* ISO/IEC 23008-2, ITU-T Rec. H.265: General slice segment header */ + __u32 slice_segment_addr; ++ __u32 num_entry_point_offsets; + + /* ISO/IEC 23008-2, ITU-T Rec. H.265: NAL unit header */ + __u8 nal_unit_type; +@@ -203,7 +204,9 @@ struct v4l2_ctrl_hevc_slice_params { + __u8 num_rps_poc_st_curr_after; + __u8 num_rps_poc_lt_curr; + +- __u8 padding[5]; ++ __u8 padding; ++ ++ __u32 entry_point_offset_minus1[256]; + + /* ISO/IEC 23008-2, ITU-T Rec. 
H.265: General slice segment header */ + struct v4l2_hevc_dpb_entry dpb[V4L2_HEVC_DPB_ENTRIES_NUM_MAX]; +diff --git a/libavcodec/v4l2_request_hevc.c b/libavcodec/v4l2_request_hevc.c +index 98222fc74c..7e77c83e4e 100644 +--- a/libavcodec/v4l2_request_hevc.c ++++ b/libavcodec/v4l2_request_hevc.c +@@ -225,6 +225,15 @@ static void v4l2_request_hevc_fill_slice_params(const HEVCContext *h, + } + + v4l2_request_hevc_fill_pred_table(h, &slice_params->pred_weight_table); ++ ++ slice_params->num_entry_point_offsets = sh->num_entry_point_offsets; ++ if (slice_params->num_entry_point_offsets > 256) { ++ slice_params->num_entry_point_offsets = 256; ++ av_log(NULL, AV_LOG_ERROR, "%s: Currently only 256 entry points are supported, but slice has %d entry points.\n", __func__, sh->num_entry_point_offsets); ++ } ++ ++ for (i = 0; i < slice_params->num_entry_point_offsets; i++) ++ slice_params->entry_point_offset_minus1[i] = sh->entry_point_offset[i] - 1; + } + + static void fill_sps(struct v4l2_ctrl_hevc_sps *ctrl, const HEVCContext *h) + +From 931d210b66a033eed21dab4be65ae93f2198d9b5 Mon Sep 17 00:00:00 2001 +From: Boris Brezillon +Date: Thu, 12 Dec 2019 16:13:55 +0100 +Subject: [PATCH 17/18] WIP: Add V4L2 request API vp9 hwaccel + +Signed-off-by: Boris Brezillon +--- + configure | 3 + + libavcodec/Makefile | 1 + + libavcodec/hwaccels.h | 1 + + libavcodec/v4l2_request_vp9.c | 352 ++++++++++++++++++++++++++++++++++ + libavcodec/vp9.c | 13 +- + libavcodec/vp9shared.h | 1 + + 6 files changed, 370 insertions(+), 1 deletion(-) + create mode 100644 libavcodec/v4l2_request_vp9.c + +diff --git a/configure b/configure +index 623012757c..2b723df55a 100755 +--- a/configure ++++ b/configure +@@ -3015,6 +3015,8 @@ vp9_dxva2_hwaccel_deps="dxva2 DXVA_PicParams_VP9" + vp9_dxva2_hwaccel_select="vp9_decoder" + vp9_nvdec_hwaccel_deps="nvdec" + vp9_nvdec_hwaccel_select="vp9_decoder" ++vp9_v4l2request_hwaccel_deps="v4l2_request vp9_v4l2_request" ++vp9_v4l2request_hwaccel_select="vp9_decoder" + vp9_vaapi_hwaccel_deps="vaapi VADecPictureParameterBufferVP9_bit_depth" + vp9_vaapi_hwaccel_select="vp9_decoder" + vp9_vdpau_hwaccel_deps="vdpau VdpPictureInfoVP9" +@@ -6579,6 +6581,7 @@ check_cc h264_v4l2_request linux/videodev2.h "int i = V4L2_PIX_FMT_H264_SLICE;" + check_cc hevc_v4l2_request linux/videodev2.h "int i = V4L2_PIX_FMT_HEVC_SLICE;" + check_cc mpeg2_v4l2_request linux/videodev2.h "int i = V4L2_PIX_FMT_MPEG2_SLICE;" + check_cc vp8_v4l2_request linux/videodev2.h "int i = V4L2_PIX_FMT_VP8_FRAME;" ++check_cc vp9_v4l2_request linux/videodev2.h "int i = V4L2_PIX_FMT_VP9_FRAME;" + + check_headers sys/videoio.h + test_code cc sys/videoio.h "struct v4l2_frmsizeenum vfse; vfse.discrete.width = 0;" && enable_sanitized struct_v4l2_frmivalenum_discrete +diff --git a/libavcodec/Makefile b/libavcodec/Makefile +index d6af854daa..2f0e0a0976 100644 +--- a/libavcodec/Makefile ++++ b/libavcodec/Makefile +@@ -945,6 +945,7 @@ OBJS-$(CONFIG_VP8_VAAPI_HWACCEL) += vaapi_vp8.o + OBJS-$(CONFIG_VP9_D3D11VA_HWACCEL) += dxva2_vp9.o + OBJS-$(CONFIG_VP9_DXVA2_HWACCEL) += dxva2_vp9.o + OBJS-$(CONFIG_VP9_NVDEC_HWACCEL) += nvdec_vp9.o ++OBJS-$(CONFIG_VP9_V4L2REQUEST_HWACCEL) += v4l2_request_vp9.o + OBJS-$(CONFIG_VP9_VAAPI_HWACCEL) += vaapi_vp9.o + OBJS-$(CONFIG_VP9_VDPAU_HWACCEL) += vdpau_vp9.o + OBJS-$(CONFIG_VP8_QSV_HWACCEL) += qsvdec_other.o +diff --git a/libavcodec/hwaccels.h b/libavcodec/hwaccels.h +index bd75e94f4c..03a1aefe09 100644 +--- a/libavcodec/hwaccels.h ++++ b/libavcodec/hwaccels.h +@@ -71,6 +71,7 @@ extern const AVHWAccel 
ff_vp9_d3d11va_hwaccel; + extern const AVHWAccel ff_vp9_d3d11va2_hwaccel; + extern const AVHWAccel ff_vp9_dxva2_hwaccel; + extern const AVHWAccel ff_vp9_nvdec_hwaccel; ++extern const AVHWAccel ff_vp9_v4l2request_hwaccel; + extern const AVHWAccel ff_vp9_vaapi_hwaccel; + extern const AVHWAccel ff_vp9_vdpau_hwaccel; + extern const AVHWAccel ff_wmv3_d3d11va_hwaccel; +diff --git a/libavcodec/v4l2_request_vp9.c b/libavcodec/v4l2_request_vp9.c +new file mode 100644 +index 0000000000..4074c7fe4b +--- /dev/null ++++ b/libavcodec/v4l2_request_vp9.c +@@ -0,0 +1,352 @@ ++/* ++ * This file is part of FFmpeg. ++ * ++ * FFmpeg is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU Lesser General Public ++ * License as published by the Free Software Foundation; either ++ * version 2.1 of the License, or (at your option) any later version. ++ * ++ * FFmpeg is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * Lesser General Public License for more details. ++ * ++ * You should have received a copy of the GNU Lesser General Public ++ * License along with FFmpeg; if not, write to the Free Software ++ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA ++ */ ++ ++#include "hwconfig.h" ++#include "v4l2_request.h" ++#include "vp9dec.h" ++ ++typedef struct V4L2RequestControlsVP9 { ++ struct v4l2_ctrl_vp9_frame_decode_params decode_params; ++} V4L2RequestControlsVP9; ++ ++static const uint8_t ff_to_v4l2_intramode[] = { ++ [VERT_PRED] = V4L2_VP9_INTRA_PRED_MODE_V, ++ [HOR_PRED] = V4L2_VP9_INTRA_PRED_MODE_H, ++ [DC_PRED] = V4L2_VP9_INTRA_PRED_MODE_DC, ++ [DIAG_DOWN_LEFT_PRED] = V4L2_VP9_INTRA_PRED_MODE_D45, ++ [DIAG_DOWN_RIGHT_PRED] = V4L2_VP9_INTRA_PRED_MODE_D135, ++ [VERT_RIGHT_PRED] = V4L2_VP9_INTRA_PRED_MODE_D117, ++ [HOR_DOWN_PRED] = V4L2_VP9_INTRA_PRED_MODE_D153, ++ [VERT_LEFT_PRED] = V4L2_VP9_INTRA_PRED_MODE_D63, ++ [HOR_UP_PRED] = V4L2_VP9_INTRA_PRED_MODE_D207, ++ [TM_VP8_PRED] = V4L2_VP9_INTRA_PRED_MODE_TM, ++}; ++ ++static int v4l2_request_vp9_set_frame_ctx(AVCodecContext *avctx, unsigned int id) ++{ ++ VP9Context *s = avctx->priv_data; ++ struct v4l2_ctrl_vp9_frame_ctx fctx = {}; ++ struct v4l2_ext_control control[] = { ++ { ++ .id = V4L2_CID_MPEG_VIDEO_VP9_FRAME_CONTEXT(id), ++ .ptr = &fctx, ++ .size = sizeof(fctx), ++ }, ++ }; ++ ++ memcpy(fctx.probs.tx8, s->prob_ctx[id].p.tx8p, sizeof(s->prob_ctx[id].p.tx8p)); ++ memcpy(fctx.probs.tx16, s->prob_ctx[id].p.tx16p, sizeof(s->prob_ctx[id].p.tx16p)); ++ memcpy(fctx.probs.tx32, s->prob_ctx[id].p.tx32p, sizeof(s->prob_ctx[id].p.tx32p)); ++ memcpy(fctx.probs.coef, s->prob_ctx[id].coef, sizeof(s->prob_ctx[id].coef)); ++ memcpy(fctx.probs.skip, s->prob_ctx[id].p.skip, sizeof(s->prob_ctx[id].p.skip)); ++ memcpy(fctx.probs.inter_mode, s->prob_ctx[id].p.mv_mode, sizeof(s->prob_ctx[id].p.mv_mode)); ++ memcpy(fctx.probs.interp_filter, s->prob_ctx[id].p.filter, sizeof(s->prob_ctx[id].p.filter)); ++ memcpy(fctx.probs.is_inter, s->prob_ctx[id].p.intra, sizeof(s->prob_ctx[id].p.intra)); ++ memcpy(fctx.probs.comp_mode, s->prob_ctx[id].p.comp, sizeof(s->prob_ctx[id].p.comp)); ++ memcpy(fctx.probs.single_ref, s->prob_ctx[id].p.single_ref, sizeof(s->prob_ctx[id].p.single_ref)); ++ memcpy(fctx.probs.comp_ref, s->prob_ctx[id].p.comp_ref, sizeof(s->prob_ctx[id].p.comp_ref)); ++ memcpy(fctx.probs.y_mode, s->prob_ctx[id].p.y_mode, sizeof(s->prob_ctx[id].p.y_mode)); ++ for 
(unsigned i = 0; i < 10; i++) ++ memcpy(fctx.probs.uv_mode[ff_to_v4l2_intramode[i]], s->prob_ctx[id].p.uv_mode[i], sizeof(s->prob_ctx[id].p.uv_mode[0])); ++ for (unsigned i = 0; i < 4; i++) ++ memcpy(fctx.probs.partition[i * 4], s->prob_ctx[id].p.partition[3 - i], sizeof(s->prob_ctx[id].p.partition[0])); ++ memcpy(fctx.probs.mv.joint, s->prob_ctx[id].p.mv_joint, sizeof(s->prob_ctx[id].p.mv_joint)); ++ for (unsigned i = 0; i < 2; i++) { ++ fctx.probs.mv.sign[i] = s->prob_ctx[id].p.mv_comp[i].sign; ++ memcpy(fctx.probs.mv.class[i], s->prob_ctx[id].p.mv_comp[i].classes, sizeof(s->prob_ctx[id].p.mv_comp[0].classes)); ++ fctx.probs.mv.class0_bit[i] = s->prob_ctx[id].p.mv_comp[i].class0; ++ memcpy(fctx.probs.mv.bits[i], s->prob_ctx[id].p.mv_comp[i].bits, sizeof(s->prob_ctx[id].p.mv_comp[0].bits)); ++ memcpy(fctx.probs.mv.class0_fr[i], s->prob_ctx[id].p.mv_comp[i].class0_fp, sizeof(s->prob_ctx[id].p.mv_comp[0].class0_fp)); ++ memcpy(fctx.probs.mv.fr[i], s->prob_ctx[id].p.mv_comp[i].fp, sizeof(s->prob_ctx[id].p.mv_comp[0].fp)); ++ fctx.probs.mv.class0_hp[i] = s->prob_ctx[id].p.mv_comp[i].class0_hp; ++ fctx.probs.mv.hp[i] = s->prob_ctx[id].p.mv_comp[i].hp; ++ } ++ ++ return ff_v4l2_request_set_controls(avctx, control, FF_ARRAY_ELEMS(control)); ++} ++ ++static int v4l2_request_vp9_get_frame_ctx(AVCodecContext *avctx, unsigned int id) ++{ ++ VP9Context *s = avctx->priv_data; ++ struct v4l2_ctrl_vp9_frame_ctx fctx = {}; ++ struct v4l2_ext_control control[] = { ++ { ++ .id = V4L2_CID_MPEG_VIDEO_VP9_FRAME_CONTEXT(id), ++ .ptr = &fctx, ++ .size = sizeof(fctx), ++ }, ++ }; ++ ++ int ret = ff_v4l2_request_get_controls(avctx, control, FF_ARRAY_ELEMS(control)); ++ if (ret) ++ return ret; ++ ++ memcpy(s->prob_ctx[id].p.tx8p, fctx.probs.tx8, sizeof(s->prob_ctx[id].p.tx8p)); ++ memcpy(s->prob_ctx[id].p.tx16p, fctx.probs.tx16, sizeof(s->prob_ctx[id].p.tx16p)); ++ memcpy(s->prob_ctx[id].p.tx32p, fctx.probs.tx32, sizeof(s->prob_ctx[id].p.tx32p)); ++ memcpy(s->prob_ctx[id].coef, fctx.probs.coef, sizeof(s->prob_ctx[id].coef)); ++ memcpy(s->prob_ctx[id].p.skip, fctx.probs.skip, sizeof(s->prob_ctx[id].p.skip)); ++ memcpy(s->prob_ctx[id].p.mv_mode, fctx.probs.inter_mode, sizeof(s->prob_ctx[id].p.mv_mode)); ++ memcpy(s->prob_ctx[id].p.filter, fctx.probs.interp_filter, sizeof(s->prob_ctx[id].p.filter)); ++ memcpy(s->prob_ctx[id].p.intra, fctx.probs.is_inter, sizeof(s->prob_ctx[id].p.intra)); ++ memcpy(s->prob_ctx[id].p.comp, fctx.probs.comp_mode, sizeof(s->prob_ctx[id].p.comp)); ++ memcpy(s->prob_ctx[id].p.single_ref, fctx.probs.single_ref, sizeof(s->prob_ctx[id].p.single_ref)); ++ memcpy(s->prob_ctx[id].p.comp_ref, fctx.probs.comp_ref, sizeof(s->prob_ctx[id].p.comp_ref)); ++ memcpy(s->prob_ctx[id].p.y_mode, fctx.probs.y_mode, sizeof(s->prob_ctx[id].p.y_mode)); ++ for (unsigned i = 0; i < 10; i++) ++ memcpy(s->prob_ctx[id].p.uv_mode[i], fctx.probs.uv_mode[ff_to_v4l2_intramode[i]], sizeof(s->prob_ctx[id].p.uv_mode[0])); ++ for (unsigned i = 0; i < 4; i++) ++ memcpy(s->prob_ctx[id].p.partition[3 - i], fctx.probs.partition[i * 4], sizeof(s->prob_ctx[id].p.partition[0])); ++ memcpy(s->prob_ctx[id].p.mv_joint, fctx.probs.mv.joint, sizeof(s->prob_ctx[id].p.mv_joint)); ++ for (unsigned i = 0; i < 2; i++) { ++ s->prob_ctx[id].p.mv_comp[i].sign = fctx.probs.mv.sign[i]; ++ memcpy(s->prob_ctx[id].p.mv_comp[i].classes, fctx.probs.mv.class[i], sizeof(s->prob_ctx[id].p.mv_comp[0].classes)); ++ s->prob_ctx[id].p.mv_comp[i].class0 = fctx.probs.mv.class0_bit[i]; ++ memcpy(s->prob_ctx[id].p.mv_comp[i].bits, fctx.probs.mv.bits[i], 
sizeof(s->prob_ctx[id].p.mv_comp[0].bits)); ++ memcpy(s->prob_ctx[id].p.mv_comp[i].class0_fp, fctx.probs.mv.class0_fr[i], sizeof(s->prob_ctx[id].p.mv_comp[0].class0_fp)); ++ memcpy(s->prob_ctx[id].p.mv_comp[i].fp, fctx.probs.mv.fr[i], sizeof(s->prob_ctx[id].p.mv_comp[0].fp)); ++ s->prob_ctx[id].p.mv_comp[i].class0_hp = fctx.probs.mv.class0_hp[i]; ++ s->prob_ctx[id].p.mv_comp[i].hp = fctx.probs.mv.hp[i]; ++ } ++ ++ return 0; ++} ++ ++static int v4l2_request_vp9_start_frame(AVCodecContext *avctx, ++ av_unused const uint8_t *buffer, ++ av_unused uint32_t size) ++{ ++ const VP9Context *s = avctx->priv_data; ++ const VP9Frame *f = &s->s.frames[CUR_FRAME]; ++ V4L2RequestControlsVP9 *controls = f->hwaccel_picture_private; ++ struct v4l2_ctrl_vp9_frame_decode_params *dec_params = &controls->decode_params; ++ int ret; ++ ++ if (s->s.h.keyframe || s->s.h.errorres || (s->s.h.intraonly && s->s.h.resetctx == 3)) { ++ for (unsigned i = 0; i < 4; i++) { ++ ret = v4l2_request_vp9_set_frame_ctx(avctx, i); ++ if (ret) ++ return ret; ++ } ++ } else if (s->s.h.intraonly && s->s.h.resetctx == 2) { ++ ret = v4l2_request_vp9_set_frame_ctx(avctx, s->s.h.framectxid); ++ if (ret) ++ return ret; ++ } ++ ++ if (s->s.h.keyframe) ++ dec_params->flags |= V4L2_VP9_FRAME_FLAG_KEY_FRAME; ++ if (!s->s.h.invisible) ++ dec_params->flags |= V4L2_VP9_FRAME_FLAG_SHOW_FRAME; ++ if (s->s.h.errorres) ++ dec_params->flags |= V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT; ++ if (s->s.h.intraonly) ++ dec_params->flags |= V4L2_VP9_FRAME_FLAG_INTRA_ONLY; ++ if (!s->s.h.keyframe && s->s.h.highprecisionmvs) ++ dec_params->flags |= V4L2_VP9_FRAME_FLAG_ALLOW_HIGH_PREC_MV; ++ if (s->s.h.refreshctx) ++ dec_params->flags |= V4L2_VP9_FRAME_FLAG_REFRESH_FRAME_CTX; ++ if (s->s.h.parallelmode) ++ dec_params->flags |= V4L2_VP9_FRAME_FLAG_PARALLEL_DEC_MODE; ++ if (s->ss_h) ++ dec_params->flags |= V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING; ++ if (s->ss_v) ++ dec_params->flags |= V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING; ++ if (avctx->color_range == AVCOL_RANGE_JPEG) ++ dec_params->flags |= V4L2_VP9_FRAME_FLAG_COLOR_RANGE_FULL_SWING; ++ ++ dec_params->compressed_header_size = s->s.h.compressed_header_size; ++ dec_params->uncompressed_header_size = s->s.h.uncompressed_header_size; ++ dec_params->profile = s->s.h.profile; ++ dec_params->reset_frame_context = s->s.h.resetctx > 0 ? 
s->s.h.resetctx - 1 : 0; ++ dec_params->frame_context_idx = s->s.h.framectxid; ++ dec_params->bit_depth = s->s.h.bpp; ++ ++ dec_params->interpolation_filter = s->s.h.filtermode ^ (s->s.h.filtermode <= 1); ++ dec_params->tile_cols_log2 = s->s.h.tiling.log2_tile_cols; ++ dec_params->tile_rows_log2 = s->s.h.tiling.log2_tile_rows; ++ dec_params->tx_mode = s->s.h.txfmmode; ++ dec_params->reference_mode = s->s.h.comppredmode; ++ dec_params->frame_width_minus_1 = s->w - 1; ++ dec_params->frame_height_minus_1 = s->h - 1; ++ //dec_params->render_width_minus_1 = avctx->width - 1; ++ //dec_params->render_height_minus_1 = avctx->height - 1; ++ ++ for (unsigned i = 0; i < 3; i++) { ++ const ThreadFrame *ref = &s->s.refs[s->s.h.refidx[i]]; ++ if (ref->f && ref->f->buf[0]) ++ dec_params->refs[i] = ff_v4l2_request_get_capture_timestamp(ref->f); ++ } ++ ++ if (s->s.h.lf_delta.enabled) ++ dec_params->lf.flags |= V4L2_VP9_LOOP_FILTER_FLAG_DELTA_ENABLED; ++ if (s->s.h.lf_delta.updated) ++ dec_params->lf.flags |= V4L2_VP9_LOOP_FILTER_FLAG_DELTA_UPDATE; ++ ++ dec_params->lf.level = s->s.h.filter.level; ++ dec_params->lf.sharpness = s->s.h.filter.sharpness; ++ for (unsigned i = 0; i < 4; i++) ++ dec_params->lf.ref_deltas[i] = s->s.h.lf_delta.ref[i]; ++ for (unsigned i = 0; i < 2; i++) ++ dec_params->lf.mode_deltas[i] = s->s.h.lf_delta.mode[i]; ++ for (unsigned i = 0; i < 8; i++) { ++ for (unsigned j = 0; j < 4; j++) ++ memcpy(dec_params->lf.level_lookup[i][j], s->s.h.segmentation.feat[i].lflvl[j], sizeof(dec_params->lf.level_lookup[0][0])); ++ } ++ ++ dec_params->quant.base_q_idx = s->s.h.yac_qi; ++ dec_params->quant.delta_q_y_dc = s->s.h.ydc_qdelta; ++ dec_params->quant.delta_q_uv_dc = s->s.h.uvdc_qdelta; ++ dec_params->quant.delta_q_uv_ac = s->s.h.uvac_qdelta; ++ ++ if (s->s.h.segmentation.enabled) ++ dec_params->seg.flags |= V4L2_VP9_SEGMENTATION_FLAG_ENABLED; ++ if (s->s.h.segmentation.update_map) ++ dec_params->seg.flags |= V4L2_VP9_SEGMENTATION_FLAG_UPDATE_MAP; ++ if (s->s.h.segmentation.temporal) ++ dec_params->seg.flags |= V4L2_VP9_SEGMENTATION_FLAG_TEMPORAL_UPDATE; ++ if (s->s.h.segmentation.update_data) ++ dec_params->seg.flags |= V4L2_VP9_SEGMENTATION_FLAG_UPDATE_DATA; ++ if (s->s.h.segmentation.absolute_vals) ++ dec_params->seg.flags |= V4L2_VP9_SEGMENTATION_FLAG_ABS_OR_DELTA_UPDATE; ++ ++ for (unsigned i = 0; i < 7; i++) ++ dec_params->seg.tree_probs[i] = s->s.h.segmentation.prob[i]; ++ ++ if (s->s.h.segmentation.temporal) { ++ for (unsigned i = 0; i < 3; i++) ++ dec_params->seg.pred_probs[i] = s->s.h.segmentation.pred_prob[i]; ++ } else { ++ memset(dec_params->seg.pred_probs, 255, sizeof(dec_params->seg.pred_probs)); ++ } ++ ++ for (unsigned i = 0; i < 8; i++) { ++ if (s->s.h.segmentation.feat[i].q_enabled) { ++ dec_params->seg.feature_enabled[i] |= 1 << V4L2_VP9_SEGMENT_FEATURE_QP_DELTA; ++ dec_params->seg.feature_data[i][V4L2_VP9_SEGMENT_FEATURE_QP_DELTA] = s->s.h.segmentation.feat[i].q_val; ++ } ++ ++ if (s->s.h.segmentation.feat[i].lf_enabled) { ++ dec_params->seg.feature_enabled[i] |= 1 << V4L2_VP9_SEGMENT_FEATURE_LF; ++ dec_params->seg.feature_data[i][V4L2_VP9_SEGMENT_FEATURE_LF] = s->s.h.segmentation.feat[i].lf_val; ++ } ++ ++ if (s->s.h.segmentation.feat[i].ref_enabled) { ++ dec_params->seg.feature_enabled[i] |= 1 << V4L2_VP9_SEGMENT_FEATURE_REF_FRAME; ++ dec_params->seg.feature_data[i][V4L2_VP9_SEGMENT_FEATURE_REF_FRAME] = s->s.h.segmentation.feat[i].ref_val; ++ } ++ ++ if (s->s.h.segmentation.feat[i].skip_enabled) ++ dec_params->seg.feature_enabled[i] |= 1 << 
V4L2_VP9_SEGMENT_FEATURE_SKIP; ++ } ++ ++ memcpy(dec_params->probs.tx8, s->prob.p.tx8p, sizeof(s->prob.p.tx8p)); ++ memcpy(dec_params->probs.tx16, s->prob.p.tx16p, sizeof(s->prob.p.tx16p)); ++ memcpy(dec_params->probs.tx32, s->prob.p.tx32p, sizeof(s->prob.p.tx32p)); ++ for (unsigned i = 0; i < 4; i++) { ++ for (unsigned j = 0; j < 2; j++) { ++ for (unsigned k = 0; k < 2; k++) { ++ for (unsigned l = 0; l < 6; l++) { ++ for (unsigned m = 0; m < 6; m++) { ++ memcpy(dec_params->probs.coef[i][j][k][l][m], s->prob.coef[i][j][k][l][m], sizeof(dec_params->probs.coef[0][0][0][0][0])); ++ } ++ } ++ } ++ } ++ } ++ memcpy(dec_params->probs.skip, s->prob.p.skip, sizeof(s->prob.p.skip)); ++ memcpy(dec_params->probs.inter_mode, s->prob.p.mv_mode, sizeof(s->prob.p.mv_mode)); ++ memcpy(dec_params->probs.interp_filter, s->prob.p.filter, sizeof(s->prob.p.filter)); ++ memcpy(dec_params->probs.is_inter, s->prob.p.intra, sizeof(s->prob.p.intra)); ++ memcpy(dec_params->probs.comp_mode, s->prob.p.comp, sizeof(s->prob.p.comp)); ++ memcpy(dec_params->probs.single_ref, s->prob.p.single_ref, sizeof(s->prob.p.single_ref)); ++ memcpy(dec_params->probs.comp_ref, s->prob.p.comp_ref, sizeof(s->prob.p.comp_ref)); ++ memcpy(dec_params->probs.y_mode, s->prob.p.y_mode, sizeof(s->prob.p.y_mode)); ++ for (unsigned i = 0; i < 10; i++) ++ memcpy(dec_params->probs.uv_mode[ff_to_v4l2_intramode[i]], s->prob.p.uv_mode[i], sizeof(s->prob.p.uv_mode[0])); ++ for (unsigned i = 0; i < 4; i++) ++ memcpy(dec_params->probs.partition[i * 4], s->prob.p.partition[3 - i], sizeof(s->prob.p.partition[0])); ++ memcpy(dec_params->probs.mv.joint, s->prob.p.mv_joint, sizeof(s->prob.p.mv_joint)); ++ for (unsigned i = 0; i < 2; i++) { ++ dec_params->probs.mv.sign[i] = s->prob.p.mv_comp[i].sign; ++ memcpy(dec_params->probs.mv.class[i], s->prob.p.mv_comp[i].classes, sizeof(s->prob.p.mv_comp[0].classes)); ++ dec_params->probs.mv.class0_bit[i] = s->prob.p.mv_comp[i].class0; ++ memcpy(dec_params->probs.mv.bits[i], s->prob.p.mv_comp[i].bits, sizeof(s->prob.p.mv_comp[0].bits)); ++ memcpy(dec_params->probs.mv.class0_fr[i], s->prob.p.mv_comp[i].class0_fp, sizeof(s->prob.p.mv_comp[0].class0_fp)); ++ memcpy(dec_params->probs.mv.fr[i], s->prob.p.mv_comp[i].fp, sizeof(s->prob.p.mv_comp[0].fp)); ++ dec_params->probs.mv.class0_hp[i] = s->prob.p.mv_comp[i].class0_hp; ++ dec_params->probs.mv.hp[i] = s->prob.p.mv_comp[i].hp; ++ } ++ ++ return ff_v4l2_request_reset_frame(avctx, f->tf.f); ++} ++ ++static int v4l2_request_vp9_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size) ++{ ++ const VP9Context *s = avctx->priv_data; ++ const VP9Frame *f = &s->s.frames[CUR_FRAME]; ++ ++ return ff_v4l2_request_append_output_buffer(avctx, f->tf.f, buffer, size); ++} ++ ++static int v4l2_request_vp9_end_frame(AVCodecContext *avctx) ++{ ++ const VP9Context *s = avctx->priv_data; ++ const VP9Frame *f = &s->s.frames[CUR_FRAME]; ++ V4L2RequestControlsVP9 *controls = f->hwaccel_picture_private; ++ int ret; ++ ++ struct v4l2_ext_control control[] = { ++ { ++ .id = V4L2_CID_MPEG_VIDEO_VP9_FRAME_DECODE_PARAMS, ++ .ptr = &controls->decode_params, ++ .size = sizeof(controls->decode_params), ++ }, ++ }; ++ ++ ret = ff_v4l2_request_decode_frame(avctx, f->tf.f, control, FF_ARRAY_ELEMS(control)); ++ if (ret) ++ return ret; ++ ++ if (!s->s.h.refreshctx) ++ return 0; ++ ++ return v4l2_request_vp9_get_frame_ctx(avctx, s->s.h.framectxid); ++} ++ ++static int v4l2_request_vp9_init(AVCodecContext *avctx) ++{ ++ // TODO: check V4L2_CID_MPEG_VIDEO_VP9_PROFILE ++ return 
ff_v4l2_request_init(avctx, V4L2_PIX_FMT_VP9_FRAME, 3 * 1024 * 1024, NULL, 0); ++} ++ ++const AVHWAccel ff_vp9_v4l2request_hwaccel = { ++ .name = "vp9_v4l2request", ++ .type = AVMEDIA_TYPE_VIDEO, ++ .id = AV_CODEC_ID_VP9, ++ .pix_fmt = AV_PIX_FMT_DRM_PRIME, ++ .start_frame = v4l2_request_vp9_start_frame, ++ .decode_slice = v4l2_request_vp9_decode_slice, ++ .end_frame = v4l2_request_vp9_end_frame, ++ .frame_priv_data_size = sizeof(V4L2RequestControlsVP9), ++ .init = v4l2_request_vp9_init, ++ .uninit = ff_v4l2_request_uninit, ++ .priv_data_size = sizeof(V4L2RequestContext), ++ .frame_params = ff_v4l2_request_frame_params, ++ .caps_internal = HWACCEL_CAP_ASYNC_SAFE, ++}; +diff --git a/libavcodec/vp9.c b/libavcodec/vp9.c +index fd0bab14a2..434f905c62 100644 +--- a/libavcodec/vp9.c ++++ b/libavcodec/vp9.c +@@ -191,6 +191,7 @@ static int update_size(AVCodecContext *avctx, int w, int h) + #define HWACCEL_MAX (CONFIG_VP9_DXVA2_HWACCEL + \ + CONFIG_VP9_D3D11VA_HWACCEL * 2 + \ + CONFIG_VP9_NVDEC_HWACCEL + \ ++ CONFIG_VP9_V4L2REQUEST_HWACCEL + \ + CONFIG_VP9_VAAPI_HWACCEL + \ + CONFIG_VP9_VDPAU_HWACCEL) + enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmtp = pix_fmts; +@@ -223,6 +224,9 @@ static int update_size(AVCodecContext *avctx, int w, int h) + #endif + #if CONFIG_VP9_VAAPI_HWACCEL + *fmtp++ = AV_PIX_FMT_VAAPI; ++#endif ++#if CONFIG_VP9_V4L2REQUEST_HWACCEL ++ *fmtp++ = AV_PIX_FMT_DRM_PRIME; + #endif + break; + case AV_PIX_FMT_YUV420P12: +@@ -231,6 +235,9 @@ static int update_size(AVCodecContext *avctx, int w, int h) + #endif + #if CONFIG_VP9_VAAPI_HWACCEL + *fmtp++ = AV_PIX_FMT_VAAPI; ++#endif ++#if CONFIG_VP9_V4L2REQUEST_HWACCEL ++ *fmtp++ = AV_PIX_FMT_DRM_PRIME; + #endif + break; + } +@@ -700,7 +707,8 @@ static int decode_frame_header(AVCodecContext *avctx, + get_bits(&s->gb, 8) : 255; + } + +- if (get_bits1(&s->gb)) { ++ s->s.h.segmentation.update_data = get_bits1(&s->gb); ++ if (s->s.h.segmentation.update_data) { + s->s.h.segmentation.absolute_vals = get_bits1(&s->gb); + for (i = 0; i < 8; i++) { + if ((s->s.h.segmentation.feat[i].q_enabled = get_bits1(&s->gb))) +@@ -1909,6 +1917,9 @@ AVCodec ff_vp9_decoder = { + #endif + #if CONFIG_VP9_VDPAU_HWACCEL + HWACCEL_VDPAU(vp9), ++#endif ++#if CONFIG_VP9_V4L2REQUEST_HWACCEL ++ HWACCEL_V4L2REQUEST(vp9), + #endif + NULL + }, +diff --git a/libavcodec/vp9shared.h b/libavcodec/vp9shared.h +index 54726df742..fee3568736 100644 +--- a/libavcodec/vp9shared.h ++++ b/libavcodec/vp9shared.h +@@ -131,6 +131,7 @@ typedef struct VP9BitstreamHeader { + uint8_t temporal; + uint8_t absolute_vals; + uint8_t update_map; ++ uint8_t update_data; + uint8_t prob[7]; + uint8_t pred_prob[3]; + struct { + +From 3e956323f01b221d7a38ad0a3293d337cd106f3f Mon Sep 17 00:00:00 2001 +From: Boris Brezillon +Date: Thu, 12 Dec 2019 16:13:55 +0100 +Subject: [PATCH 18/18] WIP: Add and use vp9 private linux header + +Signed-off-by: Boris Brezillon +--- + configure | 2 +- + libavcodec/v4l2_request_vp9.c | 1 + + libavcodec/vp9-ctrls.h | 485 ++++++++++++++++++++++++++++++++++ + 3 files changed, 487 insertions(+), 1 deletion(-) + create mode 100644 libavcodec/vp9-ctrls.h + +diff --git a/configure b/configure +index 2b723df55a..87c6836af2 100755 +--- a/configure ++++ b/configure +@@ -3015,7 +3015,7 @@ vp9_dxva2_hwaccel_deps="dxva2 DXVA_PicParams_VP9" + vp9_dxva2_hwaccel_select="vp9_decoder" + vp9_nvdec_hwaccel_deps="nvdec" + vp9_nvdec_hwaccel_select="vp9_decoder" +-vp9_v4l2request_hwaccel_deps="v4l2_request vp9_v4l2_request" ++vp9_v4l2request_hwaccel_deps="v4l2_request" + 
vp9_v4l2request_hwaccel_select="vp9_decoder" + vp9_vaapi_hwaccel_deps="vaapi VADecPictureParameterBufferVP9_bit_depth" + vp9_vaapi_hwaccel_select="vp9_decoder" +diff --git a/libavcodec/v4l2_request_vp9.c b/libavcodec/v4l2_request_vp9.c +index 4074c7fe4b..2e10b7ad1a 100644 +--- a/libavcodec/v4l2_request_vp9.c ++++ b/libavcodec/v4l2_request_vp9.c +@@ -19,6 +19,7 @@ + #include "hwconfig.h" + #include "v4l2_request.h" + #include "vp9dec.h" ++#include "vp9-ctrls.h" + + typedef struct V4L2RequestControlsVP9 { + struct v4l2_ctrl_vp9_frame_decode_params decode_params; +diff --git a/libavcodec/vp9-ctrls.h b/libavcodec/vp9-ctrls.h +new file mode 100644 +index 0000000000..0cdea8a18b +--- /dev/null ++++ b/libavcodec/vp9-ctrls.h +@@ -0,0 +1,485 @@ ++/* SPDX-License-Identifier: GPL-2.0 */ ++/* ++ * These are the VP9 state controls for use with stateless VP9 ++ * codec drivers. ++ * ++ * It turns out that these structs are not stable yet and will undergo ++ * more changes. So keep them private until they are stable and ready to ++ * become part of the official public API. ++ */ ++ ++#ifndef _VP9_CTRLS_H_ ++#define _VP9_CTRLS_H_ ++ ++#include <linux/types.h> ++ ++#define V4L2_PIX_FMT_VP9_FRAME v4l2_fourcc('V', 'P', '9', 'F') ++ ++#define V4L2_CID_MPEG_VIDEO_VP9_FRAME_CONTEXT(i) (V4L2_CID_MPEG_BASE + 4000 + (i)) ++#define V4L2_CID_MPEG_VIDEO_VP9_FRAME_DECODE_PARAMS (V4L2_CID_MPEG_BASE + 4004) ++#define V4L2_CTRL_TYPE_VP9_FRAME_CONTEXT 0x400 ++#define V4L2_CTRL_TYPE_VP9_FRAME_DECODE_PARAMS 0x404 ++ ++/** ++ * enum v4l2_vp9_loop_filter_flags - VP9 loop filter flags ++ * ++ * @V4L2_VP9_LOOP_FILTER_FLAG_DELTA_ENABLED: the filter level depends on ++ * the mode and reference frame used ++ * to predict a block ++ * @V4L2_VP9_LOOP_FILTER_FLAG_DELTA_UPDATE: the bitstream contains additional ++ * syntax elements that specify which ++ * mode and reference frame deltas ++ * are to be updated ++ * ++ * Those are the flags you should pass to &v4l2_vp9_loop_filter.flags. See ++ * section '7.2.8 Loop filter semantics' of the VP9 specification for more ++ * details. ++ */ ++enum v4l2_vp9_loop_filter_flags { ++ V4L2_VP9_LOOP_FILTER_FLAG_DELTA_ENABLED = 1 << 0, ++ V4L2_VP9_LOOP_FILTER_FLAG_DELTA_UPDATE = 1 << 1, ++}; ++ ++/** ++ * struct v4l2_vp9_loop_filter - VP9 loop filter parameters ++ * ++ * @flags: combination of V4L2_VP9_LOOP_FILTER_FLAG_* flags ++ * @level: indicates the loop filter strength ++ * @sharpness: indicates the sharpness level ++ * @ref_deltas: contains the adjustment needed for the filter level based on ++ * the chosen reference frame ++ * @mode_deltas: contains the adjustment needed for the filter level based on ++ * the chosen mode ++ * @level_lookup: level lookup table ++ * ++ * This structure contains all loop filter related parameters. See sections ++ * '7.2.8 Loop filter semantics' and '8.8.1 Loop filter frame init process' ++ * of the VP9 specification for more details. ++ */ ++struct v4l2_vp9_loop_filter { ++ __u8 flags; ++ __u8 level; ++ __u8 sharpness; ++ __s8 ref_deltas[4]; ++ __s8 mode_deltas[2]; ++ __u8 level_lookup[8][4][2]; ++}; ++ ++/** ++ * struct v4l2_vp9_quantization - VP9 quantization parameters ++ * ++ * @base_q_idx: indicates the base frame qindex ++ * @delta_q_y_dc: indicates the Y DC quantizer relative to base_q_idx ++ * @delta_q_uv_dc: indicates the UV DC quantizer relative to base_q_idx ++ * @delta_q_uv_ac: indicates the UV AC quantizer relative to base_q_idx ++ * @padding: padding bytes to align things on 64 bits. Must be set to 0 ++ * ++ * Encodes the quantization parameters. 
See section '7.2.9 Quantization params ++ * syntax' of the VP9 specification for more details. ++ */ ++struct v4l2_vp9_quantization { ++ __u8 base_q_idx; ++ __s8 delta_q_y_dc; ++ __s8 delta_q_uv_dc; ++ __s8 delta_q_uv_ac; ++ __u8 padding[4]; ++}; ++ ++/** ++ * enum v4l2_vp9_segmentation_flags - VP9 segmentation flags ++ * ++ * @V4L2_VP9_SEGMENTATION_FLAG_ENABLED: indicates that this frame makes use of ++ * the segmentation tool ++ * @V4L2_VP9_SEGMENTATION_FLAG_UPDATE_MAP: indicates that the segmentation map ++ * should be updated during the ++ * decoding of this frame ++ * @V4L2_VP9_SEGMENTATION_FLAG_TEMPORAL_UPDATE: indicates that the updates to ++ * the segmentation map are coded ++ * relative to the existing ++ * segmentation map ++ * @V4L2_VP9_SEGMENTATION_FLAG_UPDATE_DATA: indicates that new parameters are ++ * about to be specified for each ++ * segment ++ * @V4L2_VP9_SEGMENTATION_FLAG_ABS_OR_DELTA_UPDATE: indicates that the ++ * segmentation parameters ++ * represent the actual values ++ * to be used ++ * ++ * Those are the flags you should pass to &v4l2_vp9_segmentation.flags. See ++ * section '7.2.10 Segmentation params syntax' of the VP9 specification for ++ * more details. ++ */ ++enum v4l2_vp9_segmentation_flags { ++ V4L2_VP9_SEGMENTATION_FLAG_ENABLED = 1 << 0, ++ V4L2_VP9_SEGMENTATION_FLAG_UPDATE_MAP = 1 << 1, ++ V4L2_VP9_SEGMENTATION_FLAG_TEMPORAL_UPDATE = 1 << 2, ++ V4L2_VP9_SEGMENTATION_FLAG_UPDATE_DATA = 1 << 3, ++ V4L2_VP9_SEGMENTATION_FLAG_ABS_OR_DELTA_UPDATE = 1 << 4, ++}; ++ ++#define V4L2_VP9_SEGMENT_FEATURE_ENABLED(id) (1 << (id)) ++#define V4L2_VP9_SEGMENT_FEATURE_ENABLED_MASK 0xf ++ ++/** ++ * enum v4l2_vp9_segment_feature - VP9 segment feature IDs ++ * ++ * @V4L2_VP9_SEGMENT_FEATURE_QP_DELTA: QP delta segment feature ++ * @V4L2_VP9_SEGMENT_FEATURE_LF: loop filter segment feature ++ * @V4L2_VP9_SEGMENT_FEATURE_REF_FRAME: reference frame segment feature ++ * @V4L2_VP9_SEGMENT_FEATURE_SKIP: skip segment feature ++ * @V4L2_VP9_SEGMENT_FEATURE_CNT: number of segment features ++ * ++ * Segment feature IDs. See section '7.2.10 Segmentation params syntax' of the ++ * VP9 specification for more details. ++ */ ++enum v4l2_vp9_segment_feature { ++ V4L2_VP9_SEGMENT_FEATURE_QP_DELTA, ++ V4L2_VP9_SEGMENT_FEATURE_LF, ++ V4L2_VP9_SEGMENT_FEATURE_REF_FRAME, ++ V4L2_VP9_SEGMENT_FEATURE_SKIP, ++ V4L2_VP9_SEGMENT_FEATURE_CNT, ++}; ++ ++/** ++ * struct v4l2_vp9_segmentation - VP9 segmentation parameters ++ * ++ * @flags: combination of V4L2_VP9_SEGMENTATION_FLAG_* flags ++ * @tree_probs: specifies the probability values to be used when ++ * decoding a Segment-ID. See '5.15. Segmentation map' ++ * section of the VP9 specification for more details. ++ * @pred_probs: specifies the probability values to be used when decoding a ++ * Predicted-Segment-ID. See '6.4.14. Get segment id syntax' ++ * section of :ref:`vp9` for more details. ++ * @padding: padding used to make things aligned on 64 bits. Shall be zero ++ * filled ++ * @feature_enabled: bitmask defining which features are enabled in each ++ * segment ++ * @feature_data: data attached to each feature. Data entry is only valid if ++ * the feature is enabled ++ * ++ * Encodes the segmentation parameters. See section '7.2.10 Segmentation ++ * params syntax' of the VP9 specification for more details. 
++ */ ++struct v4l2_vp9_segmentation { ++ __u8 flags; ++ __u8 tree_probs[7]; ++ __u8 pred_probs[3]; ++ __u8 padding[5]; ++ __u8 feature_enabled[8]; ++ __s16 feature_data[8][4]; ++}; ++ ++/** ++ * enum v4l2_vp9_intra_prediction_mode - VP9 Intra prediction modes ++ * ++ * @V4L2_VP9_INTRA_PRED_MODE_DC: DC intra prediction ++ * @V4L2_VP9_INTRA_PRED_MODE_V: vertical intra prediction ++ * @V4L2_VP9_INTRA_PRED_MODE_H: horizontal intra prediction ++ * @V4L2_VP9_INTRA_PRED_MODE_D45: D45 intra prediction ++ * @V4L2_VP9_INTRA_PRED_MODE_D135: D135 intra prediction ++ * @V4L2_VP9_INTRA_PRED_MODE_D117: D117 intra prediction ++ * @V4L2_VP9_INTRA_PRED_MODE_D153: D153 intra prediction ++ * @V4L2_VP9_INTRA_PRED_MODE_D207: D207 intra prediction ++ * @V4L2_VP9_INTRA_PRED_MODE_D63: D63 intra prediction ++ * @V4L2_VP9_INTRA_PRED_MODE_TM: True Motion intra prediction ++ * ++ * See section '7.4.5 Intra frame mode info semantics' for more details. ++ */ ++enum v4l2_vp9_intra_prediction_mode { ++ V4L2_VP9_INTRA_PRED_MODE_DC, ++ V4L2_VP9_INTRA_PRED_MODE_V, ++ V4L2_VP9_INTRA_PRED_MODE_H, ++ V4L2_VP9_INTRA_PRED_MODE_D45, ++ V4L2_VP9_INTRA_PRED_MODE_D135, ++ V4L2_VP9_INTRA_PRED_MODE_D117, ++ V4L2_VP9_INTRA_PRED_MODE_D153, ++ V4L2_VP9_INTRA_PRED_MODE_D207, ++ V4L2_VP9_INTRA_PRED_MODE_D63, ++ V4L2_VP9_INTRA_PRED_MODE_TM, ++}; ++ ++/** ++ * struct v4l2_vp9_mv_probabilities - VP9 Motion vector probabilities ++ * @joint: motion vector joint probabilities ++ * @sign: motion vector sign probabilities ++ * @class: motion vector class probabilities ++ * @class0_bit: motion vector class0 bit probabilities ++ * @bits: motion vector bits probabilities ++ * @class0_fr: motion vector class0 fractional bit probabilities ++ * @fr: motion vector fractional bit probabilities ++ * @class0_hp: motion vector class0 high precision fractional bit probabilities ++ * @hp: motion vector high precision fractional bit probabilities ++ */ ++struct v4l2_vp9_mv_probabilities { ++ __u8 joint[3]; ++ __u8 sign[2]; ++ __u8 class[2][10]; ++ __u8 class0_bit[2]; ++ __u8 bits[2][10]; ++ __u8 class0_fr[2][2][3]; ++ __u8 fr[2][3]; ++ __u8 class0_hp[2]; ++ __u8 hp[2]; ++}; ++ ++/** ++ * struct v4l2_vp9_probabilities - VP9 Probabilities ++ * ++ * @tx8: TX 8x8 probabilities ++ * @tx16: TX 16x16 probabilities ++ * @tx32: TX 32x32 probabilities ++ * @coef: coefficient probabilities ++ * @skip: skip probabilities ++ * @inter_mode: inter mode probabilities ++ * @interp_filter: interpolation filter probabilities ++ * @is_inter: is inter-block probabilities ++ * @comp_mode: compound prediction mode probabilities ++ * @single_ref: single ref probabilities ++ * @comp_ref: compound ref probabilities ++ * @y_mode: Y prediction mode probabilities ++ * @uv_mode: UV prediction mode probabilities ++ * @partition: partition probabilities ++ * @mv: motion vector probabilities ++ * ++ * Structure containing most VP9 probabilities. See the VP9 specification ++ * for more details. 
++ */ ++struct v4l2_vp9_probabilities { ++ __u8 tx8[2][1]; ++ __u8 tx16[2][2]; ++ __u8 tx32[2][3]; ++ __u8 coef[4][2][2][6][6][3]; ++ __u8 skip[3]; ++ __u8 inter_mode[7][3]; ++ __u8 interp_filter[4][2]; ++ __u8 is_inter[4]; ++ __u8 comp_mode[5]; ++ __u8 single_ref[5][2]; ++ __u8 comp_ref[5]; ++ __u8 y_mode[4][9]; ++ __u8 uv_mode[10][9]; ++ __u8 partition[16][3]; ++ ++ struct v4l2_vp9_mv_probabilities mv; ++}; ++ ++/** ++ * enum v4l2_vp9_reset_frame_context - Valid values for ++ * &v4l2_ctrl_vp9_frame_decode_params->reset_frame_context ++ * ++ * @V4L2_VP9_RESET_FRAME_CTX_NONE: don't reset any frame context ++ * @V4L2_VP9_RESET_FRAME_CTX_SPEC: reset the frame context pointed by ++ * &v4l2_ctrl_vp9_frame_decode_params.frame_context_idx ++ * @V4L2_VP9_RESET_FRAME_CTX_ALL: reset all frame contexts ++ * ++ * See section '7.2 Uncompressed header semantics' of the VP9 specification ++ * for more details. ++ */ ++enum v4l2_vp9_reset_frame_context { ++ V4L2_VP9_RESET_FRAME_CTX_NONE, ++ V4L2_VP9_RESET_FRAME_CTX_SPEC, ++ V4L2_VP9_RESET_FRAME_CTX_ALL, ++}; ++ ++/** ++ * enum v4l2_vp9_interpolation_filter - VP9 interpolation filter types ++ * ++ * @V4L2_VP9_INTERP_FILTER_8TAP: eight tap filter ++ * @V4L2_VP9_INTERP_FILTER_8TAP_SMOOTH: eight tap smooth filter ++ * @V4L2_VP9_INTERP_FILTER_8TAP_SHARP: eight tap sharp filter ++ * @V4L2_VP9_INTERP_FILTER_BILINEAR: bilinear filter ++ * @V4L2_VP9_INTERP_FILTER_SWITCHABLE: filter selection is signaled at the ++ * block level ++ * ++ * See section '7.2.7 Interpolation filter semantics' of the VP9 specification ++ * for more details. ++ */ ++enum v4l2_vp9_interpolation_filter { ++ V4L2_VP9_INTERP_FILTER_8TAP, ++ V4L2_VP9_INTERP_FILTER_8TAP_SMOOTH, ++ V4L2_VP9_INTERP_FILTER_8TAP_SHARP, ++ V4L2_VP9_INTERP_FILTER_BILINEAR, ++ V4L2_VP9_INTERP_FILTER_SWITCHABLE, ++}; ++ ++/** ++ * enum v4l2_vp9_reference_mode - VP9 reference modes ++ * ++ * @V4L2_VP9_REF_MODE_SINGLE: indicates that all the inter blocks use only a ++ * single reference frame to generate motion ++ * compensated prediction ++ * @V4L2_VP9_REF_MODE_COMPOUND: requires all the inter blocks to use compound ++ * mode. Single reference frame prediction is not ++ * allowed ++ * @V4L2_VP9_REF_MODE_SELECT: allows each individual inter block to select ++ * between single and compound prediction modes ++ * ++ * See section '7.3.6 Frame reference mode semantics' of the VP9 specification ++ * for more details. ++ */ ++enum v4l2_vp9_reference_mode { ++ V4L2_VP9_REF_MODE_SINGLE, ++ V4L2_VP9_REF_MODE_COMPOUND, ++ V4L2_VP9_REF_MODE_SELECT, ++}; ++ ++/** ++ * enum v4l2_vp9_tx_mode - VP9 TX modes ++ * ++ * @V4L2_VP9_TX_MODE_ONLY_4X4: transform size is 4x4 ++ * @V4L2_VP9_TX_MODE_ALLOW_8X8: transform size can be up to 8x8 ++ * @V4L2_VP9_TX_MODE_ALLOW_16X16: transform size can be up to 16x16 ++ * @V4L2_VP9_TX_MODE_ALLOW_32X32: transform size can be up to 32x32 ++ * @V4L2_VP9_TX_MODE_SELECT: bitstream contains transform size for each block ++ * ++ * See section '7.3.1 Tx mode semantics' of the VP9 specification for more ++ * details. 
++ */ ++enum v4l2_vp9_tx_mode { ++ V4L2_VP9_TX_MODE_ONLY_4X4, ++ V4L2_VP9_TX_MODE_ALLOW_8X8, ++ V4L2_VP9_TX_MODE_ALLOW_16X16, ++ V4L2_VP9_TX_MODE_ALLOW_32X32, ++ V4L2_VP9_TX_MODE_SELECT, ++}; ++ ++/** ++ * enum v4l2_vp9_ref_id - VP9 Reference frame IDs ++ * ++ * @V4L2_REF_ID_LAST: last reference frame ++ * @V4L2_REF_ID_GOLDEN: golden reference frame ++ * @V4L2_REF_ID_ALTREF: alternative reference frame ++ * @V4L2_REF_ID_CNT: number of reference frames ++ * ++ * See section '7.4.12 Ref frames semantics' of the VP9 specification for more ++ * details. ++ */ ++enum v4l2_vp9_ref_id { ++ V4L2_REF_ID_LAST, ++ V4L2_REF_ID_GOLDEN, ++ V4L2_REF_ID_ALTREF, ++ V4L2_REF_ID_CNT, ++}; ++ ++/** ++ * enum v4l2_vp9_frame_flags - VP9 frame flags ++ * @V4L2_VP9_FRAME_FLAG_KEY_FRAME: the frame is a key frame ++ * @V4L2_VP9_FRAME_FLAG_SHOW_FRAME: the frame should be displayed ++ * @V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT: the decoding should be error resilient ++ * @V4L2_VP9_FRAME_FLAG_INTRA_ONLY: the frame does not reference other frames ++ * @V4L2_VP9_FRAME_FLAG_ALLOW_HIGH_PREC_MV: the frame might use high precision ++ * motion vectors ++ * @V4L2_VP9_FRAME_FLAG_REFRESH_FRAME_CTX: frame context should be updated ++ * after decoding ++ * @V4L2_VP9_FRAME_FLAG_PARALLEL_DEC_MODE: parallel decoding is used ++ * @V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING: horizontal subsampling is enabled ++ * @V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING: vertical subsampling is enabled ++ * @V4L2_VP9_FRAME_FLAG_COLOR_RANGE_FULL_SWING: full UV range is used ++ * ++ * Check the VP9 specification for more details. ++ */ ++enum v4l2_vp9_frame_flags { ++ V4L2_VP9_FRAME_FLAG_KEY_FRAME = 1 << 0, ++ V4L2_VP9_FRAME_FLAG_SHOW_FRAME = 1 << 1, ++ V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT = 1 << 2, ++ V4L2_VP9_FRAME_FLAG_INTRA_ONLY = 1 << 3, ++ V4L2_VP9_FRAME_FLAG_ALLOW_HIGH_PREC_MV = 1 << 4, ++ V4L2_VP9_FRAME_FLAG_REFRESH_FRAME_CTX = 1 << 5, ++ V4L2_VP9_FRAME_FLAG_PARALLEL_DEC_MODE = 1 << 6, ++ V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING = 1 << 7, ++ V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING = 1 << 8, ++ V4L2_VP9_FRAME_FLAG_COLOR_RANGE_FULL_SWING = 1 << 9, ++}; ++ ++#define V4L2_VP9_PROFILE_MAX 3 ++ ++/** ++ * struct v4l2_ctrl_vp9_frame_decode_params - VP9 frame decoding control ++ * ++ * @flags: combination of V4L2_VP9_FRAME_FLAG_* flags ++ * @compressed_header_size: compressed header size in bytes ++ * @uncompressed_header_size: uncompressed header size in bytes ++ * @profile: VP9 profile. Can be 0, 1, 2 or 3 ++ * @reset_frame_context: specifies whether the frame context should be reset ++ * to default values. See &v4l2_vp9_reset_frame_context ++ * for more details ++ * @frame_context_idx: frame context that should be used/updated ++ * @bit_depth: bits per component. Can be 8, 10 or 12. Note that not all ++ * profiles support 10 and/or 12 bit depths ++ * @interpolation_filter: specifies the filter selection used for performing ++ * inter prediction. See &v4l2_vp9_interpolation_filter ++ * for more details ++ * @tile_cols_log2: specifies the base 2 logarithm of the width of each tile ++ * (where the width is measured in units of 8x8 blocks). ++ * Shall be less than or equal to 6 ++ * @tile_rows_log2: specifies the base 2 logarithm of the height of each tile ++ * (where the height is measured in units of 8x8 blocks) ++ * @tx_mode: specifies the TX mode. See &v4l2_vp9_tx_mode for more details ++ * @reference_mode: specifies the type of inter prediction to be used. See ++ * &v4l2_vp9_reference_mode for more details ++ * @padding: needed to make this struct 64 bit aligned. 
Shall be filled with ++ * zeros ++ * @frame_width_minus_1: add 1 to it and you'll get the frame width expressed ++ * in pixels ++ * @frame_height_minus_1: add 1 to it and you'll get the frame height expressed ++ * in pixels ++ * @render_width_minus_1: add 1 to it and you'll get the expected render width ++ * expressed in pixels. This is not used during the ++ * decoding process but might be used by HW scalers to ++ * prepare a frame that's ready for scanout ++ * @render_height_minus_1: add 1 to it and you'll get the expected render height ++ * expressed in pixels. This is not used during the ++ * decoding process but might be used by HW scalers to ++ * prepare a frame that's ready for scanout ++ * @refs: array of reference frames. See &v4l2_vp9_ref_id for more details ++ * @lf: loop filter parameters. See &v4l2_vp9_loop_filter for more details ++ * @quant: quantization parameters. See &v4l2_vp9_quantization for more details ++ * @seg: segmentation parameters. See &v4l2_vp9_segmentation for more details ++ * @probs: probabilities. See &v4l2_vp9_probabilities for more details ++ */ ++struct v4l2_ctrl_vp9_frame_decode_params { ++ __u32 flags; ++ __u16 compressed_header_size; ++ __u16 uncompressed_header_size; ++ __u8 profile; ++ __u8 reset_frame_context; ++ __u8 frame_context_idx; ++ __u8 bit_depth; ++ __u8 interpolation_filter; ++ __u8 tile_cols_log2; ++ __u8 tile_rows_log2; ++ __u8 tx_mode; ++ __u8 reference_mode; ++ __u8 padding[6]; ++ __u16 frame_width_minus_1; ++ __u16 frame_height_minus_1; ++ __u16 render_width_minus_1; ++ __u16 render_height_minus_1; ++ __u64 refs[V4L2_REF_ID_CNT]; ++ struct v4l2_vp9_loop_filter lf; ++ struct v4l2_vp9_quantization quant; ++ struct v4l2_vp9_segmentation seg; ++ struct v4l2_vp9_probabilities probs; ++}; ++ ++#define V4L2_VP9_NUM_FRAME_CTX 4 ++ ++/** ++ * struct v4l2_ctrl_vp9_frame_ctx - VP9 frame context control ++ * ++ * @probs: VP9 probabilities ++ * ++ * This control is accessed in both directions. The user should initialize the ++ * 4 contexts with default values just after starting the stream. Then before ++ * decoding a frame it should query the current frame context (the one passed ++ * through &v4l2_ctrl_vp9_frame_decode_params.frame_context_idx) to initialize ++ * &v4l2_ctrl_vp9_frame_decode_params.probs. The probs are then adjusted based ++ * on the bitstream info and passed to the kernel. The codec should update ++ * the frame context after the frame has been decoded, so that next time ++ * userspace queries this context it contains the updated probabilities. ++ */ ++struct v4l2_ctrl_vp9_frame_ctx { ++ struct v4l2_vp9_probabilities probs; ++}; ++ ++#endif /* _VP9_CTRLS_H_ */