diff --git a/packages/graphics/bcm2835-driver/package.mk b/packages/graphics/bcm2835-driver/package.mk index 917721b578..40aa487cb5 100644 --- a/packages/graphics/bcm2835-driver/package.mk +++ b/packages/graphics/bcm2835-driver/package.mk @@ -3,8 +3,8 @@ # Copyright (C) 2017-present Team LibreELEC (https://libreelec.tv) PKG_NAME="bcm2835-driver" -PKG_VERSION="1df55790fb191704c0ce630d4d0713a8beb43a7d" -PKG_SHA256="9be26aae349616aa2cfa82c11f3188efb848e36d6ebe401f5d8682e376483d56" +PKG_VERSION="5ceac9414fd634dbc0762d80677744465634af2f" +PKG_SHA256="5e9f690ffb378748c8b4e9f3d8e8688701932b50db9202842cd8df2808993f70" PKG_LICENSE="nonfree" PKG_SITE="http://www.broadcom.com" PKG_URL="${DISTRO_SRC}/${PKG_NAME}-${PKG_VERSION}.tar.xz" diff --git a/packages/graphics/bcm2835-driver/system.d/unbind-console.service b/packages/graphics/bcm2835-driver/system.d/unbind-console.service deleted file mode 100644 index 544f1b50f6..0000000000 --- a/packages/graphics/bcm2835-driver/system.d/unbind-console.service +++ /dev/null @@ -1,12 +0,0 @@ -[Unit] -Description=unbind Framebuffer console - -ConditionPathExists=/sys/class/vtconsole/vtcon1/bind - -[Service] -Type=oneshot -RemainAfterExit=yes -ExecStart=/bin/sh -c 'echo 0 > /sys/class/vtconsole/vtcon1/bind' - -[Install] -WantedBy=graphical.target diff --git a/packages/linux/package.mk b/packages/linux/package.mk index 56f3375288..8ec7030567 100644 --- a/packages/linux/package.mk +++ b/packages/linux/package.mk @@ -22,8 +22,8 @@ case "${LINUX}" in PKG_SOURCE_NAME="linux-${LINUX}-${PKG_VERSION}.tar.gz" ;; raspberrypi) - PKG_VERSION="7fb9d006d3ff3baf2e205e0c85c4e4fd0a64fcd0" # 5.10.27 - PKG_SHA256="e70ecd479ea323d00ed441f1888256687dec96b0f4098585b1646f2d5d930eff" + PKG_VERSION="9b672c5441497337f286686122a6f17fd7985cc2" # 5.10.46 + PKG_SHA256="5f620ce59d6f27dd77f7675517c3f691b40c4b9cce6d608f3bbfd3e449f9125c" PKG_URL="https://github.com/raspberrypi/linux/archive/${PKG_VERSION}.tar.gz" PKG_SOURCE_NAME="linux-${LINUX}-${PKG_VERSION}.tar.gz" ;; diff --git a/packages/multimedia/ffmpeg/patches/rpi/ffmpeg-001-rpi.patch b/packages/multimedia/ffmpeg/patches/rpi/ffmpeg-001-rpi.patch index b54dc78905..2713d44743 100644 --- a/packages/multimedia/ffmpeg/patches/rpi/ffmpeg-001-rpi.patch +++ b/packages/multimedia/ffmpeg/patches/rpi/ffmpeg-001-rpi.patch @@ -1,86 +1,5 @@ -diff --git a/.gitignore b/.gitignore -index 2450ee8fc5..4bcc3ae643 100644 ---- a/.gitignore -+++ b/.gitignore -@@ -1,6 +1,7 @@ - *.a - *.o - *.o.* -+*.bin - *.d - *.def - *.dll -@@ -26,6 +27,7 @@ - .\#* - /.config - /.version -+/build/ - /ffmpeg - /ffplay - /ffprobe -diff --git a/BUILD.txt b/BUILD.txt -new file mode 100644 -index 0000000000..49ed1f119d ---- /dev/null -+++ b/BUILD.txt -@@ -0,0 +1,55 @@ -+# Setup & Build instructions for testing Argon30 mesa support (on Pi4) -+ -+# These assume that the drm_mmal test for Sand8 has been built on this Pi -+# as build relies on many of the same files -+ -+# 1st get everything required to build ffmpeg -+# If sources aren't already enabled on your Pi then enable them -+sudo su -+sed "s/#deb-src/deb-src/" /etc/apt/sources.list > /tmp/sources.list -+sed "s/#deb-src/deb-src/" /etc/apt/sources.list.d/raspi.list > /tmp/raspi.list -+mv /tmp/sources.list /etc/apt/ -+mv /tmp/raspi.list /etc/apt/sources.list.d/ -+apt update -+ -+# Get dependancies -+sudo apt build-dep ffmpeg -+ -+# Enable H265 V4L2 request decoder -+sudo su -+echo dtoverlay=rpivid-v4l2 >> /boot/config.txt -+reboot -+# Check it has turned up -+ls -la /dev/video* -+# This should include video19 -+# crw-rw----+ 1 root 
video 81, 7 Aug 4 17:25 /dev/video19 -+ -+# Config -+pi-util/conf_native.sh -+ -+# Build (this is a bit dull) -+# If you want to poke the source the libavdevice/egl_vout.c contains the -+# output code - -+make -j6 -+ -+# Grab test streams -+wget http://www.jell.yfish.us/media/jellyfish-3-mbps-hd-h264.mkv -+wget http://www.jell.yfish.us/media/jellyfish-3-mbps-hd-hevc.mkv -+wget http://www.jell.yfish.us/media/jellyfish-3-mbps-hd-hevc-10bit.mkv -+ -+# Test i420 output (works currently) -+./ffmpeg -no_cvt_hw -vcodec h264_v4l2m2m -i jellyfish-3-mbps-hd-h264.mkv -f vout_egl - -+ -+# Test Sand8 output - doesn't currently work but should once you have -+# Sand8 working in drm_mmal. I can't guarantee that this will work as -+# I can't test this path with a known working format, but the debug looks -+# good. If this doesn't work & drm_mmal does with sand8 then come back to me -+./ffmpeg -no_cvt_hw -hwaccel drm -vcodec hevc -i jellyfish-3-mbps-hd-hevc.mkv -f vout_egl - -+ -+# Test Sand30 - doesn't currently work -+# (Beware that when FFmpeg errors out it often leaves your teminal window -+# in a state where you need to reset it) -+./ffmpeg -no_cvt_hw -hwaccel drm -vcodec hevc -i jellyfish-3-mbps-hd-hevc-10bit.mkv -f vout_egl - -+ -+ -+ diff --git a/configure b/configure -index 8569a60bf8..277d36cf9a 100755 +index 36713ab658..89a47e046f 100755 --- a/configure +++ b/configure @@ -274,6 +274,7 @@ External library support: @@ -190,16 +109,7 @@ index 8569a60bf8..277d36cf9a 100755 vaapi_x11_deps="xlib" videotoolbox_hwaccel_deps="videotoolbox pthreads" videotoolbox_hwaccel_extralibs="-framework QuartzCore" -@@ -2920,6 +2941,8 @@ h264_dxva2_hwaccel_deps="dxva2" - h264_dxva2_hwaccel_select="h264_decoder" - h264_nvdec_hwaccel_deps="nvdec" - h264_nvdec_hwaccel_select="h264_decoder" -+h264_v4l2request_hwaccel_deps="v4l2_request" -+h264_v4l2request_hwaccel_select="h264_decoder" - h264_vaapi_hwaccel_deps="vaapi" - h264_vaapi_hwaccel_select="h264_decoder" - h264_vdpau_hwaccel_deps="vdpau" -@@ -2934,6 +2957,12 @@ hevc_dxva2_hwaccel_deps="dxva2 DXVA_PicParams_HEVC" +@@ -2934,6 +2955,12 @@ hevc_dxva2_hwaccel_deps="dxva2 DXVA_PicParams_HEVC" hevc_dxva2_hwaccel_select="hevc_decoder" hevc_nvdec_hwaccel_deps="nvdec" hevc_nvdec_hwaccel_select="hevc_decoder" @@ -212,34 +122,7 @@ index 8569a60bf8..277d36cf9a 100755 hevc_vaapi_hwaccel_deps="vaapi VAPictureParameterBufferHEVC" hevc_vaapi_hwaccel_select="hevc_decoder" hevc_vdpau_hwaccel_deps="vdpau VdpPictureInfoHEVC" -@@ -2962,6 +2991,8 @@ mpeg2_dxva2_hwaccel_deps="dxva2" - mpeg2_dxva2_hwaccel_select="mpeg2video_decoder" - mpeg2_nvdec_hwaccel_deps="nvdec" - mpeg2_nvdec_hwaccel_select="mpeg2video_decoder" -+mpeg2_v4l2request_hwaccel_deps="v4l2_request mpeg2_v4l2_request" -+mpeg2_v4l2request_hwaccel_select="mpeg2video_decoder" - mpeg2_vaapi_hwaccel_deps="vaapi" - mpeg2_vaapi_hwaccel_select="mpeg2video_decoder" - mpeg2_vdpau_hwaccel_deps="vdpau" -@@ -2992,6 +3023,8 @@ vc1_vdpau_hwaccel_deps="vdpau" - vc1_vdpau_hwaccel_select="vc1_decoder" - vp8_nvdec_hwaccel_deps="nvdec" - vp8_nvdec_hwaccel_select="vp8_decoder" -+vp8_v4l2request_hwaccel_deps="v4l2_request" -+vp8_v4l2request_hwaccel_select="vp8_decoder" - vp8_vaapi_hwaccel_deps="vaapi" - vp8_vaapi_hwaccel_select="vp8_decoder" - vp9_d3d11va_hwaccel_deps="d3d11va DXVA_PicParams_VP9" -@@ -3002,6 +3035,8 @@ vp9_dxva2_hwaccel_deps="dxva2 DXVA_PicParams_VP9" - vp9_dxva2_hwaccel_select="vp9_decoder" - vp9_nvdec_hwaccel_deps="nvdec" - vp9_nvdec_hwaccel_select="vp9_decoder" -+vp9_v4l2request_hwaccel_deps="v4l2_request" 
-+vp9_v4l2request_hwaccel_select="vp9_decoder" - vp9_vaapi_hwaccel_deps="vaapi VADecPictureParameterBufferVP9_bit_depth" - vp9_vaapi_hwaccel_select="vp9_decoder" - vp9_vdpau_hwaccel_deps="vdpau VdpPictureInfoVP9" -@@ -3401,8 +3436,14 @@ sndio_indev_deps="sndio" +@@ -3401,8 +3428,13 @@ sndio_indev_deps="sndio" sndio_outdev_deps="sndio" v4l2_indev_deps_any="linux_videodev2_h sys_videoio_h" v4l2_indev_suggest="libv4l2" @@ -247,23 +130,21 @@ index 8569a60bf8..277d36cf9a 100755 v4l2_outdev_deps_any="linux_videodev2_h sys_videoio_h" v4l2_outdev_suggest="libv4l2" +vout_drm_outdev_deps="libdrm vout_drm" -+vout_egl_outdev_deps="vout_egl" +vout_egl_outdev_select="epoxy" +vout_rpi_outdev_deps="rpi" +vout_rpi_outdev_select="sand" vfwcap_indev_deps="vfw32 vfwcap_defines" xcbgrab_indev_deps="libxcb" xcbgrab_indev_suggest="libxcb_shm libxcb_shape libxcb_xfixes" -@@ -3618,6 +3659,8 @@ tonemap_vaapi_filter_deps="vaapi VAProcFilterParameterBufferHDRToneMapping" +@@ -3618,6 +3650,7 @@ tonemap_vaapi_filter_deps="vaapi VAProcFilterParameterBufferHDRToneMapping" tonemap_opencl_filter_deps="opencl const_nan" transpose_opencl_filter_deps="opencl" transpose_vaapi_filter_deps="vaapi VAProcPipelineCaps_rotation_flags" -+unsand_filter_deps="rpi" +unsand_filter_select="sand" unsharp_opencl_filter_deps="opencl" uspp_filter_deps="gpl avcodec" vaguedenoiser_filter_deps="gpl" -@@ -6299,6 +6342,7 @@ enabled libdav1d && require_pkg_config libdav1d "dav1d >= 0.4.0" "dav1d +@@ -6299,6 +6332,7 @@ enabled libdav1d && require_pkg_config libdav1d "dav1d >= 0.4.0" "dav1d enabled libdavs2 && require_pkg_config libdavs2 "davs2 >= 1.6.0" davs2.h davs2_decoder_open enabled libdc1394 && require_pkg_config libdc1394 libdc1394-2 dc1394/dc1394.h dc1394_new enabled libdrm && require_pkg_config libdrm libdrm xf86drm.h drmGetVersion @@ -271,7 +152,7 @@ index 8569a60bf8..277d36cf9a 100755 enabled libfdk_aac && { check_pkg_config libfdk_aac fdk-aac "fdk-aac/aacenc_lib.h" aacEncOpen || { require libfdk_aac fdk-aac/aacenc_lib.h aacEncOpen -lfdk-aac && warn "using libfdk without pkg-config"; } } -@@ -6376,6 +6420,7 @@ enabled libtls && require_pkg_config libtls libtls tls.h tls_configur +@@ -6376,6 +6410,7 @@ enabled libtls && require_pkg_config libtls libtls tls.h tls_configur enabled libtwolame && require libtwolame twolame.h twolame_init -ltwolame && { check_lib libtwolame twolame.h twolame_encode_buffer_float32_interleaved -ltwolame || die "ERROR: libtwolame must be installed and version must be >= 0.3.10"; } @@ -279,7 +160,7 @@ index 8569a60bf8..277d36cf9a 100755 enabled libv4l2 && require_pkg_config libv4l2 libv4l2 libv4l2.h v4l2_ioctl enabled libvidstab && require_pkg_config libvidstab "vidstab >= 0.98" vid.stab/libvidstab.h vsMotionDetectInit enabled libvmaf && require_pkg_config libvmaf "libvmaf >= 1.3.9" libvmaf.h compute_vmaf -@@ -6430,11 +6475,12 @@ enabled mbedtls && { check_pkg_config mbedtls mbedtls mbedtls/x509_crt +@@ -6430,11 +6465,12 @@ enabled mbedtls && { check_pkg_config mbedtls mbedtls mbedtls/x509_crt check_lib mbedtls mbedtls/ssl.h mbedtls_ssl_init -lmbedtls -lmbedx509 -lmbedcrypto || die "ERROR: mbedTLS not found"; } enabled mediacodec && { enabled jni || die "ERROR: mediacodec requires --enable-jni"; } @@ -294,7 +175,7 @@ index 8569a60bf8..277d36cf9a 100755 die "ERROR: mmal not found" && check_func_headers interface/mmal/mmal.h "MMAL_PARAMETER_VIDEO_MAX_NUM_CALLBACKS"; } enabled openal && { { for al_extralibs in "${OPENAL_LIBS}" "-lopenal" "-lOpenAL32"; do -@@ -6475,6 +6521,10 @@ enabled rkmpp && { require_pkg_config 
rkmpp rockchip_mpp rockchip/r +@@ -6475,6 +6511,10 @@ enabled rkmpp && { require_pkg_config rkmpp rockchip_mpp rockchip/r { enabled libdrm || die "ERROR: rkmpp requires --enable-libdrm"; } } @@ -305,22 +186,17 @@ index 8569a60bf8..277d36cf9a 100755 enabled vapoursynth && require_pkg_config vapoursynth "vapoursynth-script >= 42" VSScript.h vsscript_init -@@ -6556,6 +6606,13 @@ if enabled v4l2_m2m; then +@@ -6556,6 +6596,8 @@ if enabled v4l2_m2m; then check_cc vp9_v4l2_m2m linux/videodev2.h "int i = V4L2_PIX_FMT_VP9;" fi +check_func_headers "linux/media.h linux/videodev2.h" v4l2_timeval_to_ns -+check_cc h264_v4l2_request linux/videodev2.h "int i = V4L2_PIX_FMT_H264_SLICE;" +check_cc hevc_v4l2_request linux/videodev2.h "int i = V4L2_PIX_FMT_HEVC_SLICE;" -+check_cc mpeg2_v4l2_request linux/videodev2.h "int i = V4L2_PIX_FMT_MPEG2_SLICE;" -+check_cc vp8_v4l2_request linux/videodev2.h "int i = V4L2_PIX_FMT_VP8_FRAME;" -+check_cc vp9_v4l2_request linux/videodev2.h "int i = V4L2_PIX_FMT_VP9_FRAME;" -+ check_headers sys/videoio.h test_code cc sys/videoio.h "struct v4l2_frmsizeenum vfse; vfse.discrete.width = 0;" && enable_sanitized struct_v4l2_frmivalenum_discrete diff --git a/fftools/ffmpeg.c b/fftools/ffmpeg.c -index 2e9448ea2b..faa8501dd0 100644 +index 2e9448ea2b..65a050b272 100644 --- a/fftools/ffmpeg.c +++ b/fftools/ffmpeg.c @@ -2118,8 +2118,8 @@ static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame) @@ -334,25 +210,17 @@ index 2e9448ea2b..faa8501dd0 100644 break; } -@@ -2367,6 +2367,8 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_ - if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) { - ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames; - } else -+ { -+#if 0 - av_log(ist->dec_ctx, AV_LOG_WARNING, - "video_delay is larger in decoder than demuxer %d > %d.\n" - "If you want to help, upload a sample " -@@ -2374,6 +2376,8 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_ - "and contact the ffmpeg-devel mailing list. 
(ffmpeg-devel@ffmpeg.org)\n", - ist->dec_ctx->has_b_frames, - ist->st->codecpar->video_delay); -+#endif -+ } - } +@@ -2130,6 +2130,9 @@ static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame) + (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data)) + need_reinit = 1; - if (ret != AVERROR_EOF) -@@ -2400,8 +2404,7 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_ ++ if (no_cvt_hw && fg->graph) ++ need_reinit = 0; ++ + if (need_reinit) { + ret = ifilter_parameters_from_frame(ifilter, frame); + if (ret < 0) +@@ -2400,8 +2403,7 @@ static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_ decoded_frame->top_field_first = ist->top_field_first; ist->frames_decoded++; @@ -362,7 +230,24 @@ index 2e9448ea2b..faa8501dd0 100644 err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame); if (err < 0) goto fail; -@@ -2913,6 +2916,15 @@ static int init_input_stream(int ist_index, char *error, int error_len) +@@ -2819,6 +2821,16 @@ static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat + } else { + const HWAccel *hwaccel = NULL; + int i; ++ ++ if (no_cvt_hw) { ++ config = avcodec_get_hw_config(s->codec, 0); ++ if (config->device_type == AV_HWDEVICE_TYPE_NONE) { ++ av_log(s, AV_LOG_DEBUG, "no_cvt_hw so accepting pix_fmt %d with codec internal hwaccel\n", *p); ++ ist->hwaccel_pix_fmt = *p; ++ break; ++ } ++ } ++ + for (i = 0; hwaccels[i].name; i++) { + if (hwaccels[i].pix_fmt == *p) { + hwaccel = &hwaccels[i]; +@@ -2913,6 +2925,15 @@ static int init_input_stream(int ist_index, char *error, int error_len) return ret; } @@ -379,7 +264,7 @@ index 2e9448ea2b..faa8501dd0 100644 if (ret == AVERROR_EXPERIMENTAL) abort_codec_experimental(codec, 0); diff --git a/fftools/ffmpeg.h b/fftools/ffmpeg.h -index 828cb2a4ff..55d4db293e 100644 +index 828cb2a4ff..f2abea94d7 100644 --- a/fftools/ffmpeg.h +++ b/fftools/ffmpeg.h @@ -61,6 +61,7 @@ enum HWAccelID { @@ -398,14 +283,6 @@ index 828cb2a4ff..55d4db293e 100644 extern int do_deinterlace; extern int do_hex_dump; extern int do_pkt_dump; -@@ -653,6 +655,7 @@ int ffmpeg_parse_options(int argc, char **argv); - - int videotoolbox_init(AVCodecContext *s); - int qsv_init(AVCodecContext *s); -+int rpi_init(AVCodecContext *s); - - HWDevice *hw_device_get_by_name(const char *name); - int hw_device_init_from_string(const char *arg, HWDevice **dev); diff --git a/fftools/ffmpeg_filter.c b/fftools/ffmpeg_filter.c index 422e1268e9..deb89c076d 100644 --- a/fftools/ffmpeg_filter.c @@ -421,8 +298,21 @@ index 422e1268e9..deb89c076d 100644 ifilter->sample_aspect_ratio = frame->sample_aspect_ratio; ifilter->sample_rate = frame->sample_rate; +diff --git a/fftools/ffmpeg_hw.c b/fftools/ffmpeg_hw.c +index fc4a5d31d6..cc69dce40e 100644 +--- a/fftools/ffmpeg_hw.c ++++ b/fftools/ffmpeg_hw.c +@@ -75,6 +75,8 @@ static char *hw_device_default_name(enum AVHWDeviceType type) + char *name; + size_t index_pos; + int index, index_limit = 1000; ++ if (!type_name) ++ return NULL; + index_pos = strlen(type_name); + name = av_malloc(index_pos + 4); + if (!name) diff --git a/fftools/ffmpeg_opt.c b/fftools/ffmpeg_opt.c -index 2eb4e1c973..98207be2e2 100644 +index 2eb4e1c973..ffbfa9accf 100644 --- a/fftools/ffmpeg_opt.c +++ b/fftools/ffmpeg_opt.c @@ -130,12 +130,22 @@ static const char *opt_name_enc_time_bases[] = {"enc_time_base", NULL @@ -430,7 +320,7 @@ index 2eb4e1c973..98207be2e2 100644 } +#if CONFIG_RPI -+int rpi_init(AVCodecContext *avctx) { ++static int rpi_init(AVCodecContext 
*avctx) { + return 0; +} +#endif @@ -456,17 +346,7 @@ index 2eb4e1c973..98207be2e2 100644 int do_hex_dump = 0; int do_pkt_dump = 0; int copy_ts = 0; -@@ -755,7 +766,9 @@ static AVCodec *choose_decoder(OptionsContext *o, AVFormatContext *s, AVStream * - st->codecpar->codec_id = codec->id; - return codec; - } else -+ { - return avcodec_find_decoder(st->codecpar->codec_id); -+ } - } - - /* Add all the streams from the given input file to the global -@@ -3460,6 +3473,8 @@ const OptionDef options[] = { +@@ -3460,6 +3471,8 @@ const OptionDef options[] = { "add timings for benchmarking" }, { "benchmark_all", OPT_BOOL | OPT_EXPERT, { &do_benchmark_all }, "add timings for each task" }, @@ -476,7 +356,7 @@ index 2eb4e1c973..98207be2e2 100644 "write program-readable progress information", "url" }, { "stdin", OPT_BOOL | OPT_EXPERT, { &stdin_interaction }, diff --git a/libavcodec/Makefile b/libavcodec/Makefile -index 5a6ea59715..c9d056101d 100644 +index 5a6ea59715..3b589eb2f3 100644 --- a/libavcodec/Makefile +++ b/libavcodec/Makefile @@ -19,6 +19,7 @@ HEADERS = ac3_parser.h \ @@ -495,18 +375,22 @@ index 5a6ea59715..c9d056101d 100644 OBJS-$(CONFIG_RV34DSP) += rv34dsp.o OBJS-$(CONFIG_SHARED) += log2_tab.o reverse.o OBJS-$(CONFIG_SINEWIN) += sinewin.o sinewin_fixed.o -@@ -153,6 +155,7 @@ OBJS-$(CONFIG_VP3DSP) += vp3dsp.o +@@ -152,7 +154,10 @@ OBJS-$(CONFIG_VIDEODSP) += videodsp.o + OBJS-$(CONFIG_VP3DSP) += vp3dsp.o OBJS-$(CONFIG_VP56DSP) += vp56dsp.o OBJS-$(CONFIG_VP8DSP) += vp8dsp.o - OBJS-$(CONFIG_V4L2_M2M) += v4l2_m2m.o v4l2_context.o v4l2_buffers.o v4l2_fmt.o -+OBJS-$(CONFIG_V4L2_REQUEST) += v4l2_request.o v4l2_phase.o +-OBJS-$(CONFIG_V4L2_M2M) += v4l2_m2m.o v4l2_context.o v4l2_buffers.o v4l2_fmt.o ++OBJS-$(CONFIG_V4L2_M2M) += v4l2_m2m.o v4l2_context.o v4l2_buffers.o v4l2_fmt.o\ ++ weak_link.o ++OBJS-$(CONFIG_V4L2_REQUEST) += v4l2_req_media.o v4l2_req_pollqueue.o v4l2_req_dmabufs.o\ ++ v4l2_req_devscan.o weak_link.o OBJS-$(CONFIG_WMA_FREQS) += wma_freqs.o OBJS-$(CONFIG_WMV2DSP) += wmv2dsp.o -@@ -381,6 +384,15 @@ OBJS-$(CONFIG_HCOM_DECODER) += hcom.o - OBJS-$(CONFIG_HEVC_DECODER) += hevcdec.o hevc_mvs.o \ - hevc_cabac.o hevc_refs.o hevcpred.o \ - hevcdsp.o hevc_filter.o hevc_data.o +@@ -391,6 +396,14 @@ OBJS-$(CONFIG_HEVC_QSV_DECODER) += qsvdec_h2645.o + OBJS-$(CONFIG_HEVC_QSV_ENCODER) += qsvenc_hevc.o hevc_ps_enc.o \ + hevc_data.o + OBJS-$(CONFIG_HEVC_RKMPP_DECODER) += rkmppdec.o +OBJS-$(CONFIG_RPI) += rpi_mem.o \ + rpi_mailbox.o rpi_zc.o +OBJS-$(CONFIG_HEVC_RPI_DECODER) += rpi_hevcdec.o rpi_hevc_mvs.o \ @@ -515,52 +399,20 @@ index 5a6ea59715..c9d056101d 100644 + rpi_hevc_shader.o rpi_hevc_shader_template.o \ + rpi_hevc_parse.o h2645_parse.o rpi_hevc_ps.o \ + rpi_hevc_sei.o rpi_hevc_data.o rpi_qpu.o rpi_mem.o -+OBJS-$(CONFIG_HEVC_CUVID_DECODER) += cuvid.o - OBJS-$(CONFIG_HEVC_AMF_ENCODER) += amfenc_hevc.o - OBJS-$(CONFIG_HEVC_CUVID_DECODER) += cuviddec.o - OBJS-$(CONFIG_HEVC_MEDIACODEC_DECODER) += mediacodecdec.o -@@ -902,6 +914,7 @@ OBJS-$(CONFIG_H264_D3D11VA_HWACCEL) += dxva2_h264.o - OBJS-$(CONFIG_H264_DXVA2_HWACCEL) += dxva2_h264.o - OBJS-$(CONFIG_H264_NVDEC_HWACCEL) += nvdec_h264.o - OBJS-$(CONFIG_H264_QSV_HWACCEL) += qsvdec_h2645.o -+OBJS-$(CONFIG_H264_V4L2REQUEST_HWACCEL) += v4l2_request_h264.o - OBJS-$(CONFIG_H264_VAAPI_HWACCEL) += vaapi_h264.o - OBJS-$(CONFIG_H264_VDPAU_HWACCEL) += vdpau_h264.o - OBJS-$(CONFIG_H264_VIDEOTOOLBOX_HWACCEL) += videotoolbox.o -@@ -909,8 +922,11 @@ OBJS-$(CONFIG_HEVC_D3D11VA_HWACCEL) += dxva2_hevc.o + OBJS-$(CONFIG_HEVC_VAAPI_ENCODER) += vaapi_encode_h265.o 
h265_profile_level.o + OBJS-$(CONFIG_HEVC_V4L2M2M_DECODER) += v4l2_m2m_dec.o + OBJS-$(CONFIG_HEVC_V4L2M2M_ENCODER) += v4l2_m2m_enc.o +@@ -909,6 +922,9 @@ OBJS-$(CONFIG_HEVC_D3D11VA_HWACCEL) += dxva2_hevc.o OBJS-$(CONFIG_HEVC_DXVA2_HWACCEL) += dxva2_hevc.o OBJS-$(CONFIG_HEVC_NVDEC_HWACCEL) += nvdec_hevc.o OBJS-$(CONFIG_HEVC_QSV_HWACCEL) += qsvdec_h2645.o ++OBJS-$(CONFIG_HEVC_RPI4_8_HWACCEL) += rpivid_hevc.o ++OBJS-$(CONFIG_HEVC_RPI4_10_HWACCEL) += rpivid_hevc.o +OBJS-$(CONFIG_HEVC_V4L2REQUEST_HWACCEL) += v4l2_request_hevc.o OBJS-$(CONFIG_HEVC_VAAPI_HWACCEL) += vaapi_hevc.o h265_profile_level.o OBJS-$(CONFIG_HEVC_VDPAU_HWACCEL) += vdpau_hevc.o -+OBJS-$(CONFIG_HEVC_RPI4_8_HWACCEL) += rpivid_hevc.o -+OBJS-$(CONFIG_HEVC_RPI4_10_HWACCEL) += rpivid_hevc.o OBJS-$(CONFIG_MJPEG_NVDEC_HWACCEL) += nvdec_mjpeg.o - OBJS-$(CONFIG_MJPEG_VAAPI_HWACCEL) += vaapi_mjpeg.o - OBJS-$(CONFIG_MPEG1_NVDEC_HWACCEL) += nvdec_mpeg12.o -@@ -921,6 +937,7 @@ OBJS-$(CONFIG_MPEG2_D3D11VA_HWACCEL) += dxva2_mpeg2.o - OBJS-$(CONFIG_MPEG2_DXVA2_HWACCEL) += dxva2_mpeg2.o - OBJS-$(CONFIG_MPEG2_NVDEC_HWACCEL) += nvdec_mpeg12.o - OBJS-$(CONFIG_MPEG2_QSV_HWACCEL) += qsvdec_other.o -+OBJS-$(CONFIG_MPEG2_V4L2REQUEST_HWACCEL) += v4l2_request_mpeg2.o - OBJS-$(CONFIG_MPEG2_VAAPI_HWACCEL) += vaapi_mpeg2.o - OBJS-$(CONFIG_MPEG2_VDPAU_HWACCEL) += vdpau_mpeg12.o - OBJS-$(CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL) += videotoolbox.o -@@ -936,10 +953,12 @@ OBJS-$(CONFIG_VC1_QSV_HWACCEL) += qsvdec_other.o - OBJS-$(CONFIG_VC1_VAAPI_HWACCEL) += vaapi_vc1.o - OBJS-$(CONFIG_VC1_VDPAU_HWACCEL) += vdpau_vc1.o - OBJS-$(CONFIG_VP8_NVDEC_HWACCEL) += nvdec_vp8.o -+OBJS-$(CONFIG_VP8_V4L2REQUEST_HWACCEL) += v4l2_request_vp8.o - OBJS-$(CONFIG_VP8_VAAPI_HWACCEL) += vaapi_vp8.o - OBJS-$(CONFIG_VP9_D3D11VA_HWACCEL) += dxva2_vp9.o - OBJS-$(CONFIG_VP9_DXVA2_HWACCEL) += dxva2_vp9.o - OBJS-$(CONFIG_VP9_NVDEC_HWACCEL) += nvdec_vp9.o -+OBJS-$(CONFIG_VP9_V4L2REQUEST_HWACCEL) += v4l2_request_vp9.o - OBJS-$(CONFIG_VP9_VAAPI_HWACCEL) += vaapi_vp9.o - OBJS-$(CONFIG_VP9_VDPAU_HWACCEL) += vdpau_vp9.o - OBJS-$(CONFIG_VP8_QSV_HWACCEL) += qsvdec_other.o -@@ -1261,3 +1280,31 @@ $(SUBDIR)qdm2.o: $(SUBDIR)qdm2_tables.h +@@ -1261,3 +1277,31 @@ $(SUBDIR)qdm2.o: $(SUBDIR)qdm2_tables.h $(SUBDIR)sinewin.o: $(SUBDIR)sinewin_tables.h $(SUBDIR)sinewin_fixed.o: $(SUBDIR)sinewin_fixed_tables.h endif @@ -593,7 +445,7 @@ index 5a6ea59715..c9d056101d 100644 +$(SUBDIR)rpi_hevcdec.o $(SUBDIR)rpi_shader_template.o $(SUBDIR)rpi_qpu.o: $(SUBDIR)rpi_hevc_shader.h +endif diff --git a/libavcodec/allcodecs.c b/libavcodec/allcodecs.c -index 80f128cade..ac4cf9a90e 100644 +index fa0c08d42e..89a25a398d 100644 --- a/libavcodec/allcodecs.c +++ b/libavcodec/allcodecs.c @@ -149,6 +149,7 @@ extern AVCodec ff_hap_decoder; @@ -15436,20 +15288,10 @@ index 0000000000..af8c4c03f0 + +endfunc diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h -index c91b2fd169..d6e019bbe1 100644 +index c91b2fd169..003079cdc6 100644 --- a/libavcodec/avcodec.h +++ b/libavcodec/avcodec.h -@@ -2236,8 +2236,7 @@ typedef struct AVCodecContext { - #define FF_SUB_TEXT_FMT_ASS_WITH_TIMINGS 1 - #endif - -- /** -- * Audio only. The amount of padding (in samples) appended by the encoder to -+ /* Audio only. The amount of padding (in samples) appended by the encoder to - * the end of the audio. I.e. this number of decoded samples must be - * discarded by the caller from the end of the stream to get the original - * audio without any trailing padding. 
-@@ -2567,6 +2566,17 @@ typedef struct AVHWAccel { +@@ -2567,6 +2567,17 @@ typedef struct AVHWAccel { * that avctx->hwaccel_priv_data is invalid. */ int (*frame_params)(AVCodecContext *avctx, AVBufferRef *hw_frames_ctx); @@ -15509,376 +15351,6 @@ index 1fda619ee7..b4650f9ec9 100644 /** * Find a registered decoder with the specified name. * -diff --git a/libavcodec/h264-ctrls.h b/libavcodec/h264-ctrls.h -new file mode 100644 -index 0000000000..ec47991544 ---- /dev/null -+++ b/libavcodec/h264-ctrls.h -@@ -0,0 +1,231 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* -+ * These are the H.264 state controls for use with stateless H.264 -+ * codec drivers. -+ * -+ * It turns out that these structs are not stable yet and will undergo -+ * more changes. So keep them private until they are stable and ready to -+ * become part of the official public API. -+ */ -+ -+#ifndef _H264_CTRLS_H_ -+#define _H264_CTRLS_H_ -+ -+#include -+ -+/* -+ * Maximum DPB size, as specified by section 'A.3.1 Level limits -+ * common to the Baseline, Main, and Extended profiles'. -+ */ -+#define V4L2_H264_NUM_DPB_ENTRIES 16 -+ -+#define V4L2_H264_REF_LIST_LEN (2 * V4L2_H264_NUM_DPB_ENTRIES) -+ -+/* Our pixel format isn't stable at the moment */ -+#define V4L2_PIX_FMT_H264_SLICE v4l2_fourcc('S', '2', '6', '4') /* H264 parsed slices */ -+ -+/* -+ * This is put insanely high to avoid conflicting with controls that -+ * would be added during the phase where those controls are not -+ * stable. It should be fixed eventually. -+ */ -+#define V4L2_CID_MPEG_VIDEO_H264_SPS (V4L2_CID_MPEG_BASE+1000) -+#define V4L2_CID_MPEG_VIDEO_H264_PPS (V4L2_CID_MPEG_BASE+1001) -+#define V4L2_CID_MPEG_VIDEO_H264_SCALING_MATRIX (V4L2_CID_MPEG_BASE+1002) -+#define V4L2_CID_MPEG_VIDEO_H264_SLICE_PARAMS (V4L2_CID_MPEG_BASE+1003) -+#define V4L2_CID_MPEG_VIDEO_H264_DECODE_PARAMS (V4L2_CID_MPEG_BASE+1004) -+#define V4L2_CID_MPEG_VIDEO_H264_DECODE_MODE (V4L2_CID_MPEG_BASE+1005) -+#define V4L2_CID_MPEG_VIDEO_H264_START_CODE (V4L2_CID_MPEG_BASE+1006) -+#define V4L2_CID_MPEG_VIDEO_H264_PRED_WEIGHTS (V4L2_CID_MPEG_BASE+1007) -+ -+/* enum v4l2_ctrl_type type values */ -+#define V4L2_CTRL_TYPE_H264_SPS 0x0110 -+#define V4L2_CTRL_TYPE_H264_PPS 0x0111 -+#define V4L2_CTRL_TYPE_H264_SCALING_MATRIX 0x0112 -+#define V4L2_CTRL_TYPE_H264_SLICE_PARAMS 0x0113 -+#define V4L2_CTRL_TYPE_H264_DECODE_PARAMS 0x0114 -+#define V4L2_CTRL_TYPE_H264_PRED_WEIGHTS 0x0115 -+ -+enum v4l2_mpeg_video_h264_decode_mode { -+ V4L2_MPEG_VIDEO_H264_DECODE_MODE_SLICE_BASED, -+ V4L2_MPEG_VIDEO_H264_DECODE_MODE_FRAME_BASED, -+}; -+ -+enum v4l2_mpeg_video_h264_start_code { -+ V4L2_MPEG_VIDEO_H264_START_CODE_NONE, -+ V4L2_MPEG_VIDEO_H264_START_CODE_ANNEX_B, -+}; -+ -+#define V4L2_H264_SPS_CONSTRAINT_SET0_FLAG 0x01 -+#define V4L2_H264_SPS_CONSTRAINT_SET1_FLAG 0x02 -+#define V4L2_H264_SPS_CONSTRAINT_SET2_FLAG 0x04 -+#define V4L2_H264_SPS_CONSTRAINT_SET3_FLAG 0x08 -+#define V4L2_H264_SPS_CONSTRAINT_SET4_FLAG 0x10 -+#define V4L2_H264_SPS_CONSTRAINT_SET5_FLAG 0x20 -+ -+#define V4L2_H264_SPS_FLAG_SEPARATE_COLOUR_PLANE 0x01 -+#define V4L2_H264_SPS_FLAG_QPPRIME_Y_ZERO_TRANSFORM_BYPASS 0x02 -+#define V4L2_H264_SPS_FLAG_DELTA_PIC_ORDER_ALWAYS_ZERO 0x04 -+#define V4L2_H264_SPS_FLAG_GAPS_IN_FRAME_NUM_VALUE_ALLOWED 0x08 -+#define V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY 0x10 -+#define V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD 0x20 -+#define V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE 0x40 -+ -+struct v4l2_ctrl_h264_sps { -+ __u8 profile_idc; -+ __u8 constraint_set_flags; -+ __u8 level_idc; -+ __u8 seq_parameter_set_id; -+ 
__u8 chroma_format_idc; -+ __u8 bit_depth_luma_minus8; -+ __u8 bit_depth_chroma_minus8; -+ __u8 log2_max_frame_num_minus4; -+ __u8 pic_order_cnt_type; -+ __u8 log2_max_pic_order_cnt_lsb_minus4; -+ __u8 max_num_ref_frames; -+ __u8 num_ref_frames_in_pic_order_cnt_cycle; -+ __s32 offset_for_ref_frame[255]; -+ __s32 offset_for_non_ref_pic; -+ __s32 offset_for_top_to_bottom_field; -+ __u16 pic_width_in_mbs_minus1; -+ __u16 pic_height_in_map_units_minus1; -+ __u32 flags; -+}; -+ -+#define V4L2_H264_PPS_FLAG_ENTROPY_CODING_MODE 0x0001 -+#define V4L2_H264_PPS_FLAG_BOTTOM_FIELD_PIC_ORDER_IN_FRAME_PRESENT 0x0002 -+#define V4L2_H264_PPS_FLAG_WEIGHTED_PRED 0x0004 -+#define V4L2_H264_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT 0x0008 -+#define V4L2_H264_PPS_FLAG_CONSTRAINED_INTRA_PRED 0x0010 -+#define V4L2_H264_PPS_FLAG_REDUNDANT_PIC_CNT_PRESENT 0x0020 -+#define V4L2_H264_PPS_FLAG_TRANSFORM_8X8_MODE 0x0040 -+#define V4L2_H264_PPS_FLAG_SCALING_MATRIX_PRESENT 0x0080 -+ -+struct v4l2_ctrl_h264_pps { -+ __u8 pic_parameter_set_id; -+ __u8 seq_parameter_set_id; -+ __u8 num_slice_groups_minus1; -+ __u8 num_ref_idx_l0_default_active_minus1; -+ __u8 num_ref_idx_l1_default_active_minus1; -+ __u8 weighted_bipred_idc; -+ __s8 pic_init_qp_minus26; -+ __s8 pic_init_qs_minus26; -+ __s8 chroma_qp_index_offset; -+ __s8 second_chroma_qp_index_offset; -+ __u16 flags; -+}; -+ -+struct v4l2_ctrl_h264_scaling_matrix { -+ __u8 scaling_list_4x4[6][16]; -+ __u8 scaling_list_8x8[6][64]; -+}; -+ -+struct v4l2_h264_weight_factors { -+ __s16 luma_weight[32]; -+ __s16 luma_offset[32]; -+ __s16 chroma_weight[32][2]; -+ __s16 chroma_offset[32][2]; -+}; -+ -+#define V4L2_H264_CTRL_PRED_WEIGHTS_REQUIRED(pps, slice) \ -+ ((((pps)->flags & V4L2_H264_PPS_FLAG_WEIGHTED_PRED) && \ -+ ((slice)->slice_type == V4L2_H264_SLICE_TYPE_P || \ -+ (slice)->slice_type == V4L2_H264_SLICE_TYPE_SP)) || \ -+ ((pps)->weighted_bipred_idc == 1 && \ -+ (slice)->slice_type == V4L2_H264_SLICE_TYPE_B)) -+ -+struct v4l2_ctrl_h264_pred_weights { -+ __u16 luma_log2_weight_denom; -+ __u16 chroma_log2_weight_denom; -+ struct v4l2_h264_weight_factors weight_factors[2]; -+}; -+ -+#define V4L2_H264_SLICE_TYPE_P 0 -+#define V4L2_H264_SLICE_TYPE_B 1 -+#define V4L2_H264_SLICE_TYPE_I 2 -+#define V4L2_H264_SLICE_TYPE_SP 3 -+#define V4L2_H264_SLICE_TYPE_SI 4 -+ -+#define V4L2_H264_SLICE_FLAG_DIRECT_SPATIAL_MV_PRED 0x01 -+#define V4L2_H264_SLICE_FLAG_SP_FOR_SWITCH 0x02 -+ -+#define V4L2_H264_TOP_FIELD_REF 0x1 -+#define V4L2_H264_BOTTOM_FIELD_REF 0x2 -+#define V4L2_H264_FRAME_REF 0x3 -+ -+struct v4l2_h264_reference { -+ __u8 fields; -+ -+ /* Index into v4l2_ctrl_h264_decode_params.dpb[] */ -+ __u8 index; -+}; -+ -+struct v4l2_ctrl_h264_slice_params { -+ /* Offset in bits to slice_data() from the beginning of this slice. 
*/ -+ __u32 header_bit_size; -+ -+ __u32 first_mb_in_slice; -+ -+ __u8 slice_type; -+ __u8 colour_plane_id; -+ __u8 redundant_pic_cnt; -+ __u8 cabac_init_idc; -+ __s8 slice_qp_delta; -+ __s8 slice_qs_delta; -+ __u8 disable_deblocking_filter_idc; -+ __s8 slice_alpha_c0_offset_div2; -+ __s8 slice_beta_offset_div2; -+ __u8 num_ref_idx_l0_active_minus1; -+ __u8 num_ref_idx_l1_active_minus1; -+ -+ __u8 reserved; -+ -+ struct v4l2_h264_reference ref_pic_list0[V4L2_H264_REF_LIST_LEN]; -+ struct v4l2_h264_reference ref_pic_list1[V4L2_H264_REF_LIST_LEN]; -+ -+ __u32 flags; -+}; -+ -+#define V4L2_H264_DPB_ENTRY_FLAG_VALID 0x01 -+#define V4L2_H264_DPB_ENTRY_FLAG_ACTIVE 0x02 -+#define V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM 0x04 -+#define V4L2_H264_DPB_ENTRY_FLAG_FIELD 0x08 -+ -+struct v4l2_h264_dpb_entry { -+ __u64 reference_ts; -+ __u32 pic_num; -+ __u16 frame_num; -+ __u8 fields; -+ __u8 reserved[5]; -+ /* Note that field is indicated by v4l2_buffer.field */ -+ __s32 top_field_order_cnt; -+ __s32 bottom_field_order_cnt; -+ __u32 flags; /* V4L2_H264_DPB_ENTRY_FLAG_* */ -+}; -+ -+#define V4L2_H264_DECODE_PARAM_FLAG_IDR_PIC 0x01 -+#define V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC 0x02 -+#define V4L2_H264_DECODE_PARAM_FLAG_BOTTOM_FIELD 0x04 -+ -+struct v4l2_ctrl_h264_decode_params { -+ struct v4l2_h264_dpb_entry dpb[V4L2_H264_NUM_DPB_ENTRIES]; -+ __u16 nal_ref_idc; -+ __u16 frame_num; -+ __s32 top_field_order_cnt; -+ __s32 bottom_field_order_cnt; -+ __u16 idr_pic_id; -+ __u16 pic_order_cnt_lsb; -+ __s32 delta_pic_order_cnt_bottom; -+ __s32 delta_pic_order_cnt0; -+ __s32 delta_pic_order_cnt1; -+ /* Size in bits of dec_ref_pic_marking() syntax element. */ -+ __u32 dec_ref_pic_marking_bit_size; -+ /* Size in bits of pic order count syntax. */ -+ __u32 pic_order_cnt_bit_size; -+ __u32 slice_group_change_cycle; -+ -+ __u32 reserved; -+ __u32 flags; /* V4L2_H264_DECODE_PARAM_FLAG_* */ -+}; -+ -+#endif -diff --git a/libavcodec/h264_slice.c b/libavcodec/h264_slice.c -index db8363e4cc..39ae8fabfd 100644 ---- a/libavcodec/h264_slice.c -+++ b/libavcodec/h264_slice.c -@@ -759,6 +759,7 @@ static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback) - #define HWACCEL_MAX (CONFIG_H264_DXVA2_HWACCEL + \ - (CONFIG_H264_D3D11VA_HWACCEL * 2) + \ - CONFIG_H264_NVDEC_HWACCEL + \ -+ CONFIG_H264_V4L2REQUEST_HWACCEL + \ - CONFIG_H264_VAAPI_HWACCEL + \ - CONFIG_H264_VIDEOTOOLBOX_HWACCEL + \ - CONFIG_H264_VDPAU_HWACCEL) -@@ -784,10 +785,17 @@ static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback) - *fmt++ = AV_PIX_FMT_GBRP10; - } else - *fmt++ = AV_PIX_FMT_YUV444P10; -- } else if (CHROMA422(h)) -+ } else if (CHROMA422(h)) { -+#if CONFIG_H264_V4L2REQUEST_HWACCEL -+ *fmt++ = AV_PIX_FMT_DRM_PRIME; -+#endif - *fmt++ = AV_PIX_FMT_YUV422P10; -- else -+ } else { -+#if CONFIG_H264_V4L2REQUEST_HWACCEL -+ *fmt++ = AV_PIX_FMT_DRM_PRIME; -+#endif - *fmt++ = AV_PIX_FMT_YUV420P10; -+ } - break; - case 12: - if (CHROMA444(h)) { -@@ -826,6 +834,9 @@ static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback) - else - *fmt++ = AV_PIX_FMT_YUV444P; - } else if (CHROMA422(h)) { -+#if CONFIG_H264_V4L2REQUEST_HWACCEL -+ *fmt++ = AV_PIX_FMT_DRM_PRIME; -+#endif - if (h->avctx->color_range == AVCOL_RANGE_JPEG) - *fmt++ = AV_PIX_FMT_YUVJ422P; - else -@@ -843,6 +854,9 @@ static enum AVPixelFormat get_pixel_format(H264Context *h, int force_callback) - #endif - #if CONFIG_H264_VIDEOTOOLBOX_HWACCEL - *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX; -+#endif -+#if CONFIG_H264_V4L2REQUEST_HWACCEL -+ *fmt++ = 
AV_PIX_FMT_DRM_PRIME; - #endif - if (h->avctx->codec->pix_fmts) - choices = h->avctx->codec->pix_fmts; -@@ -1736,7 +1750,7 @@ static int h264_slice_header_parse(const H264Context *h, H264SliceContext *sl, - unsigned int slice_type, tmp, i; - int field_pic_flag, bottom_field_flag; - int first_slice = sl == h->slice_ctx && !h->current_slice; -- int picture_structure; -+ int picture_structure, pos; - - if (first_slice) - av_assert0(!h->setup_finished); -@@ -1818,8 +1832,9 @@ static int h264_slice_header_parse(const H264Context *h, H264SliceContext *sl, - } - - if (nal->type == H264_NAL_IDR_SLICE) -- get_ue_golomb_long(&sl->gb); /* idr_pic_id */ -+ sl->idr_pic_id = get_ue_golomb_long(&sl->gb); - -+ pos = sl->gb.index; - if (sps->poc_type == 0) { - sl->poc_lsb = get_bits(&sl->gb, sps->log2_max_poc_lsb); - -@@ -1833,6 +1848,7 @@ static int h264_slice_header_parse(const H264Context *h, H264SliceContext *sl, - if (pps->pic_order_present == 1 && picture_structure == PICT_FRAME) - sl->delta_poc[1] = get_se_golomb(&sl->gb); - } -+ sl->pic_order_cnt_bit_size = sl->gb.index - pos; - - sl->redundant_pic_count = 0; - if (pps->redundant_pic_cnt_present) -@@ -1872,9 +1888,11 @@ static int h264_slice_header_parse(const H264Context *h, H264SliceContext *sl, - - sl->explicit_ref_marking = 0; - if (nal->ref_idc) { -+ int bit_pos = sl->gb.index; - ret = ff_h264_decode_ref_pic_marking(sl, &sl->gb, nal, h->avctx); - if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE)) - return AVERROR_INVALIDDATA; -+ sl->ref_pic_marking_size_in_bits = sl->gb.index - bit_pos; - } - - if (sl->slice_type_nos != AV_PICTURE_TYPE_I && pps->cabac) { -diff --git a/libavcodec/h264dec.c b/libavcodec/h264dec.c -index 5eedeb3c27..a504c89565 100644 ---- a/libavcodec/h264dec.c -+++ b/libavcodec/h264dec.c -@@ -1102,6 +1102,9 @@ AVCodec ff_h264_decoder = { - #endif - #if CONFIG_H264_VIDEOTOOLBOX_HWACCEL - HWACCEL_VIDEOTOOLBOX(h264), -+#endif -+#if CONFIG_H264_V4L2REQUEST_HWACCEL -+ HWACCEL_V4L2REQUEST(h264), - #endif - NULL - }, -diff --git a/libavcodec/h264dec.h b/libavcodec/h264dec.h -index a419615124..b3dcd6e7da 100644 ---- a/libavcodec/h264dec.h -+++ b/libavcodec/h264dec.h -@@ -190,6 +190,8 @@ typedef struct H264SliceContext { - int slice_type_nos; ///< S free slice type (SI/SP are remapped to I/P) - int slice_type_fixed; - -+ int idr_pic_id; -+ - int qscale; - int chroma_qp[2]; // QPc - int qp_thresh; ///< QP threshold to skip loopfilter -@@ -328,11 +330,13 @@ typedef struct H264SliceContext { - MMCO mmco[MAX_MMCO_COUNT]; - int nb_mmco; - int explicit_ref_marking; -+ int ref_pic_marking_size_in_bits; - - int frame_num; - int poc_lsb; - int delta_poc_bottom; - int delta_poc[2]; -+ int pic_order_cnt_bit_size; - int curr_pic_num; - int max_pic_num; - } H264SliceContext; diff --git a/libavcodec/hevc-ctrls.h b/libavcodec/hevc-ctrls.h new file mode 100644 index 0000000000..13698d3f33 @@ -16115,11 +15587,55 @@ index 0000000000..13698d3f33 +}; + +#endif +diff --git a/libavcodec/hevc_parser.c b/libavcodec/hevc_parser.c +index 5af4b788d5..c7314a6af8 100644 +--- a/libavcodec/hevc_parser.c ++++ b/libavcodec/hevc_parser.c +@@ -98,6 +98,19 @@ static int hevc_parse_slice_header(AVCodecParserContext *s, H2645NAL *nal, + avctx->profile = ps->sps->ptl.general_ptl.profile_idc; + avctx->level = ps->sps->ptl.general_ptl.level_idc; + ++ if (ps->sps->chroma_format_idc == 1) { ++ avctx->chroma_sample_location = ps->sps->vui.chroma_loc_info_present_flag ? 
++ ps->sps->vui.chroma_sample_loc_type_top_field + 1 : ++ AVCHROMA_LOC_LEFT; ++ } ++ else if (ps->sps->chroma_format_idc == 2 || ++ ps->sps->chroma_format_idc == 3) { ++ avctx->chroma_sample_location = AVCHROMA_LOC_TOPLEFT;; ++ } ++ else { ++ avctx->chroma_sample_location = AVCHROMA_LOC_UNSPECIFIED; ++ } ++ + if (ps->vps->vps_timing_info_present_flag) { + num = ps->vps->vps_num_units_in_tick; + den = ps->vps->vps_time_scale; diff --git a/libavcodec/hevcdec.c b/libavcodec/hevcdec.c -index 0772608a30..c30fb2a83f 100644 +index 1eaeaf72f1..b6871ff2e2 100644 --- a/libavcodec/hevcdec.c +++ b/libavcodec/hevcdec.c -@@ -372,14 +372,20 @@ static enum AVPixelFormat get_format(HEVCContext *s, const HEVCSPS *sps) +@@ -332,6 +332,19 @@ static void export_stream_params(HEVCContext *s, const HEVCSPS *sps) + + ff_set_sar(avctx, sps->vui.sar); + ++ if (sps->chroma_format_idc == 1) { ++ avctx->chroma_sample_location = sps->vui.chroma_loc_info_present_flag ? ++ sps->vui.chroma_sample_loc_type_top_field + 1 : ++ AVCHROMA_LOC_LEFT; ++ } ++ else if (sps->chroma_format_idc == 2 || ++ sps->chroma_format_idc == 3) { ++ avctx->chroma_sample_location = AVCHROMA_LOC_TOPLEFT;; ++ } ++ else { ++ avctx->chroma_sample_location = AVCHROMA_LOC_UNSPECIFIED; ++ } ++ + if (sps->vui.video_signal_type_present_flag) + avctx->color_range = sps->vui.video_full_range_flag ? AVCOL_RANGE_JPEG + : AVCOL_RANGE_MPEG; +@@ -372,14 +385,20 @@ static enum AVPixelFormat get_format(HEVCContext *s, const HEVCSPS *sps) #define HWACCEL_MAX (CONFIG_HEVC_DXVA2_HWACCEL + \ CONFIG_HEVC_D3D11VA_HWACCEL * 2 + \ CONFIG_HEVC_NVDEC_HWACCEL + \ @@ -16140,7 +15656,7 @@ index 0772608a30..c30fb2a83f 100644 #if CONFIG_HEVC_DXVA2_HWACCEL *fmt++ = AV_PIX_FMT_DXVA2_VLD; #endif -@@ -398,9 +404,15 @@ static enum AVPixelFormat get_format(HEVCContext *s, const HEVCSPS *sps) +@@ -398,9 +417,15 @@ static enum AVPixelFormat get_format(HEVCContext *s, const HEVCSPS *sps) #endif #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL *fmt++ = AV_PIX_FMT_VIDEOTOOLBOX; @@ -16156,7 +15672,7 @@ index 0772608a30..c30fb2a83f 100644 #if CONFIG_HEVC_DXVA2_HWACCEL *fmt++ = AV_PIX_FMT_DXVA2_VLD; #endif -@@ -416,6 +428,9 @@ static enum AVPixelFormat get_format(HEVCContext *s, const HEVCSPS *sps) +@@ -416,6 +441,9 @@ static enum AVPixelFormat get_format(HEVCContext *s, const HEVCSPS *sps) #endif #if CONFIG_HEVC_NVDEC_HWACCEL *fmt++ = AV_PIX_FMT_CUDA; @@ -16166,7 +15682,7 @@ index 0772608a30..c30fb2a83f 100644 #endif break; case AV_PIX_FMT_YUV444P: -@@ -3225,7 +3240,14 @@ static int hevc_decode_frame(AVCodecContext *avctx, void *data, int *got_output, +@@ -3230,7 +3258,14 @@ static int hevc_decode_frame(AVCodecContext *avctx, void *data, int *got_output, s->ref = NULL; ret = decode_nal_units(s, avpkt->data, avpkt->size); if (ret < 0) @@ -16181,11 +15697,10 @@ index 0772608a30..c30fb2a83f 100644 if (avctx->hwaccel) { if (s->ref && (ret = avctx->hwaccel->end_frame(avctx)) < 0) { -@@ -3588,6 +3610,15 @@ AVCodec ff_hevc_decoder = { +@@ -3585,6 +3620,15 @@ AVCodec ff_hevc_decoder = { + #if CONFIG_HEVC_NVDEC_HWACCEL + HWACCEL_NVDEC(hevc), #endif - #if CONFIG_HEVC_VIDEOTOOLBOX_HWACCEL - HWACCEL_VIDEOTOOLBOX(hevc), -+#endif +#if CONFIG_HEVC_RPI4_8_HWACCEL + HWACCEL_RPI4_8(hevc), +#endif @@ -16194,61 +15709,26 @@ index 0772608a30..c30fb2a83f 100644 +#endif +#if CONFIG_HEVC_V4L2REQUEST_HWACCEL + HWACCEL_V4L2REQUEST(hevc), ++#endif + #if CONFIG_HEVC_VAAPI_HWACCEL + HWACCEL_VAAPI(hevc), #endif - NULL - }, diff --git a/libavcodec/hwaccels.h b/libavcodec/hwaccels.h -index 6109c89bd6..30927fda99 100644 +index 
6109c89bd6..81d200d5ec 100644 --- a/libavcodec/hwaccels.h +++ b/libavcodec/hwaccels.h -@@ -27,6 +27,7 @@ extern const AVHWAccel ff_h264_d3d11va_hwaccel; - extern const AVHWAccel ff_h264_d3d11va2_hwaccel; - extern const AVHWAccel ff_h264_dxva2_hwaccel; - extern const AVHWAccel ff_h264_nvdec_hwaccel; -+extern const AVHWAccel ff_h264_v4l2request_hwaccel; - extern const AVHWAccel ff_h264_vaapi_hwaccel; - extern const AVHWAccel ff_h264_vdpau_hwaccel; - extern const AVHWAccel ff_h264_videotoolbox_hwaccel; -@@ -34,6 +35,7 @@ extern const AVHWAccel ff_hevc_d3d11va_hwaccel; +@@ -34,6 +34,9 @@ extern const AVHWAccel ff_hevc_d3d11va_hwaccel; extern const AVHWAccel ff_hevc_d3d11va2_hwaccel; extern const AVHWAccel ff_hevc_dxva2_hwaccel; extern const AVHWAccel ff_hevc_nvdec_hwaccel; ++extern const AVHWAccel ff_hevc_rpi4_8_hwaccel; ++extern const AVHWAccel ff_hevc_rpi4_10_hwaccel; +extern const AVHWAccel ff_hevc_v4l2request_hwaccel; extern const AVHWAccel ff_hevc_vaapi_hwaccel; extern const AVHWAccel ff_hevc_vdpau_hwaccel; extern const AVHWAccel ff_hevc_videotoolbox_hwaccel; -@@ -47,6 +49,7 @@ extern const AVHWAccel ff_mpeg2_d3d11va_hwaccel; - extern const AVHWAccel ff_mpeg2_d3d11va2_hwaccel; - extern const AVHWAccel ff_mpeg2_nvdec_hwaccel; - extern const AVHWAccel ff_mpeg2_dxva2_hwaccel; -+extern const AVHWAccel ff_mpeg2_v4l2request_hwaccel; - extern const AVHWAccel ff_mpeg2_vaapi_hwaccel; - extern const AVHWAccel ff_mpeg2_vdpau_hwaccel; - extern const AVHWAccel ff_mpeg2_videotoolbox_hwaccel; -@@ -62,11 +65,13 @@ extern const AVHWAccel ff_vc1_nvdec_hwaccel; - extern const AVHWAccel ff_vc1_vaapi_hwaccel; - extern const AVHWAccel ff_vc1_vdpau_hwaccel; - extern const AVHWAccel ff_vp8_nvdec_hwaccel; -+extern const AVHWAccel ff_vp8_v4l2request_hwaccel; - extern const AVHWAccel ff_vp8_vaapi_hwaccel; - extern const AVHWAccel ff_vp9_d3d11va_hwaccel; - extern const AVHWAccel ff_vp9_d3d11va2_hwaccel; - extern const AVHWAccel ff_vp9_dxva2_hwaccel; - extern const AVHWAccel ff_vp9_nvdec_hwaccel; -+extern const AVHWAccel ff_vp9_v4l2request_hwaccel; - extern const AVHWAccel ff_vp9_vaapi_hwaccel; - extern const AVHWAccel ff_vp9_vdpau_hwaccel; - extern const AVHWAccel ff_wmv3_d3d11va_hwaccel; -@@ -75,5 +80,7 @@ extern const AVHWAccel ff_wmv3_dxva2_hwaccel; - extern const AVHWAccel ff_wmv3_nvdec_hwaccel; - extern const AVHWAccel ff_wmv3_vaapi_hwaccel; - extern const AVHWAccel ff_wmv3_vdpau_hwaccel; -+extern const AVHWAccel ff_hevc_rpi4_8_hwaccel; -+extern const AVHWAccel ff_hevc_rpi4_10_hwaccel; - - #endif /* AVCODEC_HWACCELS_H */ diff --git a/libavcodec/hwconfig.h b/libavcodec/hwconfig.h -index f421dc909f..ed44e01de4 100644 +index f421dc909f..f93283b893 100644 --- a/libavcodec/hwconfig.h +++ b/libavcodec/hwconfig.h @@ -24,6 +24,7 @@ @@ -16259,19 +15739,19 @@ index f421dc909f..ed44e01de4 100644 typedef struct AVCodecHWConfigInternal { -@@ -80,6 +81,12 @@ typedef struct AVCodecHWConfigInternal { - HW_CONFIG_HWACCEL(0, 0, 1, D3D11VA_VLD, NONE, ff_ ## codec ## _d3d11va_hwaccel) - #define HWACCEL_XVMC(codec) \ - HW_CONFIG_HWACCEL(0, 0, 1, XVMC, NONE, ff_ ## codec ## _xvmc_hwaccel) -+#define HWACCEL_V4L2REQUEST(codec) \ -+ HW_CONFIG_HWACCEL(1, 0, 0, DRM_PRIME, DRM, ff_ ## codec ## _v4l2request_hwaccel) +@@ -70,6 +71,12 @@ typedef struct AVCodecHWConfigInternal { + HW_CONFIG_HWACCEL(1, 1, 0, D3D11, D3D11VA, ff_ ## codec ## _d3d11va2_hwaccel) + #define HWACCEL_NVDEC(codec) \ + HW_CONFIG_HWACCEL(1, 1, 0, CUDA, CUDA, ff_ ## codec ## _nvdec_hwaccel) +#define HWACCEL_RPI4_8(codec) \ + HW_CONFIG_HWACCEL(0, 0, 1, RPI4_8, NONE, ff_ ## 
codec ## _rpi4_8_hwaccel) +#define HWACCEL_RPI4_10(codec) \ + HW_CONFIG_HWACCEL(0, 0, 1, RPI4_10, NONE, ff_ ## codec ## _rpi4_10_hwaccel) - - #define HW_CONFIG_ENCODER(device, frames, ad_hoc, format, device_type_) \ - &(const AVCodecHWConfigInternal) { \ ++#define HWACCEL_V4L2REQUEST(codec) \ ++ HW_CONFIG_HWACCEL(1, 0, 0, DRM_PRIME, DRM, ff_ ## codec ## _v4l2request_hwaccel) + #define HWACCEL_VAAPI(codec) \ + HW_CONFIG_HWACCEL(1, 1, 1, VAAPI, VAAPI, ff_ ## codec ## _vaapi_hwaccel) + #define HWACCEL_VDPAU(codec) \ diff --git a/libavcodec/mmaldec.c b/libavcodec/mmaldec.c index 547bece576..bfd1083c16 100644 --- a/libavcodec/mmaldec.c @@ -16294,118 +15774,6 @@ index 547bece576..bfd1083c16 100644 #include #include "avcodec.h" -diff --git a/libavcodec/mpeg12dec.c b/libavcodec/mpeg12dec.c -index 99e56532a5..15aaf97a34 100644 ---- a/libavcodec/mpeg12dec.c -+++ b/libavcodec/mpeg12dec.c -@@ -1154,6 +1154,9 @@ static const enum AVPixelFormat mpeg2_hwaccel_pixfmt_list_420[] = { - #endif - #if CONFIG_MPEG2_VIDEOTOOLBOX_HWACCEL - AV_PIX_FMT_VIDEOTOOLBOX, -+#endif -+#if CONFIG_MPEG2_V4L2REQUEST_HWACCEL -+ AV_PIX_FMT_DRM_PRIME, - #endif - AV_PIX_FMT_YUV420P, - AV_PIX_FMT_NONE -@@ -2952,6 +2955,9 @@ AVCodec ff_mpeg2video_decoder = { - #endif - #if CONFIG_MPEG2_XVMC_HWACCEL - HWACCEL_XVMC(mpeg2), -+#endif -+#if CONFIG_MPEG2_V4L2REQUEST_HWACCEL -+ HWACCEL_V4L2REQUEST(mpeg2), - #endif - NULL - }, -diff --git a/libavcodec/mpeg2-ctrls.h b/libavcodec/mpeg2-ctrls.h -new file mode 100644 -index 0000000000..6601455b3d ---- /dev/null -+++ b/libavcodec/mpeg2-ctrls.h -@@ -0,0 +1,82 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* -+ * These are the MPEG2 state controls for use with stateless MPEG-2 -+ * codec drivers. -+ * -+ * It turns out that these structs are not stable yet and will undergo -+ * more changes. So keep them private until they are stable and ready to -+ * become part of the official public API. -+ */ -+ -+#ifndef _MPEG2_CTRLS_H_ -+#define _MPEG2_CTRLS_H_ -+ -+#define V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS (V4L2_CID_MPEG_BASE+250) -+#define V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION (V4L2_CID_MPEG_BASE+251) -+ -+/* enum v4l2_ctrl_type type values */ -+#define V4L2_CTRL_TYPE_MPEG2_SLICE_PARAMS 0x0103 -+#define V4L2_CTRL_TYPE_MPEG2_QUANTIZATION 0x0104 -+ -+#define V4L2_MPEG2_PICTURE_CODING_TYPE_I 1 -+#define V4L2_MPEG2_PICTURE_CODING_TYPE_P 2 -+#define V4L2_MPEG2_PICTURE_CODING_TYPE_B 3 -+#define V4L2_MPEG2_PICTURE_CODING_TYPE_D 4 -+ -+struct v4l2_mpeg2_sequence { -+ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence header */ -+ __u16 horizontal_size; -+ __u16 vertical_size; -+ __u32 vbv_buffer_size; -+ -+ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence extension */ -+ __u16 profile_and_level_indication; -+ __u8 progressive_sequence; -+ __u8 chroma_format; -+}; -+ -+struct v4l2_mpeg2_picture { -+ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture header */ -+ __u8 picture_coding_type; -+ -+ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture coding extension */ -+ __u8 f_code[2][2]; -+ __u8 intra_dc_precision; -+ __u8 picture_structure; -+ __u8 top_field_first; -+ __u8 frame_pred_frame_dct; -+ __u8 concealment_motion_vectors; -+ __u8 q_scale_type; -+ __u8 intra_vlc_format; -+ __u8 alternate_scan; -+ __u8 repeat_first_field; -+ __u16 progressive_frame; -+}; -+ -+struct v4l2_ctrl_mpeg2_slice_params { -+ __u32 bit_size; -+ __u32 data_bit_offset; -+ __u64 backward_ref_ts; -+ __u64 forward_ref_ts; -+ -+ struct v4l2_mpeg2_sequence sequence; -+ struct v4l2_mpeg2_picture picture; -+ -+ /* ISO/IEC 13818-2, ITU-T Rec. 
H.262: Slice */ -+ __u32 quantiser_scale_code; -+}; -+ -+struct v4l2_ctrl_mpeg2_quantization { -+ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Quant matrix extension */ -+ __u8 load_intra_quantiser_matrix; -+ __u8 load_non_intra_quantiser_matrix; -+ __u8 load_chroma_intra_quantiser_matrix; -+ __u8 load_chroma_non_intra_quantiser_matrix; -+ -+ __u8 intra_quantiser_matrix[64]; -+ __u8 non_intra_quantiser_matrix[64]; -+ __u8 chroma_intra_quantiser_matrix[64]; -+ __u8 chroma_non_intra_quantiser_matrix[64]; -+}; -+ -+#endif diff --git a/libavcodec/pthread_frame.c b/libavcodec/pthread_frame.c index 601f170447..f890f99931 100644 --- a/libavcodec/pthread_frame.c @@ -16432,26 +15800,18 @@ index 601f170447..f890f99931 100644 p->hwaccel_serializing = 1; } diff --git a/libavcodec/raw.c b/libavcodec/raw.c -index b6fb91c1c6..7b2770e780 100644 +index b6fb91c1c6..f3afb6e09a 100644 --- a/libavcodec/raw.c +++ b/libavcodec/raw.c -@@ -289,10 +289,20 @@ const PixelFormatTag ff_raw_pix_fmt_tags[] = { - { AV_PIX_FMT_YUV444P16LE, MKTAG('I', '4', 'F', 'L') }, - { AV_PIX_FMT_YUV444P16BE, MKTAG('I', '4', 'F', 'B') }, - -+ /* RPI (Might as well define for everything) */ -+ { AV_PIX_FMT_SAND128, MKTAG('S', 'A', 'N', 'D') }, -+ { AV_PIX_FMT_SAND64_10, MKTAG('S', 'N', 'D', 'A') }, -+ - /* special */ +@@ -293,6 +293,12 @@ const PixelFormatTag ff_raw_pix_fmt_tags[] = { { AV_PIX_FMT_RGB565LE,MKTAG( 3 , 0 , 0 , 0 ) }, /* flipped RGB565LE */ { AV_PIX_FMT_YUV444P, MKTAG('Y', 'V', '2', '4') }, /* YUV444P, swapped UV */ -+ /* RPI (Might as well define for everything) */ -+ { AV_PIX_FMT_SAND128, MKTAG('S', 'A', 'N', 'D') }, -+ { AV_PIX_FMT_RPI4_8, MKTAG('S', 'A', 'N', 'D') }, -+ { AV_PIX_FMT_SAND64_10, MKTAG('S', 'N', 'D', 'A') }, -+ { AV_PIX_FMT_RPI4_10, MKTAG('S', 'N', 'D', 'B') }, ++ /* RPI (Might as well define for everything) */ ++ { AV_PIX_FMT_SAND128, MKTAG('S', 'A', 'N', 'D') }, ++ { AV_PIX_FMT_RPI4_8, MKTAG('S', 'A', 'N', 'D') }, ++ { AV_PIX_FMT_SAND64_10, MKTAG('S', 'N', 'D', 'A') }, ++ { AV_PIX_FMT_RPI4_10, MKTAG('S', 'N', 'D', 'B') }, + { AV_PIX_FMT_NONE, 0 }, }; @@ -44538,10 +43898,10 @@ index 0000000000..9b7b6536a4 +#endif diff --git a/libavcodec/rpivid_hevc.c b/libavcodec/rpivid_hevc.c new file mode 100644 -index 0000000000..a6b5e8a189 +index 0000000000..85c5b46d75 --- /dev/null +++ b/libavcodec/rpivid_hevc.c -@@ -0,0 +1,2033 @@ +@@ -0,0 +1,2128 @@ +// FFMPEG HEVC decoder hardware accelerator +// Andrew Holme, Argon Design Ltd +// Copyright (c) June 2017 Raspberry Pi Ltd @@ -44570,6 +43930,11 @@ index 0000000000..a6b5e8a189 + +#define OPT_PHASE_TIMING 0 // Generate stats for phase usage + ++#define OPT_EMU 0 ++ ++#define TRACE_DEV 0 ++#define TRACE_ENTRY 0 ++ +#define NUM_SCALING_FACTORS 4064 + +#define AXI_BASE64 0 @@ -44767,6 +44132,10 @@ index 0000000000..a6b5e8a189 + uint32_t reg_slicestart; + unsigned int wpp_entry_x; + unsigned int wpp_entry_y; ++ ++ const uint8_t * nal_buffer; ++ size_t nal_size; ++ + uint16_t slice_msgs[2*HEVC_MAX_REFS*8+3]; + uint8_t scaling_factors[NUM_SCALING_FACTORS]; +// unsigned int RefPicList[2][HEVC_MAX_REFS]; @@ -44854,9 +44223,6 @@ index 0000000000..a6b5e8a189 + +//============================================================================ + -+#define TRACE_DEV 0 -+#define TRACE_ENTRY 0 -+ +#define REGS_NAME "/dev/rpivid-hevcmem" +#define REGS_SIZE 0x10000 +#define INTS_NAME "/dev/rpivid-intcmem" @@ -44907,11 +44273,19 @@ index 0000000000..a6b5e8a189 + +static inline void apb_write_vc_addr(const RPI_T *const rpi, const uint32_t addr, const vid_vc_addr_t data) +{ ++#if TRACE_DEV ++ 
printf("W %x %08x\n", addr, MANGLE64(data)); ++#endif ++ + rpi->regs[addr >> 2] = MANGLE64(data); +} + +static inline void apb_write_vc_len(const RPI_T *const rpi, const uint32_t addr, const unsigned int data) +{ ++#if TRACE_DEV ++ printf("W %x %08x\n", addr, data >> 6); ++#endif ++ + rpi->regs[addr >> 2] = data >> 6; // ?? rnd64 - but not currently needed +} + @@ -44953,7 +44327,7 @@ index 0000000000..a6b5e8a189 + rpi->ints[0] = ival & mask_reset; +} + -+#if TRACE_DEV ++#if TRACE_DEV && 0 +static void apb_dump_regs(const RPI_T * const rpi, uint16_t addr, int num) { + int i; + @@ -44989,6 +44363,15 @@ index 0000000000..a6b5e8a189 +} +#endif + ++////////////////////////////////////////////////////////////////////////////// ++ ++static inline size_t round_up_size(const size_t x) ++{ ++ /* Admit no size < 256 */ ++ const unsigned int n = x < 256 ? 8 : av_log2(x) - 1; ++ ++ return x >= (3 << n) ? 4 << n : (3 << n); ++} + +////////////////////////////////////////////////////////////////////////////// +// Scaling factors @@ -45151,6 +44534,11 @@ index 0000000000..a6b5e8a189 +static int p1_apb_write(dec_env_t * const de, const uint16_t addr, const uint32_t data) { + if (de->cmd_len==de->cmd_max) + av_assert0(de->cmd_fifo = realloc(de->cmd_fifo, (de->cmd_max*=2)*sizeof(struct RPI_CMD))); ++ ++#if TRACE_DEV ++ printf("[%02x] %x %x\n", de->cmd_len, addr, data); ++#endif ++ + de->cmd_fifo[de->cmd_len].addr = addr; + de->cmd_fifo[de->cmd_len].data = data; + return de->cmd_len++; @@ -45950,6 +45338,7 @@ index 0000000000..a6b5e8a189 + coeffbuf_sem_claimed = 1; + tstart_phase(rpi, 1); + ++ status = 0; + for (;;) + { + // (Re-)allocate PU/COEFF stream space @@ -45960,9 +45349,19 @@ index 0000000000..a6b5e8a189 + pu_stride = rnd64(rpi->max_pu_msgs * 2 * de->PicWidthInCtbsY); + pu_size = pu_stride * de->PicHeightInCtbsY; + -+ if (pu_size > total_size) { -+ status = -1; -+ break; ++ if (pu_size >= total_size || status == -1) { ++ GPU_MEM_PTR_T newbuf; ++ ++ if (gpu_malloc_uncached(round_up_size(total_size + 1), &newbuf) != 0) ++ { ++ av_log(avctx, AV_LOG_ERROR, "Failed to reallocate coeffbuf\n"); ++ status = -1; ++ break; ++ } ++ gpu_free(rpi->gcoeffbufs + rpi->coeffbuf_no); ++ rpi->gcoeffbufs[rpi->coeffbuf_no] = newbuf; ++ status = 0; ++ continue; + } + + // Allocate all remaining space to coeff @@ -45976,7 +45375,7 @@ index 0000000000..a6b5e8a189 + + // Trigger command FIFO + apb_write(rpi, RPI_CFNUM, de->cmd_len); -+#if TRACE_DEV ++#if TRACE_DEV && 0 + apb_dump_regs(rpi, 0x0, 32); + apb_dump_regs(rpi, 0x8000, 24); + axi_dump(de, ((uint64_t)a64)<<6, de->cmd_len * sizeof(struct RPI_CMD)); @@ -45987,7 +45386,9 @@ index 0000000000..a6b5e8a189 + + status = check_status(rpi, de); + -+ if (status != 1) ++ if (status == -1) ++ continue; ++ else if (status != 1) + break; + + // Status 1 means out of PU space so try again with more @@ -46107,7 +45508,7 @@ index 0000000000..a6b5e8a189 + apb_write_vc_addr(rpi, RPI_COLBASE, rpi->gcolbuf.vc + de->dpbno_col * rpi->col_picsize); + } + -+#if TRACE_DEV ++#if TRACE_DEV && 0 + apb_dump_regs(rpi, 0x0, 32); + apb_dump_regs(rpi, 0x8000, 24); +#endif @@ -46161,12 +45562,59 @@ index 0000000000..a6b5e8a189 + +////////////////////////////////////////////////////////////////////////////// + ++ ++#if TRACE_DEV ++static void dump_data(const uint8_t * p, size_t len) ++{ ++ size_t i; ++ for (i = 0; i < len; i += 16) { ++ size_t j; ++ printf("%04x", i); ++ for (j = 0; j != 16; ++j) { ++ printf("%c%02x", i == 8 ? 
'-' : ' ', p[i+j]); ++ } ++ printf("\n"); ++ } ++} ++#endif ++ ++#if OPT_EMU ++static const uint8_t * ptr_from_index(const uint8_t * b, unsigned int idx) ++{ ++ unsigned int z = 0; ++ while (idx--) { ++ if (*b++ == 0) { ++ ++z; ++ if (z >= 2 && *b == 3) { ++ ++b; ++ z = 0; ++ } ++ } ++ else { ++ z = 0; ++ } ++ } ++ return b; ++} ++#endif ++ +static void WriteBitstream(dec_env_t * const de, const HEVCContext * const s) { -+ const int rpi_use_emu = 0; // FFmpeg removes emulation prevention bytes ++ const int rpi_use_emu = OPT_EMU; // FFmpeg removes emulation prevention bytes + const int offset = 0; // Always 64-byte aligned in sim, need not be on real hardware + const GetBitContext *gb = &s->HEVClc->gb; ++ ++#if OPT_EMU ++ const uint8_t *ptr = ptr_from_index(de->nal_buffer, gb->index/8 + 1); ++ const int len = de->nal_size - (ptr - de->nal_buffer); ++#else + const int len = 1 + gb->size_in_bits/8 - gb->index/8; + const void *ptr = &gb->buffer[gb->index/8]; ++#endif ++ ++#if TRACE_DEV ++ printf("Index=%d, /8=%#x\n", gb->index, gb->index/8); ++ dump_data(de->nal_buffer, 128); ++#endif + + p1_axi_write(de, len, ptr, p1_apb_write(de, RPI_BFBASE, 0)); // BFBASE set later + p1_apb_write(de, RPI_BFNUM, len); @@ -46286,8 +45734,13 @@ index 0000000000..a6b5e8a189 + } + de->state = RPIVID_DECODE_SLICE; + ++ de->nal_buffer = buffer; ++ de->nal_size = size; ++ ++#if !OPT_EMU +// ff_hevc_cabac_init(s, ctb_addr_ts); + cabac_start_align(s); ++#endif + if (s->ps.sps->scaling_list_enable_flag) + populate_scaling_factors(de, s); + pps->entropy_coding_sync_enabled_flag? wpp_decode_slice(de, s, ctb_addr_ts) @@ -46323,7 +45776,7 @@ index 0000000000..a6b5e8a189 + { + const AVZcEnvPtr zc = avctx->opaque; + av_rpi_zc_set_decoder_pool_size(zc, pool_req); -+ av_rpi_zc_get_buffer(zc, frame); // get_buffer2 would alloc ++ rv = av_rpi_zc_get_buffer(zc, frame); // get_buffer2 would alloc + } + else + { @@ -46534,6 +45987,8 @@ index 0000000000..a6b5e8a189 + } + } + ++ av_log(avctx, AV_LOG_INFO, "RPI HEVC h/w accel init OK\n"); ++ + return 0; + +fail: @@ -46576,7 +46031,7 @@ index 0000000000..a6b5e8a189 +}; + diff --git a/libavcodec/v4l2_buffers.c b/libavcodec/v4l2_buffers.c -index 02f23d954b..b516aa934a 100644 +index 02f23d954b..d4f26e416a 100644 --- a/libavcodec/v4l2_buffers.c +++ b/libavcodec/v4l2_buffers.c @@ -21,6 +21,7 @@ @@ -46587,7 +46042,7 @@ index 02f23d954b..b516aa934a 100644 #include #include #include -@@ -30,12 +31,13 @@ +@@ -30,12 +31,14 @@ #include "libavcodec/avcodec.h" #include "libavcodec/internal.h" #include "libavutil/pixdesc.h" @@ -46595,6 +46050,7 @@ index 02f23d954b..b516aa934a 100644 #include "v4l2_context.h" #include "v4l2_buffers.h" #include "v4l2_m2m.h" ++#include "weak_link.h" #define USEC_PER_SEC 1000000 -static AVRational v4l2_timebase = { 1, USEC_PER_SEC }; @@ -46602,7 +46058,7 @@ index 02f23d954b..b516aa934a 100644 static inline V4L2m2mContext *buf_to_m2mctx(V4L2Buffer *buf) { -@@ -52,34 +54,44 @@ static inline AVCodecContext *logger(V4L2Buffer *buf) +@@ -52,34 +55,44 @@ static inline AVCodecContext *logger(V4L2Buffer *buf) static inline AVRational v4l2_get_timebase(V4L2Buffer *avbuf) { V4L2m2mContext *s = buf_to_m2mctx(avbuf); @@ -46663,28 +46119,44 @@ index 02f23d954b..b516aa934a 100644 } static enum AVColorPrimaries v4l2_get_color_primaries(V4L2Buffer *buf) -@@ -210,7 +222,79 @@ static enum AVColorTransferCharacteristic v4l2_get_color_trc(V4L2Buffer *buf) +@@ -210,73 +223,149 @@ static enum AVColorTransferCharacteristic v4l2_get_color_trc(V4L2Buffer *buf) return AVCOL_TRC_UNSPECIFIED; } 
-static void v4l2_free_buffer(void *opaque, uint8_t *unused) +static uint8_t * v4l2_get_drm_frame(V4L2Buffer *avbuf) -+{ + { +- V4L2Buffer* avbuf = opaque; +- V4L2m2mContext *s = buf_to_m2mctx(avbuf); + AVDRMFrameDescriptor *drm_desc = &avbuf->drm_frame; + AVDRMLayerDescriptor *layer; -+ + +- if (atomic_fetch_sub(&avbuf->context_refcount, 1) == 1) { +- atomic_fetch_sub_explicit(&s->refcount, 1, memory_order_acq_rel); + /* fill the DRM frame descriptor */ + drm_desc->nb_objects = avbuf->num_planes; + drm_desc->nb_layers = 1; -+ + +- if (s->reinit) { +- if (!atomic_load(&s->refcount)) +- sem_post(&s->refsync); +- } else { +- if (s->draining && V4L2_TYPE_IS_OUTPUT(avbuf->context->type)) { +- /* no need to queue more buffers to the driver */ +- avbuf->status = V4L2BUF_AVAILABLE; +- } +- else if (avbuf->context->streamon) +- ff_v4l2_buffer_enqueue(avbuf); +- } + layer = &drm_desc->layers[0]; + layer->nb_planes = avbuf->num_planes; -+ + +- av_buffer_unref(&avbuf->context_ref); + for (int i = 0; i < avbuf->num_planes; i++) { + layer->planes[i].object_index = i; + layer->planes[i].offset = 0; + layer->planes[i].pitch = avbuf->plane_info[i].bytesperline; -+ } + } + + switch (avbuf->context->av_pix_fmt) { + case AV_PIX_FMT_YUYV422: @@ -46738,47 +46210,85 @@ index 02f23d954b..b516aa934a 100644 + } + + return (uint8_t *) drm_desc; -+} -+ -+static void v4l2_free_buffer(void *opaque, uint8_t *data) - { - V4L2Buffer* avbuf = opaque; - V4L2m2mContext *s = buf_to_m2mctx(avbuf); -@@ -226,14 +310,52 @@ static void v4l2_free_buffer(void *opaque, uint8_t *unused) - /* no need to queue more buffers to the driver */ - avbuf->status = V4L2BUF_AVAILABLE; - } -- else if (avbuf->context->streamon) -+ else if (avbuf->context->streamon) { -+ avbuf->buf.timestamp.tv_sec = 0; -+ avbuf->buf.timestamp.tv_usec = 0; - ff_v4l2_buffer_enqueue(avbuf); -+ } -+ else { -+ av_log(logger(avbuf), AV_LOG_DEBUG, "%s: Buffer freed but streamoff\n", avbuf->context->name); -+ } - } - - av_buffer_unref(&avbuf->context_ref); - } } +-static int v4l2_buf_increase_ref(V4L2Buffer *in) ++static void v4l2_free_bufref(void *opaque, uint8_t *data) + { +- V4L2m2mContext *s = buf_to_m2mctx(in); ++ AVBufferRef * bufref = (AVBufferRef *)data; ++ V4L2Buffer *avbuf = (V4L2Buffer *)bufref->data; ++ struct V4L2Context *ctx = ff_weak_link_lock(&avbuf->context_wl); + +- if (in->context_ref) +- atomic_fetch_add(&in->context_refcount, 1); +- else { +- in->context_ref = av_buffer_ref(s->self_ref); +- if (!in->context_ref) +- return AVERROR(ENOMEM); ++ if (ctx != NULL) { ++ // Buffer still attached to context ++ V4L2m2mContext *s = buf_to_m2mctx(avbuf); + +- in->context_refcount = 1; +- } ++ ff_mutex_lock(&ctx->lock); + +- in->status = V4L2BUF_RET_USER; +- atomic_fetch_add_explicit(&s->refcount, 1, memory_order_relaxed); ++ avbuf->status = V4L2BUF_AVAILABLE; + +- return 0; ++ if (s->draining && V4L2_TYPE_IS_OUTPUT(ctx->type)) { ++ av_log(logger(avbuf), AV_LOG_DEBUG, "%s: Buffer avail\n", ctx->name); ++ /* no need to queue more buffers to the driver */ ++ } ++ else if (ctx->streamon) { ++ av_log(logger(avbuf), AV_LOG_DEBUG, "%s: Buffer requeue\n", ctx->name); ++ avbuf->buf.timestamp.tv_sec = 0; ++ avbuf->buf.timestamp.tv_usec = 0; ++ ff_v4l2_buffer_enqueue(avbuf); // will set to IN_DRIVER ++ } ++ else { ++ av_log(logger(avbuf), AV_LOG_DEBUG, "%s: Buffer freed but streamoff\n", ctx->name); ++ } ++ ++ ff_mutex_unlock(&ctx->lock); ++ } ++ ++ ff_weak_link_unlock(avbuf->context_wl); ++ av_buffer_unref(&bufref); + } + +-static int v4l2_buf_to_bufref(V4L2Buffer *in, 
int plane, AVBufferRef **buf) +static int v4l2_buffer_export_drm(V4L2Buffer* avbuf) -+{ + { +- int ret; + struct v4l2_exportbuffer expbuf; + int i, ret; -+ + +- if (plane >= in->num_planes) +- return AVERROR(EINVAL); + for (i = 0; i < avbuf->num_planes; i++) { + memset(&expbuf, 0, sizeof(expbuf)); -+ + +- /* even though most encoders return 0 in data_offset encoding vp8 does require this value */ +- *buf = av_buffer_create((char *)in->plane_info[plane].mm_addr + in->planes[plane].data_offset, +- in->plane_info[plane].length, v4l2_free_buffer, in, 0); +- if (!*buf) +- return AVERROR(ENOMEM); + expbuf.index = avbuf->buf.index; + expbuf.type = avbuf->buf.type; + expbuf.plane = i; -+ + +- ret = v4l2_buf_increase_ref(in); +- if (ret) +- av_buffer_unref(buf); + ret = ioctl(buf_to_m2mctx(avbuf)->fd, VIDIOC_EXPBUF, &expbuf); + if (ret < 0) + return AVERROR(errno); -+ + +- return ret; + if (V4L2_TYPE_IS_MULTIPLANAR(avbuf->buf.type)) { + /* drm frame */ + avbuf->drm_frame.objects[i].size = avbuf->buf.m.planes[i].length; @@ -46793,96 +46303,137 @@ index 02f23d954b..b516aa934a 100644 + } + + return 0; -+} -+ - static int v4l2_buf_increase_ref(V4L2Buffer *in) - { - V4L2m2mContext *s = buf_to_m2mctx(in); -@@ -254,6 +376,24 @@ static int v4l2_buf_increase_ref(V4L2Buffer *in) - return 0; - } - -+static int v4l2_buf_to_bufref_drm(V4L2Buffer *in, AVBufferRef **buf) -+{ -+ int ret; -+ -+ *buf = av_buffer_create((uint8_t *) &in->drm_frame, -+ sizeof(in->drm_frame), -+ v4l2_free_buffer, -+ in, AV_BUFFER_FLAG_READONLY); -+ if (!*buf) -+ return AVERROR(ENOMEM); -+ -+ ret = v4l2_buf_increase_ref(in); -+ if (ret) -+ av_buffer_unref(buf); -+ -+ return ret; -+} -+ - static int v4l2_buf_to_bufref(V4L2Buffer *in, int plane, AVBufferRef **buf) - { - int ret; -@@ -274,7 +414,7 @@ static int v4l2_buf_to_bufref(V4L2Buffer *in, int plane, AVBufferRef **buf) - return ret; } -static int v4l2_bufref_to_buf(V4L2Buffer *out, int plane, const uint8_t* data, int size, int offset, AVBufferRef* bref) +static int v4l2_bufref_to_buf(V4L2Buffer *out, int plane, const uint8_t* data, int size, int offset) { unsigned int bytesused, length; ++ int rv = 0; -@@ -286,13 +426,7 @@ static int v4l2_bufref_to_buf(V4L2Buffer *out, int plane, const uint8_t* data, i - - memcpy((uint8_t*)out->plane_info[plane].mm_addr+offset, data, FFMIN(size, length-offset)); + if (plane >= out->num_planes) + return AVERROR(EINVAL); +@@ -284,32 +373,57 @@ static int v4l2_bufref_to_buf(V4L2Buffer *out, int plane, const uint8_t* data, i + length = out->plane_info[plane].length; + bytesused = FFMIN(size+offset, length); +- memcpy((uint8_t*)out->plane_info[plane].mm_addr+offset, data, FFMIN(size, length-offset)); +- - if (V4L2_TYPE_IS_MULTIPLANAR(out->buf.type)) { - out->planes[plane].bytesused = bytesused; - out->planes[plane].length = length; - } else { - out->buf.bytesused = bytesused; - out->buf.length = length; -- } -+ set_buf_length(out, plane, bytesused, length); ++ if (size > length - offset) { ++ size = length - offset; ++ rv = AVERROR(ENOMEM); + } - return 0; +- return 0; ++ memcpy((uint8_t*)out->plane_info[plane].mm_addr+offset, data, size); ++ ++ set_buf_length(out, plane, bytesused, length); ++ ++ return rv; ++} ++ ++static AVBufferRef * wrap_avbuf(V4L2Buffer * const avbuf) ++{ ++ AVBufferRef * bufref = av_buffer_ref(avbuf->context->bufrefs[avbuf->buf.index]); ++ AVBufferRef * newbuf; ++ ++ if (!bufref) ++ return NULL; ++ ++ newbuf = av_buffer_create((uint8_t *)bufref, sizeof(*bufref), v4l2_free_bufref, NULL, 0); ++ if (newbuf == NULL) ++ 
av_buffer_unref(&bufref); ++ ++ avbuf->status = V4L2BUF_RET_USER; ++ return newbuf; } -@@ -303,13 +437,25 @@ static int v4l2_buffer_buf_to_swframe(AVFrame *frame, V4L2Buffer *avbuf) + + static int v4l2_buffer_buf_to_swframe(AVFrame *frame, V4L2Buffer *avbuf) + { +- int i, ret; ++ int i; frame->format = avbuf->context->av_pix_fmt; - for (i = 0; i < avbuf->num_planes; i++) { - ret = v4l2_buf_to_bufref(avbuf, i, &frame->buf[i]); +- if (ret) +- return ret; ++ frame->buf[0] = wrap_avbuf(avbuf); ++ if (frame->buf[0] == NULL) ++ return AVERROR(ENOMEM); + + if (buf_to_m2mctx(avbuf)->output_drm) { + /* 1. get references to the actual data */ -+ ret = v4l2_buf_to_bufref_drm(avbuf, &frame->buf[0]); - if (ret) - return ret; - -- frame->linesize[i] = avbuf->plane_info[i].bytesperline; -- frame->data[i] = frame->buf[i]->data; + frame->data[0] = (uint8_t *) v4l2_get_drm_frame(avbuf); + frame->format = AV_PIX_FMT_DRM_PRIME; + frame->hw_frames_ctx = av_buffer_ref(avbuf->context->frames_ref); -+ } else { -+ /* 1. get references to the actual data */ -+ for (i = 0; i < avbuf->num_planes; i++) { -+ ret = v4l2_buf_to_bufref(avbuf, i, &frame->buf[i]); -+ if (ret) -+ return ret; ++ return 0; ++ } + -+ frame->linesize[i] = avbuf->plane_info[i].bytesperline; -+ frame->data[i] = frame->buf[i]->data; -+ } ++ ++ /* 1. get references to the actual data */ ++ for (i = 0; i < avbuf->num_planes; i++) { ++ frame->data[i] = (uint8_t *)avbuf->plane_info[i].mm_addr + avbuf->planes[i].data_offset; + frame->linesize[i] = avbuf->plane_info[i].bytesperline; +- frame->data[i] = frame->buf[i]->data; } /* fixup special cases */ -@@ -338,68 +484,95 @@ static int v4l2_buffer_buf_to_swframe(AVFrame *frame, V4L2Buffer *avbuf) +@@ -318,17 +432,17 @@ static int v4l2_buffer_buf_to_swframe(AVFrame *frame, V4L2Buffer *avbuf) + case AV_PIX_FMT_NV21: + if (avbuf->num_planes > 1) + break; +- frame->linesize[1] = avbuf->plane_info[0].bytesperline; +- frame->data[1] = frame->buf[0]->data + avbuf->plane_info[0].bytesperline * avbuf->context->format.fmt.pix_mp.height; ++ frame->linesize[1] = frame->linesize[0]; ++ frame->data[1] = frame->data[0] + frame->linesize[0] * ff_v4l2_get_format_height(&avbuf->context->format); + break; + + case AV_PIX_FMT_YUV420P: + if (avbuf->num_planes > 1) + break; +- frame->linesize[1] = avbuf->plane_info[0].bytesperline >> 1; +- frame->linesize[2] = avbuf->plane_info[0].bytesperline >> 1; +- frame->data[1] = frame->buf[0]->data + avbuf->plane_info[0].bytesperline * avbuf->context->format.fmt.pix_mp.height; +- frame->data[2] = frame->data[1] + ((avbuf->plane_info[0].bytesperline * avbuf->context->format.fmt.pix_mp.height) >> 2); ++ frame->linesize[1] = frame->linesize[0] / 2; ++ frame->linesize[2] = frame->linesize[1]; ++ frame->data[1] = frame->data[0] + frame->linesize[0] * ff_v4l2_get_format_height(&avbuf->context->format); ++ frame->data[2] = frame->data[1] + frame->linesize[1] * ff_v4l2_get_format_height(&avbuf->context->format) / 2; + break; + + default: +@@ -338,68 +452,95 @@ static int v4l2_buffer_buf_to_swframe(AVFrame *frame, V4L2Buffer *avbuf) return 0; } --static int v4l2_buffer_swframe_to_buf(const AVFrame *frame, V4L2Buffer *out) +static void cpy_2d(uint8_t * dst, int dst_stride, const uint8_t * src, int src_stride, int w, int h) ++{ ++ if (dst_stride == src_stride && w + 32 >= dst_stride) { ++ memcpy(dst, src, dst_stride * h); ++ } ++ else { ++ while (--h >= 0) { ++ memcpy(dst, src, w); ++ dst += dst_stride; ++ src += src_stride; ++ } ++ } ++} ++ ++static int is_chroma(const AVPixFmtDescriptor *desc, 
int i, int num_planes) ++{ ++ return i != 0 && !(i == num_planes - 1 && (desc->flags & AV_PIX_FMT_FLAG_ALPHA)); ++} ++ + static int v4l2_buffer_swframe_to_buf(const AVFrame *frame, V4L2Buffer *out) { - int i, ret; - struct v4l2_format fmt = out->context->format; @@ -46914,45 +46465,30 @@ index 02f23d954b..b516aa934a 100644 - case V4L2_PIX_FMT_NV16M: - case V4L2_PIX_FMT_NV61M: - is_planar_format = 1; -+ if (dst_stride == src_stride && w + 32 >= dst_stride) { -+ memcpy(dst, src, dst_stride * h); -+ } -+ else { -+ while (--h >= 0) { -+ memcpy(dst, src, w); -+ dst += dst_stride; -+ src += src_stride; -+ } - } -+} -+ -+static int is_chroma(const AVPixFmtDescriptor *desc, int i, int num_planes) -+{ -+ return i != 0 && !(i == num_planes - 1 && (desc->flags & AV_PIX_FMT_FLAG_ALPHA)); -+} - +- } +- - if (!is_planar_format) { - const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format); - int planes_nb = 0; - int offset = 0; -+static int v4l2_buffer_swframe_to_buf(const AVFrame *frame, V4L2Buffer *out) -+{ +- +- for (i = 0; i < desc->nb_components; i++) +- planes_nb = FFMAX(planes_nb, desc->comp[i].plane + 1); +- +- for (i = 0; i < planes_nb; i++) { +- int size, h = height; +- if (i == 1 || i == 2) { + int i; + int num_planes = 0; + int pel_strides[4] = {0}; - -- for (i = 0; i < desc->nb_components; i++) -- planes_nb = FFMAX(planes_nb, desc->comp[i].plane + 1); ++ + const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format); + + if ((desc->flags & AV_PIX_FMT_FLAG_HWACCEL) != 0) { + av_log(NULL, AV_LOG_ERROR, "%s: HWACCEL cannot be copied\n", __func__); + return -1; + } - -- for (i = 0; i < planes_nb; i++) { -- int size, h = height; -- if (i == 1 || i == 2) { ++ + for (i = 0; i != desc->nb_components; ++i) { + if (desc->comp[i].plane >= num_planes) + num_planes = desc->comp[i].plane + 1; @@ -47024,7 +46560,7 @@ index 02f23d954b..b516aa934a 100644 return 0; } -@@ -411,12 +584,12 @@ static int v4l2_buffer_swframe_to_buf(const AVFrame *frame, V4L2Buffer *out) +@@ -411,14 +552,15 @@ static int v4l2_buffer_swframe_to_buf(const AVFrame *frame, V4L2Buffer *out) int ff_v4l2_buffer_avframe_to_buf(const AVFrame *frame, V4L2Buffer *out) { @@ -47038,8 +46574,11 @@ index 02f23d954b..b516aa934a 100644 +int ff_v4l2_buffer_buf_to_avframe(AVFrame *frame, V4L2Buffer *avbuf, int no_rescale_pts) { int ret; ++ V4L2Context * const ctx = avbuf->context; -@@ -433,7 +606,7 @@ int ff_v4l2_buffer_buf_to_avframe(AVFrame *frame, V4L2Buffer *avbuf) + av_frame_unref(frame); + +@@ -433,13 +575,22 @@ int ff_v4l2_buffer_buf_to_avframe(AVFrame *frame, V4L2Buffer *avbuf) frame->colorspace = v4l2_get_color_space(avbuf); frame->color_range = v4l2_get_color_range(avbuf); frame->color_trc = v4l2_get_color_trc(avbuf); @@ -47048,7 +46587,47 @@ index 02f23d954b..b516aa934a 100644 frame->pkt_dts = AV_NOPTS_VALUE; /* these values are updated also during re-init in v4l2_process_driver_event */ -@@ -470,20 +643,27 @@ int ff_v4l2_buffer_buf_to_avpkt(AVPacket *pkt, V4L2Buffer *avbuf) +- frame->height = avbuf->context->height; +- frame->width = avbuf->context->width; +- frame->sample_aspect_ratio = avbuf->context->sample_aspect_ratio; ++ frame->height = ctx->height; ++ frame->width = ctx->width; ++ frame->sample_aspect_ratio = ctx->sample_aspect_ratio; ++ ++ if (ctx->selection.height && ctx->selection.width) { ++ frame->crop_left = ctx->selection.left < frame->width ? ctx->selection.left : 0; ++ frame->crop_top = ctx->selection.top < frame->height ? 
ctx->selection.top : 0; ++ frame->crop_right = ctx->selection.left + ctx->selection.width < frame->width ? ++ frame->width - (ctx->selection.left + ctx->selection.width) : 0; ++ frame->crop_bottom = ctx->selection.top + ctx->selection.height < frame->height ? ++ frame->width - (ctx->selection.top + ctx->selection.height) : 0; ++ } + + /* 3. report errors upstream */ + if (avbuf->buf.flags & V4L2_BUF_FLAG_ERROR) { +@@ -452,15 +603,16 @@ int ff_v4l2_buffer_buf_to_avframe(AVFrame *frame, V4L2Buffer *avbuf) + + int ff_v4l2_buffer_buf_to_avpkt(AVPacket *pkt, V4L2Buffer *avbuf) + { +- int ret; ++ av_log(logger(avbuf), AV_LOG_INFO, "%s\n", __func__); + + av_packet_unref(pkt); +- ret = v4l2_buf_to_bufref(avbuf, 0, &pkt->buf); +- if (ret) +- return ret; ++ ++ pkt->buf = wrap_avbuf(avbuf); ++ if (pkt->buf == NULL) ++ return AVERROR(ENOMEM); + + pkt->size = V4L2_TYPE_IS_MULTIPLANAR(avbuf->buf.type) ? avbuf->buf.m.planes[0].bytesused : avbuf->buf.bytesused; +- pkt->data = pkt->buf->data; ++ pkt->data = (uint8_t*)avbuf->plane_info[0].mm_addr + avbuf->planes[0].data_offset; + + if (avbuf->buf.flags & V4L2_BUF_FLAG_KEYFRAME) + pkt->flags |= AV_PKT_FLAG_KEY; +@@ -470,36 +622,89 @@ int ff_v4l2_buffer_buf_to_avpkt(AVPacket *pkt, V4L2Buffer *avbuf) pkt->flags |= AV_PKT_FLAG_CORRUPT; } @@ -47065,6 +46644,7 @@ index 02f23d954b..b516aa934a 100644 int ret; - ret = v4l2_bufref_to_buf(out, 0, pkt->data, pkt->size, 0, pkt->buf); +- if (ret) + if (extlen) { + ret = v4l2_bufref_to_buf(out, 0, extdata, extlen, 0); + if (ret) @@ -47072,7 +46652,7 @@ index 02f23d954b..b516aa934a 100644 + } + + ret = v4l2_bufref_to_buf(out, 0, pkt->data, pkt->size, extlen); - if (ret) ++ if (ret && ret != AVERROR(ENOMEM)) return ret; - v4l2_set_pts(out, pkt->pts); @@ -47080,47 +46660,81 @@ index 02f23d954b..b516aa934a 100644 if (pkt->flags & AV_PKT_FLAG_KEY) out->flags = V4L2_BUF_FLAG_KEYFRAME; -@@ -491,6 +671,11 @@ int ff_v4l2_buffer_avpkt_to_buf(const AVPacket *pkt, V4L2Buffer *out) - return 0; - } +- return 0; ++ return ret; ++} ++ +int ff_v4l2_buffer_avpkt_to_buf(const AVPacket *pkt, V4L2Buffer *out) +{ + return ff_v4l2_buffer_avpkt_to_buf_ext(pkt, out, NULL, 0, 0); + } + +-int ff_v4l2_buffer_initialize(V4L2Buffer* avbuf, int index) ++ ++static void v4l2_buffer_buffer_free(void *opaque, uint8_t *data) ++{ ++ V4L2Buffer * const avbuf = (V4L2Buffer *)data; ++ int i; ++ ++ for (i = 0; i != FF_ARRAY_ELEMS(avbuf->plane_info); ++i) { ++ struct V4L2Plane_info *p = avbuf->plane_info + i; ++ if (p->mm_addr != NULL) ++ munmap(p->mm_addr, p->length); ++ } ++ ++ for (i = 0; i != FF_ARRAY_ELEMS(avbuf->drm_frame.objects); ++i) { ++ if (avbuf->drm_frame.objects[i].fd != -1) ++ close(avbuf->drm_frame.objects[i].fd); ++ } ++ ++ ff_weak_link_unref(&avbuf->context_wl); ++ ++ av_free(avbuf); +} + - int ff_v4l2_buffer_initialize(V4L2Buffer* avbuf, int index) ++ ++int ff_v4l2_buffer_initialize(AVBufferRef ** pbufref, int index, V4L2Context *ctx) { - V4L2Context *ctx = avbuf->context; -@@ -500,6 +685,27 @@ int ff_v4l2_buffer_initialize(V4L2Buffer* avbuf, int index) +- V4L2Context *ctx = avbuf->context; + int ret, i; ++ V4L2Buffer * const avbuf = av_mallocz(sizeof(*avbuf)); ++ AVBufferRef * bufref; ++ ++ *pbufref = NULL; ++ if (avbuf == NULL) ++ return AVERROR(ENOMEM); ++ ++ bufref = av_buffer_create((uint8_t*)avbuf, sizeof(*avbuf), v4l2_buffer_buffer_free, NULL, 0); ++ if (bufref == NULL) { ++ av_free(avbuf); ++ return AVERROR(ENOMEM); ++ } + ++ avbuf->context = ctx; + avbuf->buf.memory = V4L2_MEMORY_MMAP; avbuf->buf.type = ctx->type; avbuf->buf.index = 
index; -+ if (buf_to_m2mctx(avbuf)->output_drm) { -+ AVHWFramesContext *hwframes; -+ -+ av_buffer_unref(&ctx->frames_ref); -+ -+ ctx->frames_ref = av_hwframe_ctx_alloc(buf_to_m2mctx(avbuf)->device_ref); -+ if (!ctx->frames_ref) { -+ ret = AVERROR(ENOMEM); -+ return ret; -+ } -+ -+ hwframes = (AVHWFramesContext*)ctx->frames_ref->data; -+ hwframes->format = AV_PIX_FMT_DRM_PRIME; -+ hwframes->sw_format = ctx->av_pix_fmt; -+ hwframes->width = ctx->width; -+ hwframes->height = ctx->height; -+ ret = av_hwframe_ctx_init(ctx->frames_ref); -+ if (ret < 0) -+ return ret; ++ for (i = 0; i != FF_ARRAY_ELEMS(avbuf->drm_frame.objects); ++i) { ++ avbuf->drm_frame.objects[i].fd = -1; + } ++ ++ avbuf->context_wl = ff_weak_link_ref(ctx->wl_master); + if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) { avbuf->buf.length = VIDEO_MAX_PLANES; avbuf->buf.m.planes = avbuf->planes; -@@ -527,14 +733,22 @@ int ff_v4l2_buffer_initialize(V4L2Buffer* avbuf, int index) +@@ -507,7 +712,7 @@ int ff_v4l2_buffer_initialize(V4L2Buffer* avbuf, int index) + + ret = ioctl(buf_to_m2mctx(avbuf)->fd, VIDIOC_QUERYBUF, &avbuf->buf); + if (ret < 0) +- return AVERROR(errno); ++ goto fail; + + if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) { + avbuf->num_planes = 0; +@@ -527,25 +732,33 @@ int ff_v4l2_buffer_initialize(V4L2Buffer* avbuf, int index) if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) { avbuf->plane_info[i].length = avbuf->buf.m.planes[i].length; @@ -47148,8 +46762,14 @@ index 02f23d954b..b516aa934a 100644 + } } - if (avbuf->plane_info[i].mm_addr == MAP_FAILED) -@@ -543,9 +757,6 @@ int ff_v4l2_buffer_initialize(V4L2Buffer* avbuf, int index) +- if (avbuf->plane_info[i].mm_addr == MAP_FAILED) +- return AVERROR(ENOMEM); ++ if (avbuf->plane_info[i].mm_addr == MAP_FAILED) { ++ avbuf->plane_info[i].mm_addr = NULL; ++ ret = AVERROR(ENOMEM); ++ goto fail; ++ } + } avbuf->status = V4L2BUF_AVAILABLE; @@ -47159,37 +46779,63 @@ index 02f23d954b..b516aa934a 100644 if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) { avbuf->buf.m.planes = avbuf->planes; avbuf->buf.length = avbuf->num_planes; -@@ -555,6 +766,15 @@ int ff_v4l2_buffer_initialize(V4L2Buffer* avbuf, int index) +@@ -555,7 +768,20 @@ int ff_v4l2_buffer_initialize(V4L2Buffer* avbuf, int index) avbuf->buf.length = avbuf->planes[0].length; } -+ if (V4L2_TYPE_IS_OUTPUT(ctx->type)) -+ return 0; -+ -+ if (buf_to_m2mctx(avbuf)->output_drm) { -+ ret = v4l2_buffer_export_drm(avbuf); -+ if (ret) -+ return ret; +- return ff_v4l2_buffer_enqueue(avbuf); ++ if (!V4L2_TYPE_IS_OUTPUT(ctx->type)) { ++ if (buf_to_m2mctx(avbuf)->output_drm) { ++ ret = v4l2_buffer_export_drm(avbuf); ++ if (ret) ++ goto fail; ++ } + } + - return ff_v4l2_buffer_enqueue(avbuf); ++ *pbufref = bufref; ++ return 0; ++ ++fail: ++ av_buffer_unref(&bufref); ++ return ret; } -@@ -568,6 +788,9 @@ int ff_v4l2_buffer_enqueue(V4L2Buffer* avbuf) - if (ret < 0) - return AVERROR(errno); + int ff_v4l2_buffer_enqueue(V4L2Buffer* avbuf) +@@ -564,9 +790,27 @@ int ff_v4l2_buffer_enqueue(V4L2Buffer* avbuf) -+ ++avbuf->context->q_count; -+ av_log(logger(avbuf), AV_LOG_DEBUG, "--- %s VIDIOC_QBUF: index %d, count=%d\n", avbuf->context->name, avbuf->buf.index, avbuf->context->q_count); + avbuf->buf.flags = avbuf->flags; + ++ if (avbuf->buf.timestamp.tv_sec || avbuf->buf.timestamp.tv_usec) { ++ av_log(logger(avbuf), AV_LOG_DEBUG, "--- %s pre VIDIOC_QBUF: index %d, ts=%ld.%06ld count=%d\n", ++ avbuf->context->name, avbuf->buf.index, ++ avbuf->buf.timestamp.tv_sec, avbuf->buf.timestamp.tv_usec, ++ avbuf->context->q_count); ++ } + + ret = ioctl(buf_to_m2mctx(avbuf)->fd, 
VIDIOC_QBUF, &avbuf->buf); +- if (ret < 0) +- return AVERROR(errno); ++ if (ret < 0) { ++ int err = errno; ++ av_log(logger(avbuf), AV_LOG_ERROR, "--- %s VIDIOC_QBUF: index %d FAIL err %d (%s)\n", ++ avbuf->context->name, avbuf->buf.index, ++ err, strerror(err)); ++ return AVERROR(err); ++ } ++ ++ ++avbuf->context->q_count; ++ av_log(logger(avbuf), AV_LOG_DEBUG, "--- %s VIDIOC_QBUF: index %d, ts=%ld.%06ld count=%d\n", ++ avbuf->context->name, avbuf->buf.index, ++ avbuf->buf.timestamp.tv_sec, avbuf->buf.timestamp.tv_usec, ++ avbuf->context->q_count); + avbuf->status = V4L2BUF_IN_DRIVER; - return 0; diff --git a/libavcodec/v4l2_buffers.h b/libavcodec/v4l2_buffers.h -index 8dbc7fc104..46ca85ce65 100644 +index 8dbc7fc104..9909f349bd 100644 --- a/libavcodec/v4l2_buffers.h +++ b/libavcodec/v4l2_buffers.h -@@ -27,6 +27,7 @@ +@@ -27,25 +27,34 @@ #include #include @@ -47197,17 +46843,39 @@ index 8dbc7fc104..46ca85ce65 100644 #include "avcodec.h" enum V4L2Buffer_status { -@@ -42,6 +43,9 @@ typedef struct V4L2Buffer { - /* each buffer needs to have a reference to its context */ - struct V4L2Context *context; + V4L2BUF_AVAILABLE, + V4L2BUF_IN_DRIVER, ++ V4L2BUF_IN_USE, + V4L2BUF_RET_USER, + }; + /** + * V4L2Buffer (wrapper for v4l2_buffer management) + */ ++struct V4L2Context; ++struct ff_weak_link_client; ++ + typedef struct V4L2Buffer { +- /* each buffer needs to have a reference to its context */ ++ /* each buffer needs to have a reference to its context ++ * The pointer is good enough for most operation but once the buffer has ++ * been passed to the user the buffer may become orphaned so for free ops ++ * the weak link must be used to ensure that the context is actually ++ * there ++ */ + struct V4L2Context *context; ++ struct ff_weak_link_client *context_wl; + +- /* This object is refcounted per-plane, so we need to keep track +- * of how many context-refs we are holding. */ +- AVBufferRef *context_ref; +- atomic_uint context_refcount; + /* DRM descriptor */ + AVDRMFrameDescriptor drm_frame; -+ - /* This object is refcounted per-plane, so we need to keep track - * of how many context-refs we are holding. 
*/ - AVBufferRef *context_ref; -@@ -70,11 +74,12 @@ typedef struct V4L2Buffer { + + /* keep track of the mmap address and mmap length */ + struct V4L2Plane_info { +@@ -70,11 +79,12 @@ typedef struct V4L2Buffer { * * @param[in] frame The AVFRame to push the information to * @param[in] buf The V4L2Buffer to get the information from @@ -47221,7 +46889,7 @@ index 8dbc7fc104..46ca85ce65 100644 /** * Extracts the data from a V4L2Buffer to an AVPacket -@@ -98,6 +103,9 @@ int ff_v4l2_buffer_buf_to_avpkt(AVPacket *pkt, V4L2Buffer *buf); +@@ -98,6 +108,9 @@ int ff_v4l2_buffer_buf_to_avpkt(AVPacket *pkt, V4L2Buffer *buf); */ int ff_v4l2_buffer_avpkt_to_buf(const AVPacket *pkt, V4L2Buffer *out); @@ -47231,39 +46899,255 @@ index 8dbc7fc104..46ca85ce65 100644 /** * Extracts the data from an AVFrame to a V4L2Buffer * +@@ -116,7 +129,7 @@ int ff_v4l2_buffer_avframe_to_buf(const AVFrame *frame, V4L2Buffer *out); + * + * @returns 0 in case of success, a negative AVERROR code otherwise + */ +-int ff_v4l2_buffer_initialize(V4L2Buffer* avbuf, int index); ++int ff_v4l2_buffer_initialize(AVBufferRef **avbuf, int index, struct V4L2Context *ctx); + + /** + * Enqueues a V4L2Buffer diff --git a/libavcodec/v4l2_context.c b/libavcodec/v4l2_context.c -index 29b144ed73..a8590d0ea1 100644 +index 29b144ed73..24cee598eb 100644 --- a/libavcodec/v4l2_context.c +++ b/libavcodec/v4l2_context.c -@@ -173,7 +173,8 @@ static int v4l2_handle_event(V4L2Context *ctx) - } +@@ -27,11 +27,13 @@ + #include + #include + #include ++#include "libavutil/avassert.h" + #include "libavcodec/avcodec.h" + #include "libavcodec/internal.h" + #include "v4l2_buffers.h" + #include "v4l2_fmt.h" + #include "v4l2_m2m.h" ++#include "weak_link.h" - if (evt.type == V4L2_EVENT_EOS) { + struct v4l2_format_update { + uint32_t v4l2_fmt; +@@ -53,16 +55,6 @@ static inline AVCodecContext *logger(V4L2Context *ctx) + return ctx_to_m2mctx(ctx)->avctx; + } + +-static inline unsigned int v4l2_get_width(struct v4l2_format *fmt) +-{ +- return V4L2_TYPE_IS_MULTIPLANAR(fmt->type) ? fmt->fmt.pix_mp.width : fmt->fmt.pix.width; +-} +- +-static inline unsigned int v4l2_get_height(struct v4l2_format *fmt) +-{ +- return V4L2_TYPE_IS_MULTIPLANAR(fmt->type) ? 
fmt->fmt.pix_mp.height : fmt->fmt.pix.height; +-} +- + static AVRational v4l2_get_sar(V4L2Context *ctx) + { + struct AVRational sar = { 0, 1 }; +@@ -94,8 +86,8 @@ static inline unsigned int v4l2_resolution_changed(V4L2Context *ctx, struct v4l2 + if (ret) + av_log(logger(ctx), AV_LOG_DEBUG, "%s changed (%dx%d) -> (%dx%d)\n", + ctx->name, +- v4l2_get_width(fmt1), v4l2_get_height(fmt1), +- v4l2_get_width(fmt2), v4l2_get_height(fmt2)); ++ ff_v4l2_get_format_width(fmt1), ff_v4l2_get_format_height(fmt1), ++ ff_v4l2_get_format_width(fmt2), ff_v4l2_get_format_height(fmt2)); + + return ret; + } +@@ -153,58 +145,67 @@ static inline void v4l2_save_to_context(V4L2Context* ctx, struct v4l2_format_upd + } + } + +-/** +- * handle resolution change event and end of stream event +- * returns 1 if reinit was successful, negative if it failed +- * returns 0 if reinit was not executed +- */ +-static int v4l2_handle_event(V4L2Context *ctx) ++static int get_default_selection(V4L2Context * const ctx, struct v4l2_rect *r) + { +- V4L2m2mContext *s = ctx_to_m2mctx(ctx); +- struct v4l2_format cap_fmt = s->capture.format; +- struct v4l2_format out_fmt = s->output.format; +- struct v4l2_event evt = { 0 }; +- int full_reinit, reinit, ret; ++ V4L2m2mContext * const s = ctx_to_m2mctx(ctx); ++ struct v4l2_selection selection = { ++ .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, ++ .target = V4L2_SEL_TGT_COMPOSE ++ }; + +- ret = ioctl(s->fd, VIDIOC_DQEVENT, &evt); +- if (ret < 0) { +- av_log(logger(ctx), AV_LOG_ERROR, "%s VIDIOC_DQEVENT\n", ctx->name); +- return 0; +- } ++ memset(r, 0, sizeof(*r)); ++ if (ioctl(s->fd, VIDIOC_G_SELECTION, &selection)) ++ return AVERROR(errno); + +- if (evt.type == V4L2_EVENT_EOS) { - ctx->done = 1; -+// ctx->done = 1; -+ av_log(logger(ctx), AV_LOG_TRACE, "%s VIDIOC_EVENT_EOS\n", ctx->name); +- return 0; +- } ++ *r = selection.r; ++ return 0; ++} + +- if (evt.type != V4L2_EVENT_SOURCE_CHANGE) +- return 0; ++static int do_source_change(V4L2m2mContext * const s) ++{ ++ AVCodecContext *const avctx = s->avctx; ++ ++ int ret; ++ int reinit; ++ int full_reinit; ++ struct v4l2_format cap_fmt = s->capture.format; ++ struct v4l2_format out_fmt = s->output.format; ++ ++ s->resize_pending = 0; ++ s->capture.done = 0; + + ret = ioctl(s->fd, VIDIOC_G_FMT, &out_fmt); + if (ret) { +- av_log(logger(ctx), AV_LOG_ERROR, "%s VIDIOC_G_FMT\n", s->output.name); ++ av_log(avctx, AV_LOG_ERROR, "%s VIDIOC_G_FMT failed\n", s->output.name); return 0; } -@@ -196,15 +197,15 @@ static int v4l2_handle_event(V4L2Context *ctx) + ret = ioctl(s->fd, VIDIOC_G_FMT, &cap_fmt); + if (ret) { +- av_log(logger(ctx), AV_LOG_ERROR, "%s VIDIOC_G_FMT\n", s->capture.name); ++ av_log(avctx, AV_LOG_ERROR, "%s VIDIOC_G_FMT failed\n", s->capture.name); + return 0; + } + + full_reinit = v4l2_resolution_changed(&s->output, &out_fmt); if (full_reinit) { - s->output.height = v4l2_get_height(&out_fmt); - s->output.width = v4l2_get_width(&out_fmt); +- s->output.height = v4l2_get_height(&out_fmt); +- s->output.width = v4l2_get_width(&out_fmt); - s->output.sample_aspect_ratio = v4l2_get_sar(&s->output); ++ s->output.height = ff_v4l2_get_format_height(&out_fmt); ++ s->output.width = ff_v4l2_get_format_width(&out_fmt); } + s->output.sample_aspect_ratio = v4l2_get_sar(&s->output); ++ ++ get_default_selection(&s->capture, &s->capture.selection); reinit = v4l2_resolution_changed(&s->capture, &cap_fmt); if (reinit) { - s->capture.height = v4l2_get_height(&cap_fmt); - s->capture.width = v4l2_get_width(&cap_fmt); +- s->capture.height = v4l2_get_height(&cap_fmt); +- 
s->capture.width = v4l2_get_width(&cap_fmt); - s->capture.sample_aspect_ratio = v4l2_get_sar(&s->capture); ++ s->capture.height = ff_v4l2_get_format_height(&cap_fmt); ++ s->capture.width = ff_v4l2_get_format_width(&cap_fmt); } + s->capture.sample_aspect_ratio = v4l2_get_sar(&s->capture); ++ ++ av_log(avctx, AV_LOG_DEBUG, "Source change: SAR: %d/%d, crop %dx%d @ %d,%d\n", ++ s->capture.sample_aspect_ratio.num, s->capture.sample_aspect_ratio.den, ++ s->capture.selection.width, s->capture.selection.height, ++ s->capture.selection.left, s->capture.selection.top); if (full_reinit || reinit) s->reinit = 1; -@@ -280,6 +281,21 @@ static int v4l2_stop_encode(V4L2Context *ctx) +@@ -212,34 +213,88 @@ static int v4l2_handle_event(V4L2Context *ctx) + if (full_reinit) { + ret = ff_v4l2_m2m_codec_full_reinit(s); + if (ret) { +- av_log(logger(ctx), AV_LOG_ERROR, "v4l2_m2m_codec_full_reinit\n"); ++ av_log(avctx, AV_LOG_ERROR, "v4l2_m2m_codec_full_reinit failed\n"); + return AVERROR(EINVAL); + } + goto reinit_run; + } + + if (reinit) { +- if (s->avctx) ++ if (avctx) + ret = ff_set_dimensions(s->avctx, s->capture.width, s->capture.height); + if (ret < 0) +- av_log(logger(ctx), AV_LOG_WARNING, "update avcodec height and width\n"); ++ av_log(avctx, AV_LOG_WARNING, "update avcodec height and width failed\n"); + + ret = ff_v4l2_m2m_codec_reinit(s); + if (ret) { +- av_log(logger(ctx), AV_LOG_ERROR, "v4l2_m2m_codec_reinit\n"); ++ av_log(avctx, AV_LOG_ERROR, "v4l2_m2m_codec_reinit failed\n"); + return AVERROR(EINVAL); + } + goto reinit_run; + } + +- /* dummy event received */ +- return 0; ++ /* Buffers are OK so just stream off to ack */ ++ av_log(avctx, AV_LOG_DEBUG, "%s: Parameters only\n", __func__); ++ ++ ret = ff_v4l2_context_set_status(&s->capture, VIDIOC_STREAMOFF); ++ if (ret) ++ av_log(avctx, AV_LOG_ERROR, "capture VIDIOC_STREAMOFF failed\n"); ++ s->draining = 0; + + /* reinit executed */ + reinit_run: ++ ret = ff_v4l2_context_set_status(&s->capture, VIDIOC_STREAMON); + return 1; + } + ++static int ctx_done(V4L2Context * const ctx) ++{ ++ int rv = 0; ++ V4L2m2mContext * const s = ctx_to_m2mctx(ctx); ++ ++ ctx->done = 1; ++ ++ if (s->resize_pending && !V4L2_TYPE_IS_OUTPUT(ctx->type)) ++ rv = do_source_change(s); ++ ++ return rv; ++} ++ ++/** ++ * handle resolution change event and end of stream event ++ * returns 1 if reinit was successful, negative if it failed ++ * returns 0 if reinit was not executed ++ */ ++static int v4l2_handle_event(V4L2Context *ctx) ++{ ++ V4L2m2mContext * const s = ctx_to_m2mctx(ctx); ++ struct v4l2_event evt = { 0 }; ++ int ret; ++ ++ ret = ioctl(s->fd, VIDIOC_DQEVENT, &evt); ++ if (ret < 0) { ++ av_log(logger(ctx), AV_LOG_ERROR, "%s VIDIOC_DQEVENT\n", ctx->name); ++ return 0; ++ } ++ ++ av_log(logger(ctx), AV_LOG_INFO, "Dq event %d\n", evt.type); ++ ++ if (evt.type == V4L2_EVENT_EOS) { ++// ctx->done = 1; ++ av_log(logger(ctx), AV_LOG_TRACE, "%s VIDIOC_EVENT_EOS\n", ctx->name); ++ return 0; ++ } ++ ++ if (evt.type != V4L2_EVENT_SOURCE_CHANGE) ++ return 0; ++ ++ s->resize_pending = 1; ++ if (!ctx->done) ++ return 0; ++ ++ return do_source_change(s); ++} ++ + static int v4l2_stop_decode(V4L2Context *ctx) + { + struct v4l2_decoder_cmd cmd = { +@@ -280,8 +335,26 @@ static int v4l2_stop_encode(V4L2Context *ctx) return 0; } @@ -47272,11 +47156,12 @@ index 29b144ed73..a8590d0ea1 100644 + int i; + int n = 0; + -+ if (!ctx->buffers) ++ if (!ctx->bufrefs) + return -1; + + for (i = 0; i < ctx->num_buffers; ++i) { -+ if (ctx->buffers[i].status == V4L2BUF_IN_DRIVER) ++ V4L2Buffer *const avbuf 
= (V4L2Buffer *)ctx->bufrefs[i]->data; ++ if (avbuf->status == V4L2BUF_IN_DRIVER) + ++n; + } + return n; @@ -47284,12 +47169,25 @@ index 29b144ed73..a8590d0ea1 100644 + static V4L2Buffer* v4l2_dequeue_v4l2buf(V4L2Context *ctx, int timeout) { ++ V4L2m2mContext * const s = ctx_to_m2mctx(ctx); ++ const int is_capture = !V4L2_TYPE_IS_OUTPUT(ctx->type); struct v4l2_plane planes[VIDEO_MAX_PLANES]; -@@ -296,11 +312,13 @@ static V4L2Buffer* v4l2_dequeue_v4l2buf(V4L2Context *ctx, int timeout) - if (ctx->buffers[i].status == V4L2BUF_IN_DRIVER) + struct v4l2_buffer buf = { 0 }; + V4L2Buffer *avbuf; +@@ -290,50 +363,84 @@ static V4L2Buffer* v4l2_dequeue_v4l2buf(V4L2Context *ctx, int timeout) + .fd = ctx_to_m2mctx(ctx)->fd, + }; + int i, ret; ++ int no_rx_means_done = 0; + +- if (!V4L2_TYPE_IS_OUTPUT(ctx->type) && ctx->buffers) { ++ if (is_capture && ctx->bufrefs) { + for (i = 0; i < ctx->num_buffers; i++) { +- if (ctx->buffers[i].status == V4L2BUF_IN_DRIVER) ++ avbuf = (V4L2Buffer *)ctx->bufrefs[i]->data; ++ if (avbuf->status == V4L2BUF_IN_DRIVER) break; } -+#if 1 if (i == ctx->num_buffers) - av_log(logger(ctx), AV_LOG_WARNING, "All capture buffers returned to " + av_log(logger(ctx), AV_LOG_WARNING, "All capture buffers (%d) returned to " @@ -47297,75 +47195,215 @@ index 29b144ed73..a8590d0ea1 100644 "to prevent device deadlock or dropped " - "packets/frames.\n"); + "packets/frames.\n", i); -+#endif } ++#if 0 ++ // I think this is true but pointless ++ // we will get some other form of EOF signal ++ /* if we are draining and there are no more capture buffers queued in the driver we are done */ -@@ -329,11 +347,16 @@ start: +- if (!V4L2_TYPE_IS_OUTPUT(ctx->type) && ctx_to_m2mctx(ctx)->draining) { ++ if (is_capture && ctx_to_m2mctx(ctx)->draining) { + for (i = 0; i < ctx->num_buffers; i++) { + /* capture buffer initialization happens during decode hence + * detection happens at runtime + */ +- if (!ctx->buffers) ++ if (!ctx->bufrefs) + break; + +- if (ctx->buffers[i].status == V4L2BUF_IN_DRIVER) ++ avbuf = (V4L2Buffer *)ctx->bufrefs[i]->data; ++ if (avbuf->status == V4L2BUF_IN_DRIVER) + goto start; + } + ctx->done = 1; + return NULL; } ++#endif + + start: +- if (V4L2_TYPE_IS_OUTPUT(ctx->type)) +- pfd.events = POLLOUT | POLLWRNORM; +- else { ++ if (is_capture) { + /* no need to listen to requests for more input while draining */ + if (ctx_to_m2mctx(ctx)->draining) + pfd.events = POLLIN | POLLRDNORM | POLLPRI; ++ } else { ++ pfd.events = POLLOUT | POLLWRNORM; + } ++ no_rx_means_done = s->resize_pending && is_capture; for (;;) { - ret = poll(&pfd, 1, timeout); -+ int t2 = timeout < 0 ? 3000 : timeout; -+ int e = pfd.events; ++ // If we have a resize pending then all buffers should be Qed ++ // With a resize pending we should be in drain but evidence suggests ++ // that not all decoders do this so poll to clear ++ int t2 = no_rx_means_done ? 0 : timeout < 0 ? 
3000 : timeout; ++ const int e = pfd.events; ++ + ret = poll(&pfd, 1, t2); ++ if (ret > 0) break; - if (errno == EINTR) - continue; -+ if (timeout == -1) { -+ av_log(logger(ctx), AV_LOG_ERROR, "=== poll unexpected TIMEOUT: events=%#x, cap buffers=%d\n", e, count_in_driver(ctx));; +- if (errno == EINTR) +- continue; ++ ++ if (ret < 0) { ++ int err = errno; ++ if (err == EINTR) ++ continue; ++ av_log(logger(ctx), AV_LOG_ERROR, "=== poll error %d (%s): events=%#x, cap buffers=%d\n", ++ err, strerror(err), ++ e, count_in_driver(ctx)); ++ return NULL; + } ++ ++ // ret == 0 (timeout) ++ if (no_rx_means_done) { ++ av_log(logger(ctx), AV_LOG_DEBUG, "Ctx done on timeout\n"); ++ ret = ctx_done(ctx); ++ if (ret > 0) ++ goto start; ++ } ++ if (timeout == -1) ++ av_log(logger(ctx), AV_LOG_ERROR, "=== poll unexpected TIMEOUT: events=%#x, cap buffers=%d\n", e, count_in_driver(ctx));; return NULL; } -@@ -398,23 +421,43 @@ dequeue: - if (ret) { - if (errno != EAGAIN) { - ctx->done = 1; +@@ -343,7 +450,8 @@ start: + no need to raise a warning */ + if (timeout == 0) { + for (i = 0; i < ctx->num_buffers; i++) { +- if (ctx->buffers[i].status != V4L2BUF_AVAILABLE) ++ avbuf = (V4L2Buffer *)ctx->bufrefs[i]->data; ++ if (avbuf->status != V4L2BUF_AVAILABLE) + av_log(logger(ctx), AV_LOG_WARNING, "%s POLLERR\n", ctx->name); + } + } +@@ -361,22 +469,25 @@ start: + ctx->done = 1; + return NULL; + } +- if (ret) { +- /* if re-init was successful drop the buffer (if there was one) +- * since we had to reconfigure capture (unmap all buffers) +- */ +- return NULL; +- } ++ if (ret > 0) ++ goto start; + } + + /* 2. dequeue the buffer */ + if (pfd.revents & (POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM)) { + +- if (!V4L2_TYPE_IS_OUTPUT(ctx->type)) { ++ if (is_capture) { + /* there is a capture buffer ready */ + if (pfd.revents & (POLLIN | POLLRDNORM)) + goto dequeue; + ++ // CAPTURE Q drained ++ if (no_rx_means_done) { ++ if (ctx_done(ctx) > 0) ++ goto start; ++ return NULL; ++ } ++ + /* the driver is ready to accept more input; instead of waiting for the capture + * buffer to complete we return NULL so input can proceed (we are single threaded) + */ +@@ -394,37 +505,58 @@ dequeue: + buf.m.planes = planes; + } + +- ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_DQBUF, &buf); +- if (ret) { +- if (errno != EAGAIN) { +- ctx->done = 1; - if (errno != EPIPE) -+// if (errno != EPIPE) ++ while ((ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_DQBUF, &buf)) == -1) { ++ const int err = errno; ++ if (err == EINTR) ++ continue; ++ if (err != EAGAIN) { ++ // EPIPE on CAPTURE can be used instead of BUF_FLAG_LAST ++ if (err != EPIPE || !is_capture) av_log(logger(ctx), AV_LOG_DEBUG, "%s VIDIOC_DQBUF, errno (%s)\n", - ctx->name, av_err2str(AVERROR(errno))); +- ctx->name, av_err2str(AVERROR(errno))); ++ ctx->name, av_err2str(AVERROR(err))); ++ if (ctx_done(ctx) > 0) ++ goto start; } return NULL; } + --ctx->q_count; -+ av_log(logger(ctx), AV_LOG_TRACE, "--- %s VIDIOC_DQBUF OK: index=%d, count=%d\n", -+ ctx->name, buf.index, ctx->q_count); ++ av_log(logger(ctx), AV_LOG_DEBUG, "--- %s VIDIOC_DQBUF OK: index=%d, ts=%ld.%06ld, count=%d, dq=%d\n", ++ ctx->name, buf.index, ++ buf.timestamp.tv_sec, buf.timestamp.tv_usec, ++ ctx->q_count, ++ctx->dq_count); + ++ avbuf = (V4L2Buffer *)ctx->bufrefs[buf.index]->data; ++ avbuf->status = V4L2BUF_AVAILABLE; ++ avbuf->buf = buf; ++ if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) { ++ memcpy(avbuf->planes, planes, sizeof(planes)); ++ avbuf->buf.m.planes = avbuf->planes; ++ } - if (ctx_to_m2mctx(ctx)->draining && 
!V4L2_TYPE_IS_OUTPUT(ctx->type)) { +- if (ctx_to_m2mctx(ctx)->draining && !V4L2_TYPE_IS_OUTPUT(ctx->type)) { ++ if (ctx_to_m2mctx(ctx)->draining && is_capture) { int bytesused = V4L2_TYPE_IS_MULTIPLANAR(buf.type) ? buf.m.planes[0].bytesused : buf.bytesused; if (bytesused == 0) { -+ av_log(logger(ctx), AV_LOG_TRACE, "Buffer empty - reQ\n"); +- ctx->done = 1; ++ av_log(logger(ctx), AV_LOG_DEBUG, "Buffer empty - reQ\n"); + + // Must reQ so we don't leak -+ ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_QBUF, &buf); -+ if (ret) { -+ av_log(logger(ctx), AV_LOG_WARNING, "%s VIDIOC_QBUF, errno (%s): reQ empty buf failed\n", -+ ctx->name, av_err2str(AVERROR(errno))); -+ } -+ else { -+ ++ctx->q_count; -+ av_log(logger(ctx), AV_LOG_TRACE, "--- %s VIDIOC_QBUF OK: index=%d, count=%d\n", -+ ctx->name, buf.index, ctx->q_count); -+ } ++ // May not matter if the next thing we do is release all the ++ // buffers but better to be tidy. ++ ff_v4l2_buffer_enqueue(avbuf); + - ctx->done = 1; ++ if (ctx_done(ctx) > 0) ++ goto start; return NULL; } #ifdef V4L2_BUF_FLAG_LAST - if (buf.flags & V4L2_BUF_FLAG_LAST) -+ if (buf.flags & V4L2_BUF_FLAG_LAST){ +- ctx->done = 1; ++ if (buf.flags & V4L2_BUF_FLAG_LAST) { + av_log(logger(ctx), AV_LOG_TRACE, "FLAG_LAST set\n"); - ctx->done = 1; ++ avbuf->status = V4L2BUF_IN_USE; // Avoid flushing this buffer ++ ctx_done(ctx); + } #endif } -@@ -452,25 +495,62 @@ static V4L2Buffer* v4l2_getfree_v4l2buf(V4L2Context *ctx) +- avbuf = &ctx->buffers[buf.index]; +- avbuf->status = V4L2BUF_AVAILABLE; +- avbuf->buf = buf; +- if (V4L2_TYPE_IS_MULTIPLANAR(ctx->type)) { +- memcpy(avbuf->planes, planes, sizeof(planes)); +- avbuf->buf.m.planes = avbuf->planes; +- } + return avbuf; + } + +@@ -443,8 +575,9 @@ static V4L2Buffer* v4l2_getfree_v4l2buf(V4L2Context *ctx) + } + + for (i = 0; i < ctx->num_buffers; i++) { +- if (ctx->buffers[i].status == V4L2BUF_AVAILABLE) +- return &ctx->buffers[i]; ++ V4L2Buffer * const avbuf = (V4L2Buffer *)ctx->bufrefs[i]->data; ++ if (avbuf->status == V4L2BUF_AVAILABLE) ++ return avbuf; + } + + return NULL; +@@ -452,25 +585,45 @@ static V4L2Buffer* v4l2_getfree_v4l2buf(V4L2Context *ctx) static int v4l2_release_buffers(V4L2Context* ctx) { @@ -47374,68 +47412,58 @@ index 29b144ed73..a8590d0ea1 100644 - .type = ctx->type, - .count = 0, /* 0 -> unmaps buffers from the driver */ - }; - int i, j; +- int i, j; ++ int i; + int ret = 0; + const int fd = ctx_to_m2mctx(ctx)->fd; - for (i = 0; i < ctx->num_buffers; i++) { - V4L2Buffer *buffer = &ctx->buffers[i]; - - for (j = 0; j < buffer->num_planes; j++) { - struct V4L2Plane_info *p = &buffer->plane_info[j]; +- for (i = 0; i < ctx->num_buffers; i++) { +- V4L2Buffer *buffer = &ctx->buffers[i]; ++ // Orphan any buffers in the wild ++ ff_weak_link_break(&ctx->wl_master); + -+ if (V4L2_TYPE_IS_OUTPUT(ctx->type)) { -+ /* output buffers are not EXPORTED */ -+ goto unmap; -+ } ++ if (ctx->bufrefs) { ++ for (i = 0; i < ctx->num_buffers; i++) ++ av_buffer_unref(ctx->bufrefs + i); ++ } + -+ if (ctx_to_m2mctx(ctx)->output_drm) { -+ /* use the DRM frame to close */ -+ if (buffer->drm_frame.objects[j].fd >= 0) { -+ if (close(buffer->drm_frame.objects[j].fd) < 0) { -+ av_log(logger(ctx), AV_LOG_ERROR, "%s close drm fd " -+ "[buffer=%2d, plane=%d, fd=%2d] - %s \n", -+ ctx->name, i, j, buffer->drm_frame.objects[j].fd, -+ av_err2str(AVERROR(errno))); -+ } -+ } -+ } -+unmap: - if (p->mm_addr && p->length) - if (munmap(p->mm_addr, p->length) < 0) -- av_log(logger(ctx), AV_LOG_ERROR, "%s unmap plane (%s))\n", ctx->name, 
av_err2str(AVERROR(errno))); -+ av_log(logger(ctx), AV_LOG_ERROR, "%s unmap plane (%s))\n", -+ ctx->name, av_err2str(AVERROR(errno))); - } - } - -- return ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_REQBUFS, &req); + if (fd != -1) { + struct v4l2_requestbuffers req = { + .memory = V4L2_MEMORY_MMAP, + .type = ctx->type, + .count = 0, /* 0 -> unmap all buffers from the driver */ + }; + +- for (j = 0; j < buffer->num_planes; j++) { +- struct V4L2Plane_info *p = &buffer->plane_info[j]; +- if (p->mm_addr && p->length) +- if (munmap(p->mm_addr, p->length) < 0) +- av_log(logger(ctx), AV_LOG_ERROR, "%s unmap plane (%s))\n", ctx->name, av_err2str(AVERROR(errno))); ++ while ((ret = ioctl(fd, VIDIOC_REQBUFS, &req)) == -1) { ++ if (errno == EINTR) ++ continue; + -+ ret = ioctl(fd, VIDIOC_REQBUFS, &req); -+ if (ret < 0) { -+ av_log(logger(ctx), AV_LOG_ERROR, "release all %s buffers (%s)\n", -+ ctx->name, av_err2str(AVERROR(errno))); ++ ret = AVERROR(errno); + -+ if (ctx_to_m2mctx(ctx)->output_drm) -+ av_log(logger(ctx), AV_LOG_ERROR, -+ "Make sure the DRM client releases all FB/GEM objects before closing the codec (ie):\n" -+ "for all buffers: \n" -+ " 1. drmModeRmFB(..)\n" -+ " 2. drmIoctl(.., DRM_IOCTL_GEM_CLOSE,... )\n"); -+ } -+ } ++ av_log(logger(ctx), AV_LOG_ERROR, "release all %s buffers (%s)\n", ++ ctx->name, av_err2str(AVERROR(errno))); + ++ if (ctx_to_m2mctx(ctx)->output_drm) ++ av_log(logger(ctx), AV_LOG_ERROR, ++ "Make sure the DRM client releases all FB/GEM objects before closing the codec (ie):\n" ++ "for all buffers: \n" ++ " 1. drmModeRmFB(..)\n" ++ " 2. drmIoctl(.., DRM_IOCTL_GEM_CLOSE,... )\n"); + } + } ++ ctx->q_count = 0; + +- return ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_REQBUFS, &req); + return ret; } static inline int v4l2_try_raw_format(V4L2Context* ctx, enum AVPixelFormat pixfmt) -@@ -499,6 +579,8 @@ static inline int v4l2_try_raw_format(V4L2Context* ctx, enum AVPixelFormat pixfm +@@ -499,6 +652,8 @@ static inline int v4l2_try_raw_format(V4L2Context* ctx, enum AVPixelFormat pixfm static int v4l2_get_raw_format(V4L2Context* ctx, enum AVPixelFormat *p) { @@ -47444,7 +47472,7 @@ index 29b144ed73..a8590d0ea1 100644 enum AVPixelFormat pixfmt = ctx->av_pix_fmt; struct v4l2_fmtdesc fdesc; int ret; -@@ -517,6 +599,13 @@ static int v4l2_get_raw_format(V4L2Context* ctx, enum AVPixelFormat *p) +@@ -517,6 +672,13 @@ static int v4l2_get_raw_format(V4L2Context* ctx, enum AVPixelFormat *p) if (ret) return AVERROR(EINVAL); @@ -47458,26 +47486,85 @@ index 29b144ed73..a8590d0ea1 100644 pixfmt = ff_v4l2_format_v4l2_to_avfmt(fdesc.pixelformat, AV_CODEC_ID_RAWVIDEO); ret = v4l2_try_raw_format(ctx, pixfmt); if (ret){ -@@ -575,10 +664,16 @@ int ff_v4l2_context_set_status(V4L2Context* ctx, uint32_t cmd) +@@ -569,18 +731,73 @@ static int v4l2_get_coded_format(V4L2Context* ctx, uint32_t *p) + * + *****************************************************************************/ + ++ ++static void flush_all_buffers_status(V4L2Context* const ctx) ++{ ++ int i; ++ for (i = 0; i < ctx->num_buffers; ++i) { ++ struct V4L2Buffer * const buf = (struct V4L2Buffer *)ctx->bufrefs[i]->data; ++ if (buf->status == V4L2BUF_IN_DRIVER) ++ buf->status = V4L2BUF_AVAILABLE; ++ } ++ ctx->q_count = 0; ++} ++ ++static int stuff_all_buffers(AVCodecContext * avctx, V4L2Context* ctx) ++{ ++ int i; ++ int rv; ++ ++ if (!ctx->bufrefs) { ++ rv = ff_v4l2_context_init(ctx); ++ if (rv) { ++ av_log(avctx, AV_LOG_ERROR, "can't request capture buffers\n"); ++ return rv; ++ } ++ } ++ ++ for (i = 0; i < ctx->num_buffers; ++i) { ++ struct V4L2Buffer * 
const buf = (struct V4L2Buffer *)ctx->bufrefs[i]->data; ++ if (buf->status == V4L2BUF_AVAILABLE) { ++ rv = ff_v4l2_buffer_enqueue(buf); ++ if (rv < 0) ++ return rv; ++ } ++ } ++ return 0; ++} ++ + int ff_v4l2_context_set_status(V4L2Context* ctx, uint32_t cmd) + { + int type = ctx->type; int ret; ++ AVCodecContext * const avctx = logger(ctx); ++ ++ ff_mutex_lock(&ctx->lock); ++ ++ if (cmd == VIDIOC_STREAMON && !V4L2_TYPE_IS_OUTPUT(ctx->type)) ++ stuff_all_buffers(avctx, ctx); ret = ioctl(ctx_to_m2mctx(ctx)->fd, cmd, &type); - if (ret < 0) - return AVERROR(errno); + if (ret < 0) { + const int err = errno; -+ av_log(logger(ctx), AV_LOG_ERROR, "%s set status %d (%s) failed: err=%d\n", ctx->name, ++ av_log(avctx, AV_LOG_ERROR, "%s set status %d (%s) failed: err=%d\n", ctx->name, + cmd, (cmd == VIDIOC_STREAMON) ? "ON" : "OFF", err); -+ return AVERROR(err); ++ ret = AVERROR(err); ++ } ++ else ++ { ++ if (cmd == VIDIOC_STREAMOFF) ++ flush_all_buffers_status(ctx); + +- ctx->streamon = (cmd == VIDIOC_STREAMON); ++ ctx->streamon = (cmd == VIDIOC_STREAMON); ++ av_log(avctx, AV_LOG_DEBUG, "%s set status %d (%s) OK\n", ctx->name, ++ cmd, (cmd == VIDIOC_STREAMON) ? "ON" : "OFF"); + } - ctx->streamon = (cmd == VIDIOC_STREAMON); -+ av_log(logger(ctx), AV_LOG_DEBUG, "%s set status %d (%s) OK\n", ctx->name, -+ cmd, (cmd == VIDIOC_STREAMON) ? "ON" : "OFF"); - - return 0; +- return 0; ++ ff_mutex_unlock(&ctx->lock); ++ ++ return ret; } -@@ -608,7 +703,8 @@ int ff_v4l2_context_enqueue_frame(V4L2Context* ctx, const AVFrame* frame) + + int ff_v4l2_context_enqueue_frame(V4L2Context* ctx, const AVFrame* frame) +@@ -608,7 +825,8 @@ int ff_v4l2_context_enqueue_frame(V4L2Context* ctx, const AVFrame* frame) return ff_v4l2_buffer_enqueue(avbuf); } @@ -47487,7 +47574,7 @@ index 29b144ed73..a8590d0ea1 100644 { V4L2m2mContext *s = ctx_to_m2mctx(ctx); V4L2Buffer* avbuf; -@@ -616,8 +712,9 @@ int ff_v4l2_context_enqueue_packet(V4L2Context* ctx, const AVPacket* pkt) +@@ -616,8 +834,9 @@ int ff_v4l2_context_enqueue_packet(V4L2Context* ctx, const AVPacket* pkt) if (!pkt->size) { ret = v4l2_stop_decode(ctx); @@ -47498,13 +47585,17 @@ index 29b144ed73..a8590d0ea1 100644 s->draining = 1; return 0; } -@@ -626,14 +723,14 @@ int ff_v4l2_context_enqueue_packet(V4L2Context* ctx, const AVPacket* pkt) +@@ -626,14 +845,17 @@ int ff_v4l2_context_enqueue_packet(V4L2Context* ctx, const AVPacket* pkt) if (!avbuf) return AVERROR(EAGAIN); - ret = ff_v4l2_buffer_avpkt_to_buf(pkt, avbuf); +- if (ret) + ret = ff_v4l2_buffer_avpkt_to_buf_ext(pkt, avbuf, extdata, extlen, no_rescale_pts); - if (ret) ++ if (ret == AVERROR(ENOMEM)) ++ av_log(logger(ctx), AV_LOG_ERROR, "Buffer overflow in %s: pkt->size=%d > buf->length=%d\n", ++ __func__, pkt->size, avbuf->planes[0].length); ++ else if (ret) return ret; return ff_v4l2_buffer_enqueue(avbuf); @@ -47515,7 +47606,7 @@ index 29b144ed73..a8590d0ea1 100644 { V4L2Buffer *avbuf; -@@ -650,7 +747,7 @@ int ff_v4l2_context_dequeue_frame(V4L2Context* ctx, AVFrame* frame, int timeout) +@@ -650,7 +872,7 @@ int ff_v4l2_context_dequeue_frame(V4L2Context* ctx, AVFrame* frame, int timeout) return AVERROR(EAGAIN); } @@ -47524,20 +47615,247 @@ index 29b144ed73..a8590d0ea1 100644 } int ff_v4l2_context_dequeue_packet(V4L2Context* ctx, AVPacket* pkt) +@@ -702,78 +924,161 @@ int ff_v4l2_context_get_format(V4L2Context* ctx, int probe) + + int ff_v4l2_context_set_format(V4L2Context* ctx) + { +- return ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_S_FMT, &ctx->format); ++ int ret; ++ ++ av_log(logger(ctx), AV_LOG_INFO, "Try with %d\n", 
ctx->format.fmt.pix_mp.plane_fmt[0].sizeimage); ++ ++ ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_S_FMT, &ctx->format); ++ av_log(logger(ctx), AV_LOG_INFO, "Got %d\n", ctx->format.fmt.pix_mp.plane_fmt[0].sizeimage); ++ if (ret != 0) ++ return ret; ++ ++ // Check returned size against min size and if smaller have another go ++ // Only worry about plane[0] as this is meant to enforce limits for ++ // encoded streams where we might know a bit more about the shape ++ // than the driver ++ if (V4L2_TYPE_IS_MULTIPLANAR(ctx->format.type)) { ++ if (ctx->min_buf_size <= ctx->format.fmt.pix_mp.plane_fmt[0].sizeimage) ++ return 0; ++ ctx->format.fmt.pix_mp.plane_fmt[0].sizeimage = ctx->min_buf_size; ++ } ++ else { ++ if (ctx->min_buf_size <= ctx->format.fmt.pix.sizeimage) ++ return 0; ++ ctx->format.fmt.pix.sizeimage = ctx->min_buf_size; ++ } ++ ++ av_log(logger(ctx), AV_LOG_INFO, "Retry with %d\n", ctx->format.fmt.pix_mp.plane_fmt[0].sizeimage); ++ ++ ret = ioctl(ctx_to_m2mctx(ctx)->fd, VIDIOC_S_FMT, &ctx->format); ++ av_log(logger(ctx), AV_LOG_INFO, "Got %d\n", ctx->format.fmt.pix_mp.plane_fmt[0].sizeimage); ++ return ret; + } + + void ff_v4l2_context_release(V4L2Context* ctx) + { + int ret; + +- if (!ctx->buffers) ++ if (!ctx->bufrefs) + return; + + ret = v4l2_release_buffers(ctx); + if (ret) + av_log(logger(ctx), AV_LOG_WARNING, "V4L2 failed to unmap the %s buffers\n", ctx->name); + +- av_freep(&ctx->buffers); ++ av_freep(&ctx->bufrefs); ++ av_buffer_unref(&ctx->frames_ref); ++ ++ ff_mutex_destroy(&ctx->lock); + } + +-int ff_v4l2_context_init(V4L2Context* ctx) ++ ++static int create_buffers(V4L2Context* const ctx, const unsigned int req_buffers) + { +- V4L2m2mContext *s = ctx_to_m2mctx(ctx); ++ V4L2m2mContext * const s = ctx_to_m2mctx(ctx); + struct v4l2_requestbuffers req; +- int ret, i; +- +- if (!v4l2_type_supported(ctx)) { +- av_log(logger(ctx), AV_LOG_ERROR, "type %i not supported\n", ctx->type); +- return AVERROR_PATCHWELCOME; +- } +- +- ret = ioctl(s->fd, VIDIOC_G_FMT, &ctx->format); +- if (ret) +- av_log(logger(ctx), AV_LOG_ERROR, "%s VIDIOC_G_FMT failed\n", ctx->name); ++ int ret; ++ int i; + + memset(&req, 0, sizeof(req)); +- req.count = ctx->num_buffers; ++ req.count = req_buffers; + req.memory = V4L2_MEMORY_MMAP; + req.type = ctx->type; +- ret = ioctl(s->fd, VIDIOC_REQBUFS, &req); +- if (ret < 0) { +- av_log(logger(ctx), AV_LOG_ERROR, "%s VIDIOC_REQBUFS failed: %s\n", ctx->name, strerror(errno)); +- return AVERROR(errno); ++ while ((ret = ioctl(s->fd, VIDIOC_REQBUFS, &req)) == -1) { ++ if (errno != EINTR) { ++ ret = AVERROR(errno); ++ av_log(logger(ctx), AV_LOG_ERROR, "%s VIDIOC_REQBUFS failed: %s\n", ctx->name, av_err2str(ret)); ++ return ret; ++ } + } + + ctx->num_buffers = req.count; +- ctx->buffers = av_mallocz(ctx->num_buffers * sizeof(V4L2Buffer)); +- if (!ctx->buffers) { ++ ctx->bufrefs = av_mallocz(ctx->num_buffers * sizeof(*ctx->bufrefs)); ++ if (!ctx->bufrefs) { + av_log(logger(ctx), AV_LOG_ERROR, "%s malloc enomem\n", ctx->name); +- return AVERROR(ENOMEM); ++ goto fail_release; + } + +- for (i = 0; i < req.count; i++) { +- ctx->buffers[i].context = ctx; +- ret = ff_v4l2_buffer_initialize(&ctx->buffers[i], i); +- if (ret < 0) { ++ ctx->wl_master = ff_weak_link_new(ctx); ++ if (!ctx->wl_master) { ++ ret = AVERROR(ENOMEM); ++ goto fail_release; ++ } ++ ++ for (i = 0; i < ctx->num_buffers; i++) { ++ ret = ff_v4l2_buffer_initialize(&ctx->bufrefs[i], i, ctx); ++ if (ret) { + av_log(logger(ctx), AV_LOG_ERROR, "%s buffer[%d] initialization (%s)\n", ctx->name, i, av_err2str(ret)); +- 
goto error; ++ goto fail_release; + } + } + + av_log(logger(ctx), AV_LOG_DEBUG, "%s: %s %02d buffers initialized: %04ux%04u, sizeimage %08u, bytesperline %08u\n", ctx->name, + V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ? av_fourcc2str(ctx->format.fmt.pix_mp.pixelformat) : av_fourcc2str(ctx->format.fmt.pix.pixelformat), + req.count, +- v4l2_get_width(&ctx->format), +- v4l2_get_height(&ctx->format), ++ ff_v4l2_get_format_width(&ctx->format), ++ ff_v4l2_get_format_height(&ctx->format), + V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ? ctx->format.fmt.pix_mp.plane_fmt[0].sizeimage : ctx->format.fmt.pix.sizeimage, + V4L2_TYPE_IS_MULTIPLANAR(ctx->type) ? ctx->format.fmt.pix_mp.plane_fmt[0].bytesperline : ctx->format.fmt.pix.bytesperline); + + return 0; + +-error: ++fail_release: + v4l2_release_buffers(ctx); ++ av_freep(&ctx->bufrefs); ++ return ret; ++} ++ ++int ff_v4l2_context_init(V4L2Context* ctx) ++{ ++ V4L2m2mContext * const s = ctx_to_m2mctx(ctx); ++ int ret; ++ ++ // It is not valid to reinit a context without a previous release ++ av_assert0(ctx->bufrefs == NULL); ++ ++ if (!v4l2_type_supported(ctx)) { ++ av_log(logger(ctx), AV_LOG_ERROR, "type %i not supported\n", ctx->type); ++ return AVERROR_PATCHWELCOME; ++ } + +- av_freep(&ctx->buffers); ++ ff_mutex_init(&ctx->lock, NULL); ++ ++ if (s->output_drm) { ++ AVHWFramesContext *hwframes; ++ ++ ctx->frames_ref = av_hwframe_ctx_alloc(s->device_ref); ++ if (!ctx->frames_ref) { ++ ret = AVERROR(ENOMEM); ++ goto fail_unlock; ++ } ++ ++ hwframes = (AVHWFramesContext*)ctx->frames_ref->data; ++ hwframes->format = AV_PIX_FMT_DRM_PRIME; ++ hwframes->sw_format = ctx->av_pix_fmt; ++ hwframes->width = ctx->width; ++ hwframes->height = ctx->height; ++ ret = av_hwframe_ctx_init(ctx->frames_ref); ++ if (ret < 0) ++ goto fail_unref_hwframes; ++ } ++ ++ ret = ioctl(s->fd, VIDIOC_G_FMT, &ctx->format); ++ if (ret) { ++ ret = AVERROR(errno); ++ av_log(logger(ctx), AV_LOG_ERROR, "%s VIDIOC_G_FMT failed: %s\n", ctx->name, av_err2str(ret)); ++ goto fail_unref_hwframes; ++ } ++ ++ ret = create_buffers(ctx, ctx->num_buffers); ++ if (ret < 0) ++ goto fail_unref_hwframes; ++ ++ return 0; + ++fail_unref_hwframes: ++ av_buffer_unref(&ctx->frames_ref); ++fail_unlock: ++ ff_mutex_destroy(&ctx->lock); + return ret; + } diff --git a/libavcodec/v4l2_context.h b/libavcodec/v4l2_context.h -index 22a9532444..5588e4a460 100644 +index 22a9532444..70190e3079 100644 --- a/libavcodec/v4l2_context.h +++ b/libavcodec/v4l2_context.h -@@ -92,6 +92,8 @@ typedef struct V4L2Context { +@@ -31,6 +31,7 @@ + #include "libavutil/pixfmt.h" + #include "libavutil/frame.h" + #include "libavutil/buffer.h" ++#include "libavutil/thread.h" + #include "v4l2_buffers.h" + + typedef struct V4L2Context { +@@ -70,11 +71,18 @@ typedef struct V4L2Context { + */ + int width, height; + AVRational sample_aspect_ratio; ++ struct v4l2_rect selection; + + /** +- * Indexed array of V4L2Buffers ++ * If the default size of buffer is less than this then try to ++ * set to this. + */ +- V4L2Buffer *buffers; ++ uint32_t min_buf_size; ++ ++ /** ++ * Indexed array of pointers to V4L2Buffers ++ */ ++ AVBufferRef **bufrefs; + + /** + * Readonly after init. 
+@@ -92,6 +100,12 @@ typedef struct V4L2Context { */ int done; + AVBufferRef *frames_ref; + int q_count; ++ int dq_count; ++ struct ff_weak_link_master *wl_master; ++ ++ AVMutex lock; } V4L2Context; /** -@@ -156,9 +158,12 @@ int ff_v4l2_context_dequeue_packet(V4L2Context* ctx, AVPacket* pkt); +@@ -156,9 +170,12 @@ int ff_v4l2_context_dequeue_packet(V4L2Context* ctx, AVPacket* pkt); * @param[in] ctx The V4L2Context to dequeue from. * @param[inout] f The AVFrame to dequeue to. * @param[in] timeout The timeout for dequeue (-1 to block, 0 to return immediately, or milliseconds) @@ -47551,7 +47869,7 @@ index 22a9532444..5588e4a460 100644 /** * Enqueues a buffer to a V4L2Context from an AVPacket -@@ -170,7 +175,7 @@ int ff_v4l2_context_dequeue_frame(V4L2Context* ctx, AVFrame* f, int timeout); +@@ -170,7 +187,7 @@ int ff_v4l2_context_dequeue_frame(V4L2Context* ctx, AVFrame* f, int timeout); * @param[in] pkt A pointer to an AVPacket. * @return 0 in case of success, a negative error otherwise. */ @@ -47561,10 +47879,24 @@ index 22a9532444..5588e4a460 100644 /** * Enqueues a buffer to a V4L2Context from an AVFrame diff --git a/libavcodec/v4l2_m2m.c b/libavcodec/v4l2_m2m.c -index e48b3a8ccf..092b750dc4 100644 +index e48b3a8ccf..4f3bcd3a51 100644 --- a/libavcodec/v4l2_m2m.c +++ b/libavcodec/v4l2_m2m.c -@@ -328,7 +328,10 @@ static void v4l2_m2m_destroy_context(void *opaque, uint8_t *context) +@@ -215,13 +215,7 @@ int ff_v4l2_m2m_codec_reinit(V4L2m2mContext *s) + av_log(log_ctx, AV_LOG_ERROR, "capture VIDIOC_STREAMOFF\n"); + + /* 2. unmap the capture buffers (v4l2 and ffmpeg): +- * we must wait for all references to be released before being allowed +- * to queue new buffers. + */ +- av_log(log_ctx, AV_LOG_DEBUG, "waiting for user to release AVBufferRefs\n"); +- if (atomic_load(&s->refcount)) +- while(sem_wait(&s->refsync) == -1 && errno == EINTR); +- + ff_v4l2_context_release(&s->capture); + + /* 3. 
get the new capture format */ +@@ -328,7 +322,10 @@ static void v4l2_m2m_destroy_context(void *opaque, uint8_t *context) ff_v4l2_context_release(&s->capture); sem_destroy(&s->refsync); @@ -47576,7 +47908,7 @@ index e48b3a8ccf..092b750dc4 100644 av_free(s); } -@@ -338,17 +341,34 @@ int ff_v4l2_m2m_codec_end(V4L2m2mPriv *priv) +@@ -338,17 +335,34 @@ int ff_v4l2_m2m_codec_end(V4L2m2mPriv *priv) V4L2m2mContext *s = priv->context; int ret; @@ -47618,7 +47950,7 @@ index e48b3a8ccf..092b750dc4 100644 return 0; diff --git a/libavcodec/v4l2_m2m.h b/libavcodec/v4l2_m2m.h -index 456281f48c..b08a5b38ac 100644 +index 456281f48c..6f2d0d75eb 100644 --- a/libavcodec/v4l2_m2m.h +++ b/libavcodec/v4l2_m2m.h @@ -30,6 +30,7 @@ @@ -47649,7 +47981,15 @@ index 456281f48c..b08a5b38ac 100644 typedef struct V4L2m2mContext { char devname[PATH_MAX]; -@@ -63,6 +75,23 @@ typedef struct V4L2m2mContext { +@@ -53,6 +65,7 @@ typedef struct V4L2m2mContext { + sem_t refsync; + atomic_uint refcount; + int reinit; ++ int resize_pending; + + /* null frame/packet received */ + int draining; +@@ -63,6 +76,23 @@ typedef struct V4L2m2mContext { /* reference back to V4L2m2mPriv */ void *priv; @@ -47673,7 +48013,7 @@ index 456281f48c..b08a5b38ac 100644 } V4L2m2mContext; typedef struct V4L2m2mPriv { -@@ -73,6 +102,7 @@ typedef struct V4L2m2mPriv { +@@ -73,6 +103,7 @@ typedef struct V4L2m2mPriv { int num_output_buffers; int num_capture_buffers; @@ -47681,8 +48021,25 @@ index 456281f48c..b08a5b38ac 100644 } V4L2m2mPriv; /** +@@ -126,4 +157,16 @@ int ff_v4l2_m2m_codec_reinit(V4L2m2mContext *ctx); + */ + int ff_v4l2_m2m_codec_full_reinit(V4L2m2mContext *ctx); + ++ ++static inline unsigned int ff_v4l2_get_format_width(struct v4l2_format *fmt) ++{ ++ return V4L2_TYPE_IS_MULTIPLANAR(fmt->type) ? fmt->fmt.pix_mp.width : fmt->fmt.pix.width; ++} ++ ++static inline unsigned int ff_v4l2_get_format_height(struct v4l2_format *fmt) ++{ ++ return V4L2_TYPE_IS_MULTIPLANAR(fmt->type) ? fmt->fmt.pix_mp.height : fmt->fmt.pix.height; ++} ++ ++ + #endif /* AVCODEC_V4L2_M2M_H */ diff --git a/libavcodec/v4l2_m2m_dec.c b/libavcodec/v4l2_m2m_dec.c -index 3e17e0fcac..a02012bf44 100644 +index 3e17e0fcac..b9f28220a8 100644 --- a/libavcodec/v4l2_m2m_dec.c +++ b/libavcodec/v4l2_m2m_dec.c @@ -23,6 +23,10 @@ @@ -47756,7 +48113,7 @@ index 3e17e0fcac..a02012bf44 100644 if (capture->streamon) return 0; -@@ -63,8 +92,14 @@ static int v4l2_try_start(AVCodecContext *avctx) +@@ -63,15 +92,29 @@ static int v4l2_try_start(AVCodecContext *avctx) } /* 2.1 update the AVCodecContext */ @@ -47772,8 +48129,41 @@ index 3e17e0fcac..a02012bf44 100644 + avctx->pix_fmt = capture->av_pix_fmt; /* 3. 
set the crop parameters */ ++#if 1 ++ selection.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; ++ selection.target = V4L2_SEL_TGT_CROP_DEFAULT; ++ ret = ioctl(s->fd, VIDIOC_G_SELECTION, &selection); ++ av_log(avctx, AV_LOG_INFO, "Post G selection ret=%d, err=%d %dx%d\n", ret, errno, selection.r.width, selection.r.height); ++#else selection.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; -@@ -133,54 +168,293 @@ static int v4l2_prepare_decoder(V4L2m2mContext *s) + selection.r.height = avctx->coded_height; + selection.r.width = avctx->coded_width; ++ av_log(avctx, AV_LOG_INFO, "Try selection %dx%d\n", avctx->coded_width, avctx->coded_height); + ret = ioctl(s->fd, VIDIOC_S_SELECTION, &selection); +- if (!ret) { ++ av_log(avctx, AV_LOG_INFO, "Post S selection ret=%d, err=%d %dx%d\n", ret, errno, selection.r.width, selection.r.height); ++ if (1) { + ret = ioctl(s->fd, VIDIOC_G_SELECTION, &selection); + if (ret) { + av_log(avctx, AV_LOG_WARNING, "VIDIOC_G_SELECTION ioctl\n"); +@@ -82,15 +125,7 @@ static int v4l2_try_start(AVCodecContext *avctx) + capture->width = selection.r.width; + } + } +- +- /* 4. init the capture context now that we have the capture format */ +- if (!capture->buffers) { +- ret = ff_v4l2_context_init(capture); +- if (ret) { +- av_log(avctx, AV_LOG_ERROR, "can't request capture buffers\n"); +- return AVERROR(ENOMEM); +- } +- } ++#endif + + /* 5. start the capture process */ + ret = ff_v4l2_context_set_status(capture, VIDIOC_STREAMON); +@@ -133,52 +168,312 @@ static int v4l2_prepare_decoder(V4L2m2mContext *s) return 0; } @@ -48070,8 +48460,8 @@ index 3e17e0fcac..a02012bf44 100644 + src_rv < 0 ? src_rv : + dst_rv < 0 ? dst_rv : + AVERROR(EAGAIN); - } - ++} ++ +#if 0 +#include +static int64_t us_time(void) @@ -48094,33 +48484,66 @@ index 3e17e0fcac..a02012bf44 100644 +} +#endif + ++static uint32_t max_coded_size(const AVCodecContext * const avctx) ++{ ++ uint32_t wxh = avctx->coded_width * avctx->coded_height; ++ uint32_t size; ++ ++ // Currently the only thing we try to set our own limits for is H264 ++ if (avctx->codec_id != AV_CODEC_ID_H264) ++ return 0; ++ ++ size = wxh * 3 / 2; ++ // H.264 Annex A table A-1 gives minCR which is either 2 or 4 ++ // unfortunately that doesn't yield an actually useful limit ++ // and it should be noted that frame 0 is special cased to allow ++ // a bigger number which really isn't helpful for us. 
So just pick ++ // frame_size / 2 ++ size /= 2; ++ // Add 64k to allow for any overheads and/or encoder hopefulness ++ // with small WxH ++ return size + (1 << 16); + } + static av_cold int v4l2_decode_init(AVCodecContext *avctx) - { +@@ -186,8 +481,12 @@ static av_cold int v4l2_decode_init(AVCodecContext *avctx) V4L2Context *capture, *output; -@@ -188,6 +462,9 @@ static av_cold int v4l2_decode_init(AVCodecContext *avctx) + V4L2m2mContext *s; V4L2m2mPriv *priv = avctx->priv_data; ++ int gf_pix_fmt; int ret; + av_log(avctx, AV_LOG_TRACE, "<<< %s\n", __func__); -+ avctx->pix_fmt = AV_PIX_FMT_DRM_PRIME; + ++ av_log(avctx, AV_LOG_INFO, "level=%d\n", avctx->level); ret = ff_v4l2_m2m_create_context(priv, &s); if (ret < 0) return ret; -@@ -208,13 +485,32 @@ static av_cold int v4l2_decode_init(AVCodecContext *avctx) +@@ -204,17 +503,43 @@ static av_cold int v4l2_decode_init(AVCodecContext *avctx) + + output->av_codec_id = avctx->codec_id; + output->av_pix_fmt = AV_PIX_FMT_NONE; ++ output->min_buf_size = max_coded_size(avctx); + capture->av_codec_id = AV_CODEC_ID_RAWVIDEO; capture->av_pix_fmt = avctx->pix_fmt; - ++ capture->min_buf_size = 0; ++ + /* the client requests the codec to generate DRM frames: + * - data[0] will therefore point to the returned AVDRMFrameDescriptor + * check the ff_v4l2_buffer_to_avframe conversion function. + * - the DRM frame format is passed in the DRM frame descriptor layer. + * check the v4l2_get_drm_frame function. + */ -+ switch (ff_get_format(avctx, avctx->codec->pix_fmts)) { -+ default: ++ ++ gf_pix_fmt = ff_get_format(avctx, avctx->codec->pix_fmts); ++ av_log(avctx, AV_LOG_DEBUG, "avctx requested=%d (%s); get_format requested=%d (%s)\n", ++ avctx->pix_fmt, av_get_pix_fmt_name(avctx->pix_fmt), gf_pix_fmt, av_get_pix_fmt_name(gf_pix_fmt)); ++ ++ s->output_drm = 0; ++ if (gf_pix_fmt == AV_PIX_FMT_DRM_PRIME || avctx->pix_fmt == AV_PIX_FMT_DRM_PRIME) { ++ avctx->pix_fmt = AV_PIX_FMT_DRM_PRIME; + s->output_drm = 1; -+ break; + } + + s->device_ref = av_hwdevice_ctx_alloc(AV_HWDEVICE_TYPE_DRM); @@ -48132,7 +48555,7 @@ index 3e17e0fcac..a02012bf44 100644 + ret = av_hwdevice_ctx_init(s->device_ref); + if (ret < 0) + return ret; -+ + s->avctx = avctx; ret = ff_v4l2_m2m_codec_init(priv); if (ret) { @@ -48143,7 +48566,7 @@ index 3e17e0fcac..a02012bf44 100644 return ret; } -@@ -223,10 +519,58 @@ static av_cold int v4l2_decode_init(AVCodecContext *avctx) +@@ -223,10 +548,53 @@ static av_cold int v4l2_decode_init(AVCodecContext *avctx) static av_cold int v4l2_decode_close(AVCodecContext *avctx) { @@ -48184,11 +48607,6 @@ index 3e17e0fcac..a02012bf44 100644 + if (ret < 0) + av_log(avctx, AV_LOG_ERROR, "VIDIOC_STREAMOFF %s error: %d\n", output->name, ret); + -+ for (i = 0; i < output->num_buffers; i++) { -+ if (output->buffers[i].status == V4L2BUF_IN_DRIVER) -+ output->buffers[i].status = V4L2BUF_AVAILABLE; -+ } -+ + // V4L2 makes no guarantees about whether decoded frames are flushed or not + // so mark all frames we are tracking to be discarded if they appear + for (i = 0; i != FF_V4L2_M2M_TRACK_SIZE; ++i) @@ -48206,7 +48624,7 @@ index 3e17e0fcac..a02012bf44 100644 } #define OFFSET(x) offsetof(V4L2m2mPriv, x) -@@ -235,10 +579,16 @@ static av_cold int v4l2_decode_close(AVCodecContext *avctx) +@@ -235,10 +603,16 @@ static av_cold int v4l2_decode_close(AVCodecContext *avctx) static const AVOption options[] = { V4L_M2M_DEFAULT_OPTS, { "num_capture_buffers", "Number of buffers in the capture context", @@ -48224,7 +48642,7 @@ index 3e17e0fcac..a02012bf44 100644 #define 
M2MDEC_CLASS(NAME) \ static const AVClass v4l2_m2m_ ## NAME ## _dec_class = { \ .class_name = #NAME "_v4l2m2m_decoder", \ -@@ -259,9 +609,14 @@ static const AVOption options[] = { +@@ -259,9 +633,15 @@ static const AVOption options[] = { .init = v4l2_decode_init, \ .receive_frame = v4l2_receive_frame, \ .close = v4l2_decode_close, \ @@ -48235,218 +48653,2797 @@ index 3e17e0fcac..a02012bf44 100644 + .caps_internal = FF_CODEC_CAP_SETS_PKT_DTS | FF_CODEC_CAP_INIT_CLEANUP, \ + .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_DRM_PRIME, \ + AV_PIX_FMT_NV12, \ ++ AV_PIX_FMT_YUV420P, \ + AV_PIX_FMT_NONE}, \ + .hw_configs = v4l2_m2m_hw_configs, \ .wrapper_name = "v4l2m2m", \ } -diff --git a/libavcodec/v4l2_m2m_enc.c b/libavcodec/v4l2_m2m_enc.c -index 32321f392f..9f1b2c2ffc 100644 ---- a/libavcodec/v4l2_m2m_enc.c -+++ b/libavcodec/v4l2_m2m_enc.c -@@ -416,6 +416,7 @@ static const AVCodecDefault v4l2_m2m_defaults[] = { - .close = v4l2_encode_close, \ - .defaults = v4l2_m2m_defaults, \ - .capabilities = AV_CODEC_CAP_HARDWARE | AV_CODEC_CAP_DELAY, \ -+ .caps_internal = FF_CODEC_CAP_INIT_CLEANUP, \ - .wrapper_name = "v4l2m2m", \ - } - -diff --git a/libavcodec/v4l2_phase.c b/libavcodec/v4l2_phase.c +diff --git a/libavcodec/v4l2_req_devscan.c b/libavcodec/v4l2_req_devscan.c new file mode 100644 -index 0000000000..0a7f6abd33 +index 0000000000..59724a8104 --- /dev/null -+++ b/libavcodec/v4l2_phase.c -@@ -0,0 +1,140 @@ -+// v4l2_phase.c ++++ b/libavcodec/v4l2_req_devscan.c +@@ -0,0 +1,452 @@ ++#include ++#include ++#include ++#include ++#include ++#include + ++#include ++#include ++ ++#include ++#include ++ ++#include "v4l2_req_devscan.h" ++#include "v4l2_req_utils.h" ++ ++struct decdev { ++ enum v4l2_buf_type src_type; ++ uint32_t src_fmt_v4l2; ++ const char * vname; ++ const char * mname; ++}; ++ ++struct devscan { ++ struct decdev env; ++ unsigned int dev_size; ++ unsigned int dev_count; ++ struct decdev *devs; ++}; ++ ++static int video_src_pixfmt_supported(uint32_t fmt) ++{ ++ return 1; ++} ++ ++static void v4l2_setup_format(struct v4l2_format *format, unsigned int type, ++ unsigned int width, unsigned int height, ++ unsigned int pixelformat) ++{ ++ unsigned int sizeimage; ++ ++ memset(format, 0, sizeof(*format)); ++ format->type = type; ++ ++ sizeimage = V4L2_TYPE_IS_OUTPUT(type) ? 4 * 1024 * 1024 : 0; ++ ++ if (V4L2_TYPE_IS_MULTIPLANAR(type)) { ++ format->fmt.pix_mp.width = width; ++ format->fmt.pix_mp.height = height; ++ format->fmt.pix_mp.plane_fmt[0].sizeimage = sizeimage; ++ format->fmt.pix_mp.pixelformat = pixelformat; ++ } else { ++ format->fmt.pix.width = width; ++ format->fmt.pix.height = height; ++ format->fmt.pix.sizeimage = sizeimage; ++ format->fmt.pix.pixelformat = pixelformat; ++ } ++} ++ ++static int v4l2_set_format(int video_fd, unsigned int type, unsigned int pixelformat, ++ unsigned int width, unsigned int height) ++{ ++ struct v4l2_format format; ++ ++ v4l2_setup_format(&format, type, width, height, pixelformat); ++ ++ return ioctl(video_fd, VIDIOC_S_FMT, &format) ? 
-errno : 0; ++} ++ ++static int v4l2_query_capabilities(int video_fd, unsigned int *capabilities) ++{ ++ struct v4l2_capability capability = { 0 }; ++ int rc; ++ ++ rc = ioctl(video_fd, VIDIOC_QUERYCAP, &capability); ++ if (rc < 0) ++ return -errno; ++ ++ if (capabilities != NULL) { ++ if ((capability.capabilities & V4L2_CAP_DEVICE_CAPS) != 0) ++ *capabilities = capability.device_caps; ++ else ++ *capabilities = capability.capabilities; ++ } ++ ++ return 0; ++} ++ ++static int devscan_add(struct devscan *const scan, ++ enum v4l2_buf_type src_type, ++ uint32_t src_fmt_v4l2, ++ const char * vname, ++ const char * mname) ++{ ++ struct decdev *d; ++ ++ if (scan->dev_size <= scan->dev_count) { ++ unsigned int n = !scan->dev_size ? 4 : scan->dev_size * 2; ++ d = realloc(scan->devs, n * sizeof(*d)); ++ if (!d) ++ return -ENOMEM; ++ scan->devs = d; ++ scan->dev_size = n; ++ } ++ ++ d = scan->devs + scan->dev_count; ++ d->src_type = src_type; ++ d->src_fmt_v4l2 = src_fmt_v4l2; ++ d->vname = strdup(vname); ++ if (!d->vname) ++ return -ENOMEM; ++ d->mname = strdup(mname); ++ if (!d->mname) { ++ free((char *)d->vname); ++ return -ENOMEM; ++ } ++ ++scan->dev_count; ++ return 0; ++} ++ ++void devscan_delete(struct devscan **const pScan) ++{ ++ unsigned int i; ++ struct devscan * const scan = *pScan; ++ ++ if (!scan) ++ return; ++ *pScan = NULL; ++ ++ for (i = 0; i < scan->dev_count; ++i) { ++ free((char*)scan->devs[i].mname); ++ free((char*)scan->devs[i].vname); ++ } ++ free(scan->devs); ++ free(scan); ++} ++ ++#define REQ_BUF_CAPS (\ ++ V4L2_BUF_CAP_SUPPORTS_DMABUF |\ ++ V4L2_BUF_CAP_SUPPORTS_REQUESTS |\ ++ V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF) ++ ++static void probe_formats(void * const dc, ++ struct devscan *const scan, ++ const int fd, ++ const unsigned int type_v4l2, ++ const char *const mpath, ++ const char *const vpath) ++{ ++ unsigned int i; ++ for (i = 0;; ++i) { ++ struct v4l2_fmtdesc fmtdesc = { ++ .index = i, ++ .type = type_v4l2 ++ }; ++ struct v4l2_requestbuffers rbufs = { ++ .count = 0, ++ .type = type_v4l2, ++ .memory = V4L2_MEMORY_MMAP ++ }; ++ while (ioctl(fd, VIDIOC_ENUM_FMT, &fmtdesc)) { ++ if (errno == EINTR) ++ continue; ++ if (errno != EINVAL) ++ request_err(dc, "Enum[%d] failed for type=%d\n", i, type_v4l2); ++ return; ++ } ++ if (!video_src_pixfmt_supported(fmtdesc.pixelformat)) ++ continue; ++ ++ if (v4l2_set_format(fd, type_v4l2, fmtdesc.pixelformat, 720, 480)) { ++ request_debug(dc, "Set failed for type=%d, pf=%.4s\n", type_v4l2, (char*)&fmtdesc.pixelformat); ++ continue; ++ } ++ ++ while (ioctl(fd, VIDIOC_REQBUFS, &rbufs)) { ++ if (errno != EINTR) { ++ request_debug(dc, "%s: Reqbufs failed\n", vpath); ++ continue; ++ } ++ } ++ ++ if ((rbufs.capabilities & REQ_BUF_CAPS) != REQ_BUF_CAPS) { ++ request_debug(dc, "%s: Buf caps %#x insufficient\n", vpath, rbufs.capabilities); ++ continue; ++ } ++ ++ request_debug(dc, "Adding: %s,%s pix=%#x, type=%d\n", ++ mpath, vpath, fmtdesc.pixelformat, type_v4l2); ++ devscan_add(scan, type_v4l2, fmtdesc.pixelformat, vpath, mpath); ++ } ++} ++ ++ ++static int probe_video_device(void * const dc, ++ struct udev_device *const device, ++ struct devscan *const scan, ++ const char *const mpath) ++{ ++ int ret; ++ unsigned int capabilities = 0; ++ int video_fd = -1; ++ ++ const char *path = udev_device_get_devnode(device); ++ if (!path) { ++ request_err(dc, "%s: get video device devnode failed\n", __func__); ++ ret = -EINVAL; ++ goto fail; ++ } ++ ++ video_fd = open(path, O_RDWR, 0); ++ if (video_fd == -1) { ++ ret = -errno; ++ request_err(dc, 
"%s: opening %s failed, %s (%d)\n", __func__, path, strerror(errno), errno); ++ goto fail; ++ } ++ ++ ret = v4l2_query_capabilities(video_fd, &capabilities); ++ if (ret < 0) { ++ request_err(dc, "%s: get video capability failed, %s (%d)\n", __func__, strerror(-ret), -ret); ++ goto fail; ++ } ++ ++ request_debug(dc, "%s: path=%s capabilities=%#x\n", __func__, path, capabilities); ++ ++ if (!(capabilities & V4L2_CAP_STREAMING)) { ++ request_debug(dc, "%s: missing required streaming capability\n", __func__); ++ ret = -EINVAL; ++ goto fail; ++ } ++ ++ if (!(capabilities & (V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_VIDEO_M2M))) { ++ request_debug(dc, "%s: missing required mem2mem capability\n", __func__); ++ ret = -EINVAL; ++ goto fail; ++ } ++ ++ /* Should check capture formats too... */ ++ if ((capabilities & V4L2_CAP_VIDEO_M2M) != 0) ++ probe_formats(dc, scan, video_fd, V4L2_BUF_TYPE_VIDEO_OUTPUT, mpath, path); ++ if ((capabilities & V4L2_CAP_VIDEO_M2M_MPLANE) != 0) ++ probe_formats(dc, scan, video_fd, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE, mpath, path); ++ ++ close(video_fd); ++ return 0; ++ ++fail: ++ if (video_fd >= 0) ++ close(video_fd); ++ return ret; ++} ++ ++static int probe_media_device(void * const dc, ++ struct udev_device *const device, ++ struct devscan *const scan) ++{ ++ int ret; ++ int rv; ++ struct media_device_info device_info = { 0 }; ++ struct media_v2_topology topology = { 0 }; ++ struct media_v2_interface *interfaces = NULL; ++ struct udev *udev = udev_device_get_udev(device); ++ struct udev_device *video_device; ++ dev_t devnum; ++ int media_fd = -1; ++ ++ const char *path = udev_device_get_devnode(device); ++ if (!path) { ++ request_err(dc, "%s: get media device devnode failed\n", __func__); ++ ret = -EINVAL; ++ goto fail; ++ } ++ ++ media_fd = open(path, O_RDWR, 0); ++ if (media_fd < 0) { ++ ret = -errno; ++ request_err(dc, "%s: opening %s failed, %s (%d)\n", __func__, path, strerror(-ret), -ret); ++ goto fail; ++ } ++ ++ rv = ioctl(media_fd, MEDIA_IOC_DEVICE_INFO, &device_info); ++ if (rv < 0) { ++ ret = -errno; ++ request_err(dc, "%s: get media device info failed, %s (%d)\n", __func__, strerror(-ret), -ret); ++ goto fail; ++ } ++ ++ rv = ioctl(media_fd, MEDIA_IOC_G_TOPOLOGY, &topology); ++ if (rv < 0) { ++ ret = -errno; ++ request_err(dc, "%s: get media topology failed, %s (%d)\n", __func__, strerror(-ret), -ret); ++ goto fail; ++ } ++ ++ if (topology.num_interfaces <= 0) { ++ request_err(dc, "%s: media device has no interfaces\n", __func__); ++ ret = -EINVAL; ++ goto fail; ++ } ++ ++ interfaces = calloc(topology.num_interfaces, sizeof(*interfaces)); ++ if (!interfaces) { ++ request_err(dc, "%s: allocating media interface struct failed\n", __func__); ++ ret = -ENOMEM; ++ goto fail; ++ } ++ ++ topology.ptr_interfaces = (__u64)(uintptr_t)interfaces; ++ rv = ioctl(media_fd, MEDIA_IOC_G_TOPOLOGY, &topology); ++ if (rv < 0) { ++ ret = -errno; ++ request_err(dc, "%s: get media topology failed, %s (%d)\n", __func__, strerror(-ret), -ret); ++ goto fail; ++ } ++ ++ for (int i = 0; i < topology.num_interfaces; i++) { ++ if (interfaces[i].intf_type != MEDIA_INTF_T_V4L_VIDEO) ++ continue; ++ ++ devnum = makedev(interfaces[i].devnode.major, interfaces[i].devnode.minor); ++ video_device = udev_device_new_from_devnum(udev, 'c', devnum); ++ if (!video_device) { ++ ret = -errno; ++ request_err(dc, "%s: video_device[%d]=%p\n", __func__, i, video_device); ++ continue; ++ } ++ ++ ret = probe_video_device(dc, video_device, scan, path); ++ udev_device_unref(video_device); ++ ++ if (ret != 0) ++ 
goto fail; ++ } ++ ++ free(interfaces); ++ return ret; ++ ++fail: ++ free(interfaces); ++ if (media_fd != -1) ++ close(media_fd); ++ return ret; ++} ++ ++const char *decdev_media_path(const struct decdev *const dev) ++{ ++ return !dev ? NULL : dev->mname; ++} ++ ++const char *decdev_video_path(const struct decdev *const dev) ++{ ++ return !dev ? NULL : dev->vname; ++} ++ ++enum v4l2_buf_type decdev_src_type(const struct decdev *const dev) ++{ ++ return !dev ? 0 : dev->src_type; ++} ++ ++uint32_t decdev_src_pixelformat(const struct decdev *const dev) ++{ ++ return !dev ? 0 : dev->src_fmt_v4l2; ++} ++ ++ ++const struct decdev *devscan_find(struct devscan *const scan, ++ const uint32_t src_fmt_v4l2) ++{ ++ unsigned int i; ++ ++ if (scan->env.mname && scan->env.vname) ++ return &scan->env; ++ ++ if (!src_fmt_v4l2) ++ return scan->dev_count ? scan->devs + 0 : NULL; ++ ++ for (i = 0; i != scan->dev_count; ++i) { ++ if (scan->devs[i].src_fmt_v4l2 == src_fmt_v4l2) ++ return scan->devs + i; ++ } ++ return NULL; ++} ++ ++int devscan_build(void * const dc, struct devscan **pscan) ++{ ++ int ret; ++ struct udev *udev; ++ struct udev_enumerate *enumerate; ++ struct udev_list_entry *devices; ++ struct udev_list_entry *entry; ++ struct udev_device *device; ++ struct devscan * scan; ++ ++ *pscan = NULL; ++ ++ scan = calloc(1, sizeof(*scan)); ++ if (!scan) { ++ ret = -ENOMEM; ++ goto fail; ++ } ++ ++ scan->env.mname = getenv("LIBVA_V4L2_REQUEST_MEDIA_PATH"); ++ scan->env.vname = getenv("LIBVA_V4L2_REQUEST_VIDEO_PATH"); ++ if (scan->env.mname && scan->env.vname) { ++ request_info(dc, "Media/video device env overrides found: %s,%s\n", ++ scan->env.mname, scan->env.vname); ++ *pscan = scan; ++ return 0; ++ } ++ ++ udev = udev_new(); ++ if (!udev) { ++ request_err(dc, "%s: allocating udev context failed\n", __func__); ++ ret = -ENOMEM; ++ goto fail; ++ } ++ ++ enumerate = udev_enumerate_new(udev); ++ if (!enumerate) { ++ request_err(dc, "%s: allocating udev enumerator failed\n", __func__); ++ ret = -ENOMEM; ++ goto fail; ++ } ++ ++ udev_enumerate_add_match_subsystem(enumerate, "media"); ++ udev_enumerate_scan_devices(enumerate); ++ ++ devices = udev_enumerate_get_list_entry(enumerate); ++ udev_list_entry_foreach(entry, devices) { ++ const char *path = udev_list_entry_get_name(entry); ++ if (!path) ++ continue; ++ ++ device = udev_device_new_from_syspath(udev, path); ++ if (!device) ++ continue; ++ ++ probe_media_device(dc, device, scan); ++ udev_device_unref(device); ++ } ++ ++ udev_enumerate_unref(enumerate); ++ ++ *pscan = scan; ++ return 0; ++ ++fail: ++ udev_unref(udev); ++ devscan_delete(&scan); ++ return ret; ++} ++ +diff --git a/libavcodec/v4l2_req_devscan.h b/libavcodec/v4l2_req_devscan.h +new file mode 100644 +index 0000000000..0baef36535 +--- /dev/null ++++ b/libavcodec/v4l2_req_devscan.h +@@ -0,0 +1,21 @@ ++#ifndef _DEVSCAN_H_ ++#define _DEVSCAN_H_ ++ ++struct devscan; ++struct decdev; ++enum v4l2_buf_type; ++ ++/* These return pointers to data in the devscan structure and so are vaild ++ * for the lifetime of that ++ */ ++const char *decdev_media_path(const struct decdev *const dev); ++const char *decdev_video_path(const struct decdev *const dev); ++enum v4l2_buf_type decdev_src_type(const struct decdev *const dev); ++uint32_t decdev_src_pixelformat(const struct decdev *const dev); ++ ++const struct decdev *devscan_find(struct devscan *const scan, const uint32_t src_fmt_v4l2); ++ ++int devscan_build(void * const dc, struct devscan **pscan); ++void devscan_delete(struct devscan **const pScan); ++ 
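/*
 * Illustrative usage (editorial sketch, not part of the patch): build the
 * scan table once at init, pick a decoder either by its V4L2 source
 * pixelformat or take the first one found (src_fmt_v4l2 == 0), then open
 * the returned video/media nodes.  "log_ctx" stands for whatever opaque
 * logging context the caller passes through to the request_* helpers.
 * The returned strings point into the devscan structure, so keep the table
 * alive for as long as the paths are in use.
 *
 *     struct devscan *scan = NULL;
 *     const struct decdev *dev;
 *
 *     if (devscan_build(log_ctx, &scan) != 0)
 *         return -ENODEV;                          // no usable media devices
 *
 *     dev = devscan_find(scan, 0);                 // 0 => first stateless decoder found
 *     if (dev) {
 *         int video_fd = open(decdev_video_path(dev), O_RDWR);
 *         int media_fd = open(decdev_media_path(dev), O_RDWR | O_NONBLOCK);
 *         // ... hand the fds to the mediabufs / media_pool layers ...
 *     }
 *
 *     devscan_delete(&scan);                       // only after the paths are no longer needed
 */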
++#endif +diff --git a/libavcodec/v4l2_req_dmabufs.c b/libavcodec/v4l2_req_dmabufs.c +new file mode 100644 +index 0000000000..828da1dd05 +--- /dev/null ++++ b/libavcodec/v4l2_req_dmabufs.c +@@ -0,0 +1,241 @@ +#include -+#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "v4l2_req_dmabufs.h" ++#include "v4l2_req_utils.h" ++ ++#define DMABUF_NAME1 "/dev/dma_heap/linux,cma" ++#define DMABUF_NAME2 "/dev/dma_heap/reserved" ++ ++ ++struct dmabufs_ctl { ++ int fd; ++ size_t page_size; ++}; ++ ++struct dmabuf_h { ++ int fd; ++ size_t size; ++ size_t len; ++ void * mapptr; ++}; ++ ++struct dmabuf_h * dmabuf_import(int fd, size_t size) ++{ ++ struct dmabuf_h *dh; ++ ++ fd = dup(fd); ++ if (fd < 0 || size == 0) ++ return NULL; ++ ++ dh = malloc(sizeof(*dh)); ++ if (!dh) { ++ close(fd); ++ return NULL; ++ } ++ ++ *dh = (struct dmabuf_h) { ++ .fd = fd, ++ .size = size, ++ .mapptr = MAP_FAILED ++ }; ++ return dh; ++} ++ ++struct dmabuf_h * dmabuf_realloc(struct dmabufs_ctl * dbsc, struct dmabuf_h * old, size_t size) ++{ ++ struct dmabuf_h * dh; ++ struct dma_heap_allocation_data data = { ++ .len = (size + dbsc->page_size - 1) & ~(dbsc->page_size - 1), ++ .fd = 0, ++ .fd_flags = O_RDWR, ++ .heap_flags = 0 ++ }; ++ ++ if (old != NULL) { ++ if (old->size == data.len) { ++ return old; ++ } ++ dmabuf_free(old); ++ } ++ ++ if (size == 0 || ++ (dh = malloc(sizeof(*dh))) == NULL) ++ return NULL; ++ ++ while (ioctl(dbsc->fd, DMA_HEAP_IOCTL_ALLOC, &data)) { ++ int err = errno; ++ request_log("Failed to alloc %" PRIu64 " from dma-heap(fd=%d): %d (%s)\n", ++ data.len, ++ dbsc->fd, ++ err, ++ strerror(err)); ++ if (err == EINTR) ++ continue; ++ goto fail; ++ } ++ ++ *dh = (struct dmabuf_h){ ++ .fd = data.fd, ++ .size = (size_t)data.len, ++ .mapptr = MAP_FAILED ++ }; ++ ++ return dh; ++ ++fail: ++ free(dh); ++ return NULL; ++} ++ ++int dmabuf_sync(struct dmabuf_h * const dh, unsigned int flags) ++{ ++ struct dma_buf_sync sync = { ++ .flags = flags ++ }; ++ while (ioctl(dh->fd, DMA_BUF_IOCTL_SYNC, &sync) == -1) { ++ const int err = errno; ++ if (errno == EINTR) ++ continue; ++ request_log("%s: ioctl failed: flags=%#x\n", __func__, flags); ++ return -err; ++ } ++ return 0; ++} ++ ++int dmabuf_write_start(struct dmabuf_h * const dh) ++{ ++ return dmabuf_sync(dh, DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE); ++} ++ ++int dmabuf_write_end(struct dmabuf_h * const dh) ++{ ++ return dmabuf_sync(dh, DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE); ++} ++ ++int dmabuf_read_start(struct dmabuf_h * const dh) ++{ ++ if (!dmabuf_map(dh)) ++ return -1; ++ return dmabuf_sync(dh, DMA_BUF_SYNC_START | DMA_BUF_SYNC_READ); ++} ++ ++int dmabuf_read_end(struct dmabuf_h * const dh) ++{ ++ return dmabuf_sync(dh, DMA_BUF_SYNC_END | DMA_BUF_SYNC_READ); ++} ++ ++ ++void * dmabuf_map(struct dmabuf_h * const dh) ++{ ++ if (!dh) ++ return NULL; ++ if (dh->mapptr != MAP_FAILED) ++ return dh->mapptr; ++ dh->mapptr = mmap(NULL, dh->size, ++ PROT_READ | PROT_WRITE, ++ MAP_SHARED | MAP_POPULATE, ++ dh->fd, 0); ++ if (dh->mapptr == MAP_FAILED) { ++ request_log("%s: Map failed\n", __func__); ++ return NULL; ++ } ++ return dh->mapptr; ++} ++ ++int dmabuf_fd(const struct dmabuf_h * const dh) ++{ ++ if (!dh) ++ return -1; ++ return dh->fd; ++} ++ ++size_t dmabuf_size(const struct dmabuf_h * const dh) ++{ ++ if (!dh) ++ return 0; ++ return dh->size; ++} ++ ++size_t dmabuf_len(const struct dmabuf_h * const dh) ++{ ++ if (!dh) ++ return 0; ++ return dh->len; ++} ++ ++void 
dmabuf_len_set(struct dmabuf_h * const dh, const size_t len) ++{ ++ dh->len = len; ++} ++ ++ ++ ++void dmabuf_free(struct dmabuf_h * dh) ++{ ++ if (!dh) ++ return; ++ ++ if (dh->mapptr != MAP_FAILED) ++ munmap(dh->mapptr, dh->size); ++ while (close(dh->fd) == -1 && errno == EINTR) ++ /* loop */; ++ free(dh); ++} ++ ++struct dmabufs_ctl * dmabufs_ctl_new(void) ++{ ++ struct dmabufs_ctl * dbsc = malloc(sizeof(*dbsc)); ++ ++ if (!dbsc) ++ return NULL; ++ ++ while ((dbsc->fd = open(DMABUF_NAME1, O_RDWR)) == -1 && ++ errno == EINTR) ++ /* Loop */; ++ ++ if (dbsc->fd == -1) { ++ while ((dbsc->fd = open(DMABUF_NAME2, O_RDWR)) == -1 && ++ errno == EINTR) ++ /* Loop */; ++ if (dbsc->fd == -1) { ++ request_log("Unable to open either %s or %s\n", ++ DMABUF_NAME1, DMABUF_NAME2); ++ goto fail; ++ } ++ } ++ ++ dbsc->page_size = (size_t)sysconf(_SC_PAGE_SIZE); ++ ++ return dbsc; ++ ++fail: ++ free(dbsc); ++ return NULL; ++} ++ ++void dmabufs_ctl_delete(struct dmabufs_ctl ** const pDbsc) ++{ ++ struct dmabufs_ctl * const dbsc = *pDbsc; ++ ++ if (!dbsc) ++ return; ++ *pDbsc = NULL; ++ ++ while (close(dbsc->fd) == -1 && errno == EINTR) ++ /* loop */; ++ ++ free(dbsc); ++} ++ ++ +diff --git a/libavcodec/v4l2_req_dmabufs.h b/libavcodec/v4l2_req_dmabufs.h +new file mode 100644 +index 0000000000..8d909c4297 +--- /dev/null ++++ b/libavcodec/v4l2_req_dmabufs.h +@@ -0,0 +1,38 @@ ++#ifndef DMABUFS_H ++#define DMABUFS_H ++ ++struct dmabufs_ctl; ++struct dmabuf_h; ++ ++struct dmabufs_ctl * dmabufs_ctl_new(void); ++void dmabufs_ctl_delete(struct dmabufs_ctl ** const pdbsc); ++ ++// Need not preserve old contents ++// On NULL return old buffer is freed ++struct dmabuf_h * dmabuf_realloc(struct dmabufs_ctl * dbsc, struct dmabuf_h *, size_t size); ++ ++static inline struct dmabuf_h * dmabuf_alloc(struct dmabufs_ctl * dbsc, size_t size) { ++ return dmabuf_realloc(dbsc, NULL, size); ++} ++/* Create from existing fd - dups(fd) */ ++struct dmabuf_h * dmabuf_import(int fd, size_t size); ++void * dmabuf_map(struct dmabuf_h * const dh); ++ ++/* flags from linux/dmabuf.h DMA_BUF_SYNC_xxx */ ++int dmabuf_sync(struct dmabuf_h * const dh, unsigned int flags); ++ ++int dmabuf_write_start(struct dmabuf_h * const dh); ++int dmabuf_write_end(struct dmabuf_h * const dh); ++int dmabuf_read_start(struct dmabuf_h * const dh); ++int dmabuf_read_end(struct dmabuf_h * const dh); ++ ++int dmabuf_fd(const struct dmabuf_h * const dh); ++/* Allocated size */ ++size_t dmabuf_size(const struct dmabuf_h * const dh); ++/* Bytes in use */ ++size_t dmabuf_len(const struct dmabuf_h * const dh); ++/* Set bytes in use */ ++void dmabuf_len_set(struct dmabuf_h * const dh, const size_t len); ++void dmabuf_free(struct dmabuf_h * dh); ++ ++#endif +diff --git a/libavcodec/v4l2_req_media.c b/libavcodec/v4l2_req_media.c +new file mode 100644 +index 0000000000..659835fcb7 +--- /dev/null ++++ b/libavcodec/v4l2_req_media.c +@@ -0,0 +1,1510 @@ ++/* ++ * Copyright (C) 2018 Paul Kocialkowski ++ * ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be 
included in all copies or substantial portions ++ * of the Software. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS ++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. ++ * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, ++ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE ++ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#include ++#include ++#include +#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include + -+#include "libavutil/log.h" -+#include "v4l2_phase.h" ++#include + -+typedef struct phase_envss { -+ unsigned int last_order; ++#include "v4l2_req_dmabufs.h" ++#include "v4l2_req_media.h" ++#include "v4l2_req_pollqueue.h" ++#include "v4l2_req_utils.h" ++#include "weak_link.h" ++ ++ ++/* floor(log2(x)) */ ++static unsigned int log2_size(size_t x) ++{ ++ unsigned int n = 0; ++ ++ if (x & ~0xffff) { ++ n += 16; ++ x >>= 16; ++ } ++ if (x & ~0xff) { ++ n += 8; ++ x >>= 8; ++ } ++ if (x & ~0xf) { ++ n += 4; ++ x >>= 4; ++ } ++ if (x & ~3) { ++ n += 2; ++ x >>= 2; ++ } ++ return (x & ~1) ? n + 1 : n; ++} ++ ++static size_t round_up_size(const size_t x) ++{ ++ /* Admit no size < 256 */ ++ const unsigned int n = x < 256 ? 8 : log2_size(x) - 1; ++ ++ return x >= (3 << n) ? 4 << n : (3 << n); ++} ++ ++struct media_request; ++ ++struct media_pool { ++ int fd; ++ sem_t sem; + pthread_mutex_t lock; -+ pthread_cond_t cond; -+} phase_env; ++ struct media_request * free_reqs; ++ struct pollqueue * pq; ++}; + -+struct V4L2PhaseControl { -+ unsigned int order; -+ unsigned int phase_count; -+ phase_env p[V4L2PHASE_PHASE_COUNT]; ++struct media_request { ++ struct media_request * next; ++ struct media_pool * mp; ++ int fd; ++ struct polltask * pt; +}; + + -+unsigned int ff_v4l2_phase_order_next(V4L2PhaseControl * const pc) ++static inline int do_trywait(sem_t *const sem) +{ -+ return ++pc->order; -+} -+ -+// Phase isn't required but it acts as a check that we know what we are doing -+int -+ff_v4l2_phase_claim(V4L2PhaseInfo * const pi, unsigned int phase) -+{ -+ V4L2PhaseControl *const pc = pi->ctrl; -+ phase_env * const p = pc->p + phase; -+ -+ if (pi->n2 != phase * 2) { -+ av_log(NULL, AV_LOG_ERROR, "%s: Unexpected phase: req=%d, cur=%d/%d\n", __func__, phase, pi->n2 >> 1, pi->n2 & 1); -+ return -1; ++ while (sem_trywait(sem)) { ++ if (errno != EINTR) ++ return -errno; + } -+ -+ pthread_mutex_lock(&p->lock); -+ -+ while (pi->order != p->last_order + 1) { -+ pthread_cond_wait(&p->cond, &p->lock); -+ } -+ -+ pi->n2++; -+ pthread_mutex_unlock(&p->lock); + return 0; +} + -+int -+ff_v4l2_phase_release(V4L2PhaseInfo * const pi, unsigned int phase) ++static inline int do_wait(sem_t *const sem) +{ -+ V4L2PhaseControl *const pc = pi->ctrl; -+ phase_env * const p = pc->p + phase; -+ -+ if (pi->n2 != ((phase << 1) | 1)) { -+ av_log(NULL, AV_LOG_ERROR, "%s: Unexpected phase: req=%d, cur=%d/%d\n", __func__, phase, pi->n2 >> 1, pi->n2 & 1); -+ return -1; ++ while (sem_wait(sem)) { ++ if (errno != EINTR) ++ return -errno; + } -+ -+ if (pi->order != p->last_order + 1) { -+ av_log(NULL, AV_LOG_ERROR, "%s: order_mismatch\n", __func__); -+ return -1; -+ } -+ -+ pthread_mutex_lock(&p->lock); -+ p->last_order = pi->order; -+ pi->n2++; -+ pthread_cond_broadcast(&p->cond); -+ 
pthread_mutex_unlock(&p->lock); + return 0; +} + -+// Init the PhaseInfo, assign a new order, claim phase 0 -+int -+ff_v4l2_phase_start(V4L2PhaseInfo * const pi, V4L2PhaseControl * const pc) ++static int request_buffers(int video_fd, unsigned int type, ++ enum v4l2_memory memory, unsigned int buffers_count) +{ -+ pi->n2 = 0; -+ pi->ctrl = pc; -+ pi->order = ff_v4l2_phase_order_next(pc); -+ return ff_v4l2_phase_claim(pi, 0); ++ struct v4l2_requestbuffers buffers; ++ int rc; ++ ++ memset(&buffers, 0, sizeof(buffers)); ++ buffers.type = type; ++ buffers.memory = memory; ++ buffers.count = buffers_count; ++ ++ rc = ioctl(video_fd, VIDIOC_REQBUFS, &buffers); ++ if (rc < 0) { ++ rc = -errno; ++ request_log("Unable to request %d type %d buffers: %s\n", buffers_count, type, strerror(-rc)); ++ return rc; ++ } ++ ++ return 0; +} + -+// Release any claimed phase and claim+release all remaining phases -+void ff_v4l2_phase_abort(V4L2PhaseInfo * const pi) -+{ -+ V4L2PhaseControl *const pc = pi->ctrl; + -+ // Nothing to do -+ if (pi->n2 == 0 || pi->n2 >= pc->phase_count * 2) ++static int set_stream(int video_fd, unsigned int type, bool enable) ++{ ++ enum v4l2_buf_type buf_type = type; ++ int rc; ++ ++ rc = ioctl(video_fd, enable ? VIDIOC_STREAMON : VIDIOC_STREAMOFF, ++ &buf_type); ++ if (rc < 0) { ++ rc = -errno; ++ request_log("Unable to %sable stream: %s\n", ++ enable ? "en" : "dis", strerror(-rc)); ++ return rc; ++ } ++ ++ return 0; ++} ++ ++ ++ ++struct media_request * media_request_get(struct media_pool * const mp) ++{ ++ struct media_request *req = NULL; ++ ++ /* Timeout handled by poll code */ ++ if (do_wait(&mp->sem)) ++ return NULL; ++ ++ pthread_mutex_lock(&mp->lock); ++ req = mp->free_reqs; ++ if (req) { ++ mp->free_reqs = req->next; ++ req->next = NULL; ++ } ++ pthread_mutex_unlock(&mp->lock); ++ return req; ++} ++ ++int media_request_fd(const struct media_request * const req) ++{ ++ return req->fd; ++} ++ ++int media_request_start(struct media_request * const req) ++{ ++ struct media_pool * const mp = req->mp; ++ ++ while (ioctl(req->fd, MEDIA_REQUEST_IOC_QUEUE, NULL) == -1) ++ { ++ const int err = errno; ++ if (err == EINTR) ++ continue; ++ request_log("%s: Failed to Q media: (%d) %s\n", __func__, err, strerror(err)); ++ return -err; ++ } ++ ++ pollqueue_add_task(mp->pq, req->pt, 2000); ++ return 0; ++} ++ ++static void media_request_done(void *v, short revents) ++{ ++ struct media_request *const req = v; ++ struct media_pool *const mp = req->mp; ++ ++ /* ** Not sure what to do about timeout */ ++ ++ if (ioctl(req->fd, MEDIA_REQUEST_IOC_REINIT, NULL) < 0) ++ request_log("Unable to reinit media request: %s\n", ++ strerror(errno)); ++ ++ pthread_mutex_lock(&mp->lock); ++ req->next = mp->free_reqs; ++ mp->free_reqs = req; ++ pthread_mutex_unlock(&mp->lock); ++ sem_post(&mp->sem); ++} ++ ++int media_request_abort(struct media_request ** const preq) ++{ ++ struct media_request * const req = *preq; ++ ++ if (req == NULL) ++ return 0; ++ *preq = NULL; ++ ++ media_request_done(req, 0); ++ return 0; ++} ++ ++static void delete_req_chain(struct media_request * const chain) ++{ ++ struct media_request * next = chain; ++ while (next) { ++ struct media_request * const req = next; ++ next = req->next; ++ if (req->fd != -1) ++ close(req->fd); ++ free(req); ++ } ++} ++ ++struct media_pool * media_pool_new(const char * const media_path, ++ struct pollqueue * const pq, ++ const unsigned int n) ++{ ++ struct media_pool * const mp = calloc(1, sizeof(*mp)); ++ unsigned int i; ++ ++ if (!mp) ++ goto fail0; ++ 
++ mp->pq = pq; ++ pthread_mutex_init(&mp->lock, NULL); ++ mp->fd = open(media_path, O_RDWR | O_NONBLOCK); ++ if (mp->fd == -1) { ++ request_log("Failed to open '%s': %s\n", media_path, strerror(errno)); ++ goto fail1; ++ } ++ ++ for (i = 0; i != n; ++i) { ++ struct media_request * req = malloc(sizeof(*req)); ++ if (!req) ++ goto fail4; ++ ++ *req = (struct media_request){ ++ .next = mp->free_reqs, ++ .mp = mp, ++ .fd = -1 ++ }; ++ mp->free_reqs = req; ++ ++ if (ioctl(mp->fd, MEDIA_IOC_REQUEST_ALLOC, &req->fd) == -1) { ++ request_log("Failed to alloc request %d: %s\n", i, strerror(errno)); ++ goto fail4; ++ } ++ ++ req->pt = polltask_new(req->fd, POLLPRI, media_request_done, req); ++ if (!req->pt) ++ goto fail4; ++ } ++ ++ sem_init(&mp->sem, 0, n); ++ ++ return mp; ++ ++fail4: ++ delete_req_chain(mp->free_reqs); ++ close(mp->fd); ++ pthread_mutex_destroy(&mp->lock); ++fail1: ++ free(mp); ++fail0: ++ return NULL; ++} ++ ++void media_pool_delete(struct media_pool ** pMp) ++{ ++ struct media_pool * const mp = *pMp; ++ ++ if (!mp) + return; ++ *pMp = NULL; + -+ // Run through all remaining phases -+ do { -+ if ((pi->n2 & 1) == 0) -+ ff_v4l2_phase_claim(pi, pi->n2 >> 1); ++ delete_req_chain(mp->free_reqs); ++ close(mp->fd); ++ sem_destroy(&mp->sem); ++ pthread_mutex_destroy(&mp->lock); ++ free(mp); ++} ++ ++ ++#define INDEX_UNSET (~(uint32_t)0) ++ ++enum qent_status { ++ QENT_NEW, ++ QENT_PENDING, ++ QENT_WAITING, ++ QENT_DONE, ++ QENT_ERROR, ++ QENT_IMPORT ++}; ++ ++struct qent_base { ++ struct qent_base *next; ++ struct qent_base *prev; ++ enum qent_status status; ++ uint32_t index; ++ struct dmabuf_h *dh[VIDEO_MAX_PLANES]; ++ struct timeval timestamp; ++}; ++ ++struct qent_src { ++ struct qent_base base; ++ int fixed_size; ++}; ++ ++struct qent_dst { ++ struct qent_base base; ++ ++ bool waiting; ++ pthread_mutex_t lock; ++ pthread_cond_t cond; ++ struct ff_weak_link_client * mbc_wl; ++}; ++ ++ ++struct buf_pool { ++ pthread_mutex_t lock; ++ sem_t free_sem; ++ enum v4l2_buf_type buf_type; ++ struct qent_base *free_head; ++ struct qent_base *free_tail; ++ struct qent_base *inuse_head; ++ struct qent_base *inuse_tail; ++}; ++ ++ ++static inline struct qent_dst *base_to_dst(struct qent_base *be) ++{ ++ return (struct qent_dst *)be; ++} ++ ++static inline struct qent_src *base_to_src(struct qent_base *be) ++{ ++ return (struct qent_src *)be; ++} ++ ++ ++#define QENT_BASE_INITIALIZER {\ ++ .status = QENT_NEW,\ ++ .index = INDEX_UNSET\ ++} ++ ++static void qe_base_uninit(struct qent_base *const be) ++{ ++ unsigned int i; ++ for (i = 0; i != VIDEO_MAX_PLANES; ++i) { ++ dmabuf_free(be->dh[i]); ++ be->dh[i] = NULL; ++ } ++} ++ ++static void qe_src_delete(struct qent_src *const be_src) ++{ ++ if (!be_src) ++ return; ++ qe_base_uninit(&be_src->base); ++ free(be_src); ++} ++ ++static struct qent_src * qe_src_new(void) ++{ ++ struct qent_src *const be_src = malloc(sizeof(*be_src)); ++ if (!be_src) ++ return NULL; ++ *be_src = (struct qent_src){ ++ .base = QENT_BASE_INITIALIZER ++ }; ++ return be_src; ++} ++ ++static struct qent_dst * qe_dst_new(void) ++{ ++ struct qent_dst *const be_dst = malloc(sizeof(*be_dst)); ++ if (!be_dst) ++ return NULL; ++ *be_dst = (struct qent_dst){ ++ .base = QENT_BASE_INITIALIZER, ++ .lock = PTHREAD_MUTEX_INITIALIZER, ++ .cond = PTHREAD_COND_INITIALIZER ++ }; ++ return be_dst; ++} ++ ++ ++static void bq_put_free(struct buf_pool *const bp, struct qent_base * be) ++{ ++ if (bp->free_tail) ++ bp->free_tail->next = be; ++ else ++ bp->free_head = be; ++ be->prev = bp->free_tail; ++ 
be->next = NULL; ++ bp->free_tail = be; ++} ++ ++static struct qent_base * bq_get_free(struct buf_pool *const bp) ++{ ++ struct qent_base *be; ++ ++ be = bp->free_head; ++ if (be) { ++ if (be->next) ++ be->next->prev = be->prev; + else -+ ff_v4l2_phase_release(pi, pi->n2 >> 1); -+ } while (pi->n2 < pc->phase_count * 2); -+} -+ -+ -+V4L2PhaseControl * -+ff_v4l2_phase_control_new(unsigned int phase_count) -+{ -+ V4L2PhaseControl * pc; -+ unsigned int i; -+ if (phase_count > V4L2PHASE_PHASE_COUNT) -+ return NULL; -+ if ((pc = av_mallocz(sizeof(*pc))) == NULL) -+ return NULL; -+ pc->phase_count = phase_count; -+ for (i = 0; i != phase_count; ++i) { -+ phase_env * const p = pc->p + i; -+ p->last_order = 0; -+ pthread_mutex_init(&p->lock, NULL); -+ pthread_cond_init(&p->cond, NULL); ++ bp->free_tail = be->prev; ++ bp->free_head = be->next; ++ be->next = NULL; ++ be->prev = NULL; + } -+ return pc; ++ return be; +} + -+void -+ff_v4l2_phase_control_deletez(V4L2PhaseControl ** const ppc) ++static struct qent_base * bq_extract_inuse(struct buf_pool *const bp, struct qent_base *const be) ++{ ++ if (be->next) ++ be->next->prev = be->prev; ++ else ++ bp->inuse_tail = be->prev; ++ if (be->prev) ++ be->prev->next = be->next; ++ else ++ bp->inuse_head = be->next; ++ be->next = NULL; ++ be->prev = NULL; ++ return be; ++} ++ ++static void bq_free_all_free_src(struct buf_pool *const bp) ++{ ++ struct qent_base *be; ++ while ((be = bq_get_free(bp)) != NULL) ++ qe_src_delete(base_to_src(be)); ++} ++ ++static void queue_put_free(struct buf_pool *const bp, struct qent_base *be) +{ -+ V4L2PhaseControl * const pc = *ppc; + unsigned int i; + -+ if (pc == NULL) ++ pthread_mutex_lock(&bp->lock); ++ /* Clear out state vars */ ++ be->timestamp.tv_sec = 0; ++ be->timestamp.tv_usec = 0; ++ for (i = 0; i < VIDEO_MAX_PLANES && be->dh[i]; ++i) ++ dmabuf_len_set(be->dh[i], 0); ++ bq_put_free(bp, be); ++ pthread_mutex_unlock(&bp->lock); ++ sem_post(&bp->free_sem); ++} ++ ++static bool queue_is_inuse(const struct buf_pool *const bp) ++{ ++ return bp->inuse_tail != NULL; ++} ++ ++static void queue_put_inuse(struct buf_pool *const bp, struct qent_base *be) ++{ ++ if (!be) + return; -+ *ppc = NULL; ++ pthread_mutex_lock(&bp->lock); ++ if (bp->inuse_tail) ++ bp->inuse_tail->next = be; ++ else ++ bp->inuse_head = be; ++ be->prev = bp->inuse_tail; ++ be->next = NULL; ++ bp->inuse_tail = be; ++ be->status = QENT_WAITING; ++ pthread_mutex_unlock(&bp->lock); ++} + -+ for (i = 0; i != pc->phase_count; ++i) { -+ phase_env * const p = pc->p + i; -+ pthread_mutex_destroy(&p->lock); -+ pthread_cond_destroy(&p->cond); ++static struct qent_base *queue_get_free(struct buf_pool *const bp) ++{ ++ struct qent_base *buf; ++ ++ if (do_wait(&bp->free_sem)) ++ return NULL; ++ pthread_mutex_lock(&bp->lock); ++ buf = bq_get_free(bp); ++ pthread_mutex_unlock(&bp->lock); ++ return buf; ++} ++ ++static struct qent_base *queue_tryget_free(struct buf_pool *const bp) ++{ ++ struct qent_base *buf; ++ ++ if (do_trywait(&bp->free_sem)) ++ return NULL; ++ pthread_mutex_lock(&bp->lock); ++ buf = bq_get_free(bp); ++ pthread_mutex_unlock(&bp->lock); ++ return buf; ++} ++ ++static struct qent_base * queue_find_extract_fd(struct buf_pool *const bp, const int fd) ++{ ++ struct qent_base *be; ++ ++ pthread_mutex_lock(&bp->lock); ++ /* Expect 1st in Q, but allow anywhere */ ++ for (be = bp->inuse_head; be; be = be->next) { ++ if (dmabuf_fd(be->dh[0]) == fd) { ++ bq_extract_inuse(bp, be); ++ break; ++ } ++ } ++ pthread_mutex_unlock(&bp->lock); ++ ++ return be; ++} ++ 
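/*
 * Documentation note (added for clarity, not in the upstream patch): each
 * struct buf_pool above keeps two doubly linked lists of qent_base entries.
 * free_head/free_tail hold buffers available for (re)use and are mirrored by
 * free_sem, so queue_get_free() can block and queue_tryget_free() can poll
 * without busy-waiting; queue_put_free() resets the per-buffer state
 * (timestamp, plane lengths) before posting the semaphore.  inuse_head/
 * inuse_tail hold buffers currently queued to the V4L2 driver:
 * queue_put_inuse() marks them QENT_WAITING, and when a buffer comes back
 * from VIDIOC_DQBUF it is matched to its entry by the fd of its first dmabuf
 * plane via queue_find_extract_fd().
 */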
++static void queue_delete(struct buf_pool *const bp) ++{ ++ if (!bp) ++ return; ++ sem_destroy(&bp->free_sem); ++ pthread_mutex_destroy(&bp->lock); ++ free(bp); ++} ++ ++static struct buf_pool* queue_new(const int vfd, struct pollqueue * pq) ++{ ++ struct buf_pool *bp = calloc(1, sizeof(*bp)); ++ if (!bp) ++ return NULL; ++ pthread_mutex_init(&bp->lock, NULL); ++ sem_init(&bp->free_sem, 0, 0); ++ return bp; ++} ++ ++ ++struct mediabufs_ctl { ++ atomic_int ref_count; /* 0 is single ref for easier atomics */ ++ void * dc; ++ int vfd; ++ bool stream_on; ++ bool polling; ++ pthread_mutex_t lock; ++ struct buf_pool * src; ++ struct buf_pool * dst; ++ struct polltask * pt; ++ struct pollqueue * pq; ++ struct ff_weak_link_master * this_wlm; ++ ++ struct v4l2_format src_fmt; ++ struct v4l2_format dst_fmt; ++}; ++ ++static int qe_v4l2_queue(struct qent_base *const be, ++ const int vfd, struct media_request *const mreq, ++ const struct v4l2_format *const fmt, ++ const bool is_dst, const bool hold_flag) ++{ ++ struct v4l2_buffer buffer = { ++ .type = fmt->type, ++ .memory = V4L2_MEMORY_DMABUF, ++ .index = be->index ++ }; ++ struct v4l2_plane planes[VIDEO_MAX_PLANES] = {{0}}; ++ ++ if (V4L2_TYPE_IS_MULTIPLANAR(fmt->type)) { ++ unsigned int i; ++ for (i = 0; i < VIDEO_MAX_PLANES && be->dh[i]; ++i) { ++ if (is_dst) ++ dmabuf_len_set(be->dh[i], 0); ++ ++ /* *** Really need a pixdesc rather than a format so we can fill in data_offset */ ++ planes[i].length = dmabuf_size(be->dh[i]); ++ planes[i].bytesused = dmabuf_len(be->dh[i]); ++ planes[i].m.fd = dmabuf_fd(be->dh[i]); ++ } ++ buffer.m.planes = planes; ++ buffer.length = i; ++ } ++ else { ++ if (is_dst) ++ dmabuf_len_set(be->dh[0], 0); ++ ++ buffer.bytesused = dmabuf_len(be->dh[0]); ++ buffer.length = dmabuf_size(be->dh[0]); ++ buffer.m.fd = dmabuf_fd(be->dh[0]); ++ } ++ ++ if (!is_dst && mreq) { ++ buffer.flags |= V4L2_BUF_FLAG_REQUEST_FD; ++ buffer.request_fd = media_request_fd(mreq); ++ if (hold_flag) ++ buffer.flags |= V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF; ++ } ++ ++ if (is_dst) ++ be->timestamp = (struct timeval){0,0}; ++ ++ buffer.timestamp = be->timestamp; ++ ++ while (ioctl(vfd, VIDIOC_QBUF, &buffer)) { ++ const int err = errno; ++ if (err != EINTR) { ++ request_log("%s: Failed to Q buffer: err=%d (%s)\n", __func__, err, strerror(err)); ++ return -err; ++ } ++ } ++ return 0; ++} ++ ++static struct qent_base * qe_dequeue(struct buf_pool *const bp, ++ const int vfd, ++ const struct v4l2_format * const f) ++{ ++ int fd; ++ struct qent_base *be; ++ int rc; ++ const bool mp = V4L2_TYPE_IS_MULTIPLANAR(f->type); ++ struct v4l2_plane planes[VIDEO_MAX_PLANES] = {{0}}; ++ struct v4l2_buffer buffer = { ++ .type = f->type, ++ .memory = V4L2_MEMORY_DMABUF ++ }; ++ if (mp) { ++ buffer.length = f->fmt.pix_mp.num_planes; ++ buffer.m.planes = planes; ++ } ++ ++ while ((rc = ioctl(vfd, VIDIOC_DQBUF, &buffer)) != 0 && ++ errno == EINTR) ++ /* Loop */; ++ if (rc) { ++ request_log("Error DQing buffer type %d: %s\n", f->type, strerror(errno)); ++ return NULL; ++ } ++ ++ fd = mp ? planes[0].m.fd : buffer.m.fd; ++ be = queue_find_extract_fd(bp, fd); ++ if (!be) { ++ request_log("Failed to find fd %d in Q\n", fd); ++ return NULL; ++ } ++ ++ be->status = (buffer.flags & V4L2_BUF_FLAG_ERROR) ? 
QENT_ERROR : QENT_DONE; ++ return be; ++} ++ ++static void qe_dst_done(struct qent_dst *const dst_be) ++{ ++ pthread_mutex_lock(&dst_be->lock); ++ dst_be->waiting = false; ++ pthread_cond_broadcast(&dst_be->cond); ++ pthread_mutex_unlock(&dst_be->lock); ++} ++ ++static bool qe_dst_waiting(struct qent_dst *const dst_be) ++{ ++ bool waiting; ++ pthread_mutex_lock(&dst_be->lock); ++ waiting = dst_be->waiting; ++ dst_be->waiting = true; ++ pthread_mutex_unlock(&dst_be->lock); ++ return waiting; ++} ++ ++ ++static bool mediabufs_wants_poll(const struct mediabufs_ctl *const mbc) ++{ ++ return queue_is_inuse(mbc->src) || queue_is_inuse(mbc->dst); ++} ++ ++static void mediabufs_poll_cb(void * v, short revents) ++{ ++ struct mediabufs_ctl *mbc = v; ++ struct qent_src *src_be = NULL; ++ struct qent_dst *dst_be = NULL; ++ bool qrun = false; ++ ++ if (!revents) ++ request_err(mbc->dc, "%s: Timeout\n", __func__); ++ ++ pthread_mutex_lock(&mbc->lock); ++ mbc->polling = false; ++ ++ if ((revents & POLLOUT) != 0) ++ src_be = base_to_src(qe_dequeue(mbc->src, mbc->vfd, &mbc->src_fmt)); ++ if ((revents & POLLIN) != 0) ++ dst_be = base_to_dst(qe_dequeue(mbc->dst, mbc->vfd, &mbc->dst_fmt)); ++ ++ /* Reschedule */ ++ if (mediabufs_wants_poll(mbc)) { ++ mbc->polling = true; ++ pollqueue_add_task(mbc->pq, mbc->pt, 2000); ++ qrun = true; ++ } ++ pthread_mutex_unlock(&mbc->lock); ++ ++ if (src_be) ++ queue_put_free(mbc->src, &src_be->base); ++ if (dst_be) ++ qe_dst_done(dst_be); ++ if (!qrun) ++ mediabufs_ctl_unref(&mbc); ++} ++ ++int qent_src_params_set(struct qent_src *const be_src, const struct timeval * timestamp) ++{ ++ struct qent_base *const be = &be_src->base; ++ ++ be->timestamp = *timestamp; ++ return 0; ++} ++ ++static int qent_base_realloc(struct qent_base *const be, const size_t len, struct dmabufs_ctl * dbsc) ++{ ++ if (!be->dh[0] || len > dmabuf_size(be->dh[0])) { ++ size_t newsize = round_up_size(len); ++ request_log("%s: Overrun %d > %d; trying %d\n", __func__, len, dmabuf_size(be->dh[0]), newsize); ++ if (!dbsc || ++ (be->dh[0] = dmabuf_realloc(dbsc, be->dh[0], newsize)) == NULL) { ++ request_log("%s: Realloc %d failed\n", __func__, newsize); ++ return -ENOMEM; ++ } ++ } ++ return 0; ++} ++ ++int qent_src_alloc(struct qent_src *const be_src, const size_t len, struct dmabufs_ctl * dbsc) ++{ ++ struct qent_base *const be = &be_src->base; ++ return qent_base_realloc(be, len, dbsc); ++} ++ ++ ++int qent_src_data_copy(struct qent_src *const be_src, const size_t offset, const void *const src, const size_t len, struct dmabufs_ctl * dbsc) ++{ ++ void * dst; ++ struct qent_base *const be = &be_src->base; ++ int rv; ++ ++ // Realloc doesn't copy so don't alloc if offset != 0 ++ if ((rv = qent_base_realloc(be, offset + len, ++ be_src->fixed_size || offset ? NULL : dbsc)) != 0) ++ return rv; ++ ++ dmabuf_write_start(be->dh[0]); ++ dst = dmabuf_map(be->dh[0]); ++ if (!dst) ++ return -1; ++ memcpy((char*)dst + offset, src, len); ++ dmabuf_len_set(be->dh[0], len); ++ dmabuf_write_end(be->dh[0]); ++ return 0; ++} ++ ++const struct dmabuf_h * qent_dst_dmabuf(const struct qent_dst *const be_dst, unsigned int plane) ++{ ++ const struct qent_base *const be = &be_dst->base; ++ ++ return (plane >= sizeof(be->dh)/sizeof(be->dh[0])) ? 
NULL : be->dh[plane]; ++} ++ ++int qent_dst_dup_fd(const struct qent_dst *const be_dst, unsigned int plane) ++{ ++ return dup(dmabuf_fd(qent_dst_dmabuf(be_dst, plane))); ++} ++ ++MediaBufsStatus mediabufs_start_request(struct mediabufs_ctl *const mbc, ++ struct media_request **const pmreq, ++ struct qent_src **const psrc_be, ++ struct qent_dst *const dst_be, ++ const bool is_final) ++{ ++ struct media_request * mreq = *pmreq; ++ struct qent_src *const src_be = *psrc_be; ++ ++ // Req & src are always both "consumed" ++ *pmreq = NULL; ++ *psrc_be = NULL; ++ ++ pthread_mutex_lock(&mbc->lock); ++ ++ if (dst_be) { ++ if (qe_dst_waiting(dst_be)) { ++ request_info(mbc->dc, "Request buffer already waiting on start\n"); ++ goto fail1; ++ } ++ if (qe_v4l2_queue(&dst_be->base, mbc->vfd, NULL, &mbc->dst_fmt, true, false)) ++ goto fail1; ++ queue_put_inuse(mbc->dst, &dst_be->base); ++ } ++ ++ if (qe_v4l2_queue(&src_be->base, mbc->vfd, mreq, &mbc->src_fmt, false, !is_final)) ++ goto fail1; ++ queue_put_inuse(mbc->src, &src_be->base); ++ ++ if (!mbc->polling && mediabufs_wants_poll(mbc)) { ++ mbc->polling = true; ++ mediabufs_ctl_ref(mbc); ++ pollqueue_add_task(mbc->pq, mbc->pt, 2000); ++ } ++ pthread_mutex_unlock(&mbc->lock); ++ ++ if (media_request_start(mreq)) ++ return MEDIABUFS_ERROR_OPERATION_FAILED; ++ ++ return MEDIABUFS_STATUS_SUCCESS; ++ ++fail1: ++ media_request_abort(&mreq); ++ queue_put_free(mbc->src, &src_be->base); ++ ++#warning If src Q fails this doesnt unwind properly - separate dst Q from src Q ++ if (dst_be) ++ qe_dst_done(dst_be); ++ pthread_mutex_unlock(&mbc->lock); ++ return MEDIABUFS_ERROR_OPERATION_FAILED; ++} ++ ++ ++static int qe_alloc_from_fmt(struct qent_base *const be, ++ struct dmabufs_ctl *const dbsc, ++ const struct v4l2_format *const fmt) ++{ ++ if (V4L2_TYPE_IS_MULTIPLANAR(fmt->type)) { ++ unsigned int i; ++ for (i = 0; i != fmt->fmt.pix_mp.num_planes; ++i) { ++ be->dh[i] = dmabuf_realloc(dbsc, be->dh[i], ++ fmt->fmt.pix_mp.plane_fmt[i].sizeimage); ++ /* On failure tidy up and die */ ++ if (!be->dh[i]) { ++ while (i--) { ++ dmabuf_free(be->dh[i]); ++ be->dh[i] = NULL; ++ } ++ return -1; ++ } ++ } ++ } ++ else { ++// be->dh[0] = dmabuf_alloc(dbsc, fmt->fmt.pix.sizeimage); ++ size_t size = fmt->fmt.pix.sizeimage; ++ be->dh[0] = dmabuf_realloc(dbsc, be->dh[0], size); ++ if (!be->dh[0]) ++ return -1; ++ } ++ return 0; ++} ++ ++static MediaBufsStatus fmt_set(struct v4l2_format *const fmt, const int fd, ++ const enum v4l2_buf_type buftype, ++ uint32_t pixfmt, ++ const unsigned int width, const unsigned int height, ++ const size_t bufsize) ++{ ++ *fmt = (struct v4l2_format){.type = buftype}; ++ ++ if (V4L2_TYPE_IS_MULTIPLANAR(buftype)) { ++ fmt->fmt.pix_mp.width = width; ++ fmt->fmt.pix_mp.height = height; ++ fmt->fmt.pix_mp.pixelformat = pixfmt; ++ if (bufsize) { ++ fmt->fmt.pix_mp.num_planes = 1; ++ fmt->fmt.pix_mp.plane_fmt[0].sizeimage = bufsize; ++ } ++ } ++ else { ++ fmt->fmt.pix.width = width; ++ fmt->fmt.pix.height = height; ++ fmt->fmt.pix.pixelformat = pixfmt; ++ fmt->fmt.pix.sizeimage = bufsize; ++ } ++ ++ while (ioctl(fd, VIDIOC_S_FMT, fmt)) ++ if (errno != EINTR) ++ return MEDIABUFS_ERROR_OPERATION_FAILED; ++ ++ // Treat anything where we don't get at least what we asked for as a fail ++ if (V4L2_TYPE_IS_MULTIPLANAR(buftype)) { ++ if (fmt->fmt.pix_mp.width < width || ++ fmt->fmt.pix_mp.height < height || ++ fmt->fmt.pix_mp.pixelformat != pixfmt) { ++ return MEDIABUFS_ERROR_UNSUPPORTED_BUFFERTYPE; ++ } ++ } ++ else { ++ if (fmt->fmt.pix.width < width || ++ 
fmt->fmt.pix.height < height || ++ fmt->fmt.pix.pixelformat != pixfmt) { ++ return MEDIABUFS_ERROR_UNSUPPORTED_BUFFERTYPE; ++ } ++ } ++ ++ return MEDIABUFS_STATUS_SUCCESS; ++} ++ ++static MediaBufsStatus find_fmt_flags(struct v4l2_format *const fmt, ++ const int fd, ++ const unsigned int type_v4l2, ++ const uint32_t flags_must, ++ const uint32_t flags_not, ++ const unsigned int width, ++ const unsigned int height, ++ mediabufs_dst_fmt_accept_fn *const accept_fn, ++ void *const accept_v) ++{ ++ unsigned int i; ++ ++ for (i = 0;; ++i) { ++ struct v4l2_fmtdesc fmtdesc = { ++ .index = i, ++ .type = type_v4l2 ++ }; ++ while (ioctl(fd, VIDIOC_ENUM_FMT, &fmtdesc)) { ++ if (errno != EINTR) ++ return MEDIABUFS_ERROR_UNSUPPORTED_BUFFERTYPE; ++ } ++ if ((fmtdesc.flags & flags_must) != flags_must || ++ (fmtdesc.flags & flags_not)) ++ continue; ++ if (!accept_fn(accept_v, &fmtdesc)) ++ continue; ++ ++ if (fmt_set(fmt, fd, fmtdesc.type, fmtdesc.pixelformat, ++ width, height, 0) == MEDIABUFS_STATUS_SUCCESS) ++ return MEDIABUFS_STATUS_SUCCESS; ++ } ++ return 0; ++} ++ ++ ++/* Wait for qent done */ ++ ++MediaBufsStatus qent_dst_wait(struct qent_dst *const be_dst) ++{ ++ struct qent_base *const be = &be_dst->base; ++ enum qent_status estat; ++ ++ pthread_mutex_lock(&be_dst->lock); ++ while (be_dst->waiting && ++ !pthread_cond_wait(&be_dst->cond, &be_dst->lock)) ++ /* Loop */; ++ estat = be->status; ++ pthread_mutex_unlock(&be_dst->lock); ++ ++ return estat == QENT_DONE ? MEDIABUFS_STATUS_SUCCESS : ++ estat == QENT_ERROR ? MEDIABUFS_ERROR_DECODING_ERROR : ++ MEDIABUFS_ERROR_OPERATION_FAILED; ++} ++ ++const uint8_t * qent_dst_data(struct qent_dst *const be_dst, unsigned int buf_no) ++{ ++ struct qent_base *const be = &be_dst->base; ++ return dmabuf_map(be->dh[buf_no]); ++} ++ ++MediaBufsStatus qent_dst_read_start(struct qent_dst *const be_dst) ++{ ++ struct qent_base *const be = &be_dst->base; ++ unsigned int i; ++ for (i = 0; i != VIDEO_MAX_PLANES && be->dh[i]; ++i) { ++ if (dmabuf_read_start(be->dh[i])) { ++ while (i--) ++ dmabuf_read_end(be->dh[i]); ++ return MEDIABUFS_ERROR_ALLOCATION_FAILED; ++ } ++ } ++ return MEDIABUFS_STATUS_SUCCESS; ++} ++ ++MediaBufsStatus qent_dst_read_stop(struct qent_dst *const be_dst) ++{ ++ struct qent_base *const be = &be_dst->base; ++ unsigned int i; ++ MediaBufsStatus status = MEDIABUFS_STATUS_SUCCESS; ++ ++ for (i = 0; i != VIDEO_MAX_PLANES && be->dh[i]; ++i) { ++ if (dmabuf_read_end(be->dh[i])) ++ status = MEDIABUFS_ERROR_OPERATION_FAILED; ++ } ++ return status; ++} ++ ++void qent_dst_delete(struct qent_dst *const be_dst) ++{ ++ if (!be_dst) ++ return; ++ ++ ff_weak_link_unref(&be_dst->mbc_wl); ++ pthread_cond_destroy(&be_dst->cond); ++ pthread_mutex_destroy(&be_dst->lock); ++ qe_base_uninit(&be_dst->base); ++ free(be_dst); ++} ++ ++void qent_dst_free(struct qent_dst ** const pbe_dst) ++{ ++ struct qent_dst * const be_dst = *pbe_dst; ++ struct mediabufs_ctl * mbc; ++ if (!be_dst) ++ return; ++ ++ *pbe_dst = NULL; ++ if ((mbc = ff_weak_link_lock(&be_dst->mbc_wl)) != NULL) { ++ queue_put_free(mbc->dst, &be_dst->base); ++ ff_weak_link_unlock(be_dst->mbc_wl); ++ } ++ else { ++ qent_dst_delete(be_dst); + } +} + -+ -diff --git a/libavcodec/v4l2_phase.h b/libavcodec/v4l2_phase.h -new file mode 100644 -index 0000000000..392f22b988 ---- /dev/null -+++ b/libavcodec/v4l2_phase.h -@@ -0,0 +1,37 @@ -+// v4l2_phase.h -+#ifndef AVCODEC_V4L2_PHASE_H -+#define AVCODEC_V4L2_PHASE_H -+ -+#define V4L2PHASE_PHASE_COUNT 2 -+ -+struct V4L2PhaseControl; -+typedef struct V4L2PhaseControl 
V4L2PhaseControl; -+ -+typedef struct V4L2PhaseInfo { -+ unsigned int n2; // (phase << 1) | (claimed) -+ unsigned int order; -+ V4L2PhaseControl * ctrl; -+} V4L2PhaseInfo; -+ -+unsigned int ff_v4l2_phase_order_next(V4L2PhaseControl * const pc); -+ -+static inline int ff_v4l2_phase_started(const V4L2PhaseInfo * const pi) ++MediaBufsStatus qent_dst_import_fd(struct qent_dst *const be_dst, ++ unsigned int plane, ++ int fd, size_t size) +{ -+ return pi->n2 != 0; ++ struct qent_base *const be = &be_dst->base; ++ struct dmabuf_h * dh; ++ ++ if (be->status != QENT_IMPORT || be->dh[plane]) ++ return MEDIABUFS_ERROR_OPERATION_FAILED; ++ ++ dh = dmabuf_import(fd, size); ++ if (!dh) ++ return MEDIABUFS_ERROR_ALLOCATION_FAILED; ++ ++ be->dh[plane] = dh; ++ return MEDIABUFS_STATUS_SUCCESS; +} + -+// Init the PhaseInfo, assign a new order, claim phase 0 -+int ff_v4l2_phase_start(V4L2PhaseInfo * const pi, V4L2PhaseControl * const pc); ++static int create_dst_buf(struct mediabufs_ctl *const mbc) ++{ ++ struct v4l2_create_buffers cbuf = { ++ .count = 1, ++ .memory = V4L2_MEMORY_DMABUF, ++ .format = mbc->dst_fmt, ++ }; + -+// Phase isn't required but it acts as a check that we know what we are doing -+int ff_v4l2_phase_claim(V4L2PhaseInfo * const pi, unsigned int phase); -+int ff_v4l2_phase_release(V4L2PhaseInfo * const pi, unsigned int phase); ++ while (ioctl(mbc->vfd, VIDIOC_CREATE_BUFS, &cbuf)) { ++ const int err = -errno; ++ if (err != EINTR) { ++ request_err(mbc->dc, "%s: Failed to create V4L2 buffer\n", __func__); ++ return -err; ++ } ++ } ++ return cbuf.index; ++} + -+// Release any claimed phase and claim+release all remaining phases -+void ff_v4l2_phase_abort(V4L2PhaseInfo * const pi); ++struct qent_dst* mediabufs_dst_qent_alloc(struct mediabufs_ctl *const mbc, struct dmabufs_ctl *const dbsc) ++{ ++ struct qent_dst * be_dst; ++ ++ if (mbc == NULL) { ++ be_dst = qe_dst_new(); ++ if (be_dst) ++ be_dst->base.status = QENT_IMPORT; ++ return be_dst; ++ } ++ ++ be_dst = base_to_dst(queue_tryget_free(mbc->dst)); ++ if (!be_dst) { ++ int index; ++ ++ be_dst = qe_dst_new(); ++ if (!be_dst) ++ return NULL; ++ ++ if ((be_dst->mbc_wl = ff_weak_link_ref(mbc->this_wlm)) == NULL || ++ (index = create_dst_buf(mbc)) < 0) { ++ qent_dst_delete(be_dst); ++ return NULL; ++ } ++ ++ be_dst->base.index = (uint32_t)index; ++ } ++ ++ if (qe_alloc_from_fmt(&be_dst->base, dbsc, &mbc->dst_fmt)) { ++ /* Given how create buf works we can't uncreate it on alloc failure ++ * all we can do is put it on the free Q ++ */ ++ queue_put_free(mbc->dst, &be_dst->base); ++ return NULL; ++ } ++ ++ return be_dst; ++} ++ ++const struct v4l2_format *mediabufs_dst_fmt(struct mediabufs_ctl *const mbc) ++{ ++ return &mbc->dst_fmt; ++} ++ ++MediaBufsStatus mediabufs_dst_fmt_set(struct mediabufs_ctl *const mbc, ++ const unsigned int width, ++ const unsigned int height, ++ mediabufs_dst_fmt_accept_fn *const accept_fn, ++ void *const accept_v) ++{ ++ MediaBufsStatus status; ++ unsigned int i; ++ const enum v4l2_buf_type buf_type = mbc->dst_fmt.type; ++ static const struct { ++ unsigned int flags_must; ++ unsigned int flags_not; ++ } trys[] = { ++ {0, V4L2_FMT_FLAG_EMULATED}, ++ {V4L2_FMT_FLAG_EMULATED, 0}, ++ }; ++ for (i = 0; i != sizeof(trys)/sizeof(trys[0]); ++i) { ++ status = find_fmt_flags(&mbc->dst_fmt, mbc->vfd, ++ buf_type, ++ trys[i].flags_must, ++ trys[i].flags_not, ++ width, height, accept_fn, accept_v); ++ if (status != MEDIABUFS_ERROR_UNSUPPORTED_BUFFERTYPE) ++ return status; ++ } ++ ++ if (status != MEDIABUFS_STATUS_SUCCESS) ++ return 
status; ++ ++ /* Try to create a buffer - don't alloc */ ++ return status; ++} ++ ++MediaBufsStatus mediabufs_dst_slots_create(struct mediabufs_ctl *const mbc, unsigned int n) ++{ ++ // **** request buffers ++ unsigned int i; ++ ++ for (i = 0; i != n; ++i) ++ { ++ int index; ++ struct qent_dst * const be_dst = qe_dst_new(); ++ if (!be_dst) ++ return MEDIABUFS_ERROR_OPERATION_FAILED; ++ ++ index = create_dst_buf(mbc); ++ if (index < 0) { ++ qent_dst_delete(be_dst); ++ return MEDIABUFS_ERROR_OPERATION_FAILED; ++ } ++ ++ // Add index to free chain ++ be_dst->base.index = (uint32_t)index; ++ queue_put_free(mbc->dst, &be_dst->base); ++ } ++ return MEDIABUFS_STATUS_SUCCESS; ++} ++ ++struct qent_src *mediabufs_src_qent_get(struct mediabufs_ctl *const mbc) ++{ ++ struct qent_base * buf = queue_get_free(mbc->src); ++ return base_to_src(buf); ++} ++ ++void mediabufs_src_qent_abort(struct mediabufs_ctl *const mbc, struct qent_src **const pqe_src) ++{ ++ struct qent_src *const qe_src = *pqe_src; ++ if (!qe_src) ++ return; ++ *pqe_src = NULL; ++ queue_put_free(mbc->src, &qe_src->base); ++} ++ ++/* src format must have been set up before this */ ++MediaBufsStatus mediabufs_src_pool_create(struct mediabufs_ctl *const mbc, ++ struct dmabufs_ctl * const dbsc, ++ unsigned int n) ++{ ++ unsigned int i; ++ struct v4l2_requestbuffers req = { ++ .count = n, ++ .type = mbc->src_fmt.type, ++ .memory = V4L2_MEMORY_DMABUF ++ }; ++ ++ bq_free_all_free_src(mbc->src); ++ while (ioctl(mbc->vfd, VIDIOC_REQBUFS, &req) == -1) { ++ if (errno != EINTR) { ++ request_err(mbc->dc, "%s: Failed to request src bufs\n", __func__); ++ return MEDIABUFS_ERROR_OPERATION_FAILED; ++ } ++ } ++ ++ if (n > req.count) { ++ request_info(mbc->dc, "Only allocated %d of %d src buffers requested\n", req.count, n); ++ n = req.count; ++ } ++ ++ for (i = 0; i != n; ++i) { ++ struct qent_src *const be_src = qe_src_new(); ++ if (!be_src) { ++ request_err(mbc->dc, "Failed to create src be %d\n", i); ++ goto fail; ++ } ++ if (qe_alloc_from_fmt(&be_src->base, dbsc, &mbc->src_fmt)) { ++ qe_src_delete(be_src); ++ goto fail; ++ } ++ be_src->base.index = i; ++ be_src->fixed_size = !mediabufs_src_resizable(mbc); ++ ++ queue_put_free(mbc->src, &be_src->base); ++ } ++ ++ return MEDIABUFS_STATUS_SUCCESS; ++ ++fail: ++ bq_free_all_free_src(mbc->src); ++ req.count = 0; ++ while (ioctl(mbc->vfd, VIDIOC_REQBUFS, &req) == -1 && ++ errno == EINTR) ++ /* Loop */; ++ ++ return MEDIABUFS_ERROR_OPERATION_FAILED; ++} + + -+V4L2PhaseControl * ff_v4l2_phase_control_new(unsigned int phase_count); -+void ff_v4l2_phase_control_deletez(V4L2PhaseControl ** const ppc); ++ ++/* ++ * Set stuff order: ++ * Set src fmt ++ * Set parameters (sps) on vfd ++ * Negotiate dst format (dst_fmt_set) ++ * Create src buffers ++ * Alloc a dst buffer or Create dst slots ++*/ ++MediaBufsStatus mediabufs_stream_on(struct mediabufs_ctl *const mbc) ++{ ++ if (mbc->stream_on) ++ return MEDIABUFS_STATUS_SUCCESS; ++ ++ if (set_stream(mbc->vfd, mbc->src_fmt.type, true) < 0) { ++ request_log("Failed to set stream on src type %d\n", mbc->src_fmt.type); ++ return MEDIABUFS_ERROR_OPERATION_FAILED; ++ } ++ ++ if (set_stream(mbc->vfd, mbc->dst_fmt.type, true) < 0) { ++ request_log("Failed to set stream on dst type %d\n", mbc->dst_fmt.type); ++ set_stream(mbc->vfd, mbc->src_fmt.type, false); ++ return MEDIABUFS_ERROR_OPERATION_FAILED; ++ } ++ ++ mbc->stream_on = true; ++ return MEDIABUFS_STATUS_SUCCESS; ++} ++ ++MediaBufsStatus mediabufs_stream_off(struct mediabufs_ctl *const mbc) ++{ ++ MediaBufsStatus status 
= MEDIABUFS_STATUS_SUCCESS; ++ ++ if (!mbc->stream_on) ++ return MEDIABUFS_STATUS_SUCCESS; ++ ++ if (set_stream(mbc->vfd, mbc->src_fmt.type, false) < 0) { ++ request_log("Failed to set stream off src type %d\n", mbc->src_fmt.type); ++ status = MEDIABUFS_ERROR_OPERATION_FAILED; ++ } ++ ++ if (set_stream(mbc->vfd, mbc->dst_fmt.type, false) < 0) { ++ request_log("Failed to set stream off dst type %d\n", mbc->dst_fmt.type); ++ status = MEDIABUFS_ERROR_OPERATION_FAILED; ++ } ++ ++ mbc->stream_on = false; ++ return status; ++} ++ ++int mediabufs_ctl_set_ext_ctrls(struct mediabufs_ctl * mbc, struct media_request * const mreq, struct v4l2_ext_control control_array[], unsigned int n) ++{ ++ struct v4l2_ext_controls controls = { ++ .controls = control_array, ++ .count = n ++ }; ++ ++ if (mreq) { ++ controls.which = V4L2_CTRL_WHICH_REQUEST_VAL; ++ controls.request_fd = media_request_fd(mreq); ++ } ++ ++ while (ioctl(mbc->vfd, VIDIOC_S_EXT_CTRLS, &controls)) ++ { ++ const int err = errno; ++ if (err != EINTR) { ++ request_err(mbc->dc, "Unable to set controls: %s\n", strerror(err)); ++ return -err; ++ } ++ } ++ ++ return 0; ++} ++ ++MediaBufsStatus mediabufs_set_ext_ctrl(struct mediabufs_ctl *const mbc, ++ struct media_request * const mreq, ++ unsigned int id, void *data, ++ unsigned int size) ++{ ++ struct v4l2_ext_control control = { ++ .id = id, ++ .ptr = data, ++ .size = size ++ }; ++ ++ int rv = mediabufs_ctl_set_ext_ctrls(mbc, mreq, &control, 1); ++ return !rv ? MEDIABUFS_STATUS_SUCCESS : MEDIABUFS_ERROR_OPERATION_FAILED; ++} ++ ++MediaBufsStatus mediabufs_src_fmt_set(struct mediabufs_ctl *const mbc, ++ enum v4l2_buf_type buf_type, ++ const uint32_t pixfmt, ++ const uint32_t width, const uint32_t height, ++ const size_t bufsize) ++{ ++ MediaBufsStatus rv = fmt_set(&mbc->src_fmt, mbc->vfd, buf_type, pixfmt, width, height, bufsize); ++ if (rv != MEDIABUFS_STATUS_SUCCESS) ++ request_err(mbc->dc, "Failed to set src buftype %d, format %#x %dx%d\n", buf_type, pixfmt, width, height); ++ ++ return rv; ++} ++ ++int mediabufs_ctl_query_ext_ctrls(struct mediabufs_ctl * mbc, struct v4l2_query_ext_ctrl ctrls[], unsigned int n) ++{ ++ int rv = 0; ++ while (n--) { ++ while (ioctl(mbc->vfd, VIDIOC_QUERY_EXT_CTRL, ctrls)) { ++ const int err = errno; ++ if (err != EINTR) { ++ request_err(mbc->dc, "Failed to query ext id=%#x, err=%d\n", ctrls->id, err); ++ ctrls->type = 0; // 0 is invalid ++ rv = -err; ++ break; ++ } ++ } ++ ++ctrls; ++ } ++ return rv; ++} ++ ++int mediabufs_src_resizable(const struct mediabufs_ctl *const mbc) ++{ ++ // Single planar OUTPUT can only take exact size buffers ++ // Multiplanar will take larger than negotiated ++ return V4L2_TYPE_IS_MULTIPLANAR(mbc->src_fmt.type); ++} ++ ++static void mediabufs_ctl_delete(struct mediabufs_ctl *const mbc) ++{ ++ if (!mbc) ++ return; ++ ++ // Break the weak link first ++ ff_weak_link_break(&mbc->this_wlm); ++ ++ polltask_delete(&mbc->pt); ++ ++ mediabufs_stream_off(mbc); ++ ++ // Empty v4l2 buffer stash ++ request_buffers(mbc->vfd, mbc->src_fmt.type, V4L2_MEMORY_MMAP, 0); ++ request_buffers(mbc->vfd, mbc->dst_fmt.type, V4L2_MEMORY_MMAP, 0); ++ ++ queue_delete(mbc->dst); ++ queue_delete(mbc->src); ++ close(mbc->vfd); ++ pthread_mutex_destroy(&mbc->lock); ++ ++ free(mbc); ++} ++ ++struct mediabufs_ctl * mediabufs_ctl_ref(struct mediabufs_ctl *const mbc) ++{ ++ atomic_fetch_add(&mbc->ref_count, 1); ++ return mbc; ++} ++ ++void mediabufs_ctl_unref(struct mediabufs_ctl **const pmbc) ++{ ++ struct mediabufs_ctl *const mbc = *pmbc; ++ int n; ++ ++ if (!mbc) ++ 
return; ++ *pmbc = NULL; ++ n = atomic_fetch_sub(&mbc->ref_count, 1); ++ if (n) ++ return; ++ mediabufs_ctl_delete(mbc); ++} ++ ++static int set_capabilities(struct mediabufs_ctl *const mbc) ++{ ++ struct v4l2_capability capability = { 0 }; ++ uint32_t caps; ++ ++ if (ioctl(mbc->vfd, VIDIOC_QUERYCAP, &capability)) { ++ int err = errno; ++ request_err(mbc->dc, "Failed to get capabilities: %s\n", strerror(err)); ++ return -err; ++ } ++ ++ caps = (capability.capabilities & V4L2_CAP_DEVICE_CAPS) != 0 ? ++ capability.device_caps : ++ capability.capabilities; ++ ++ if ((caps & V4L2_CAP_VIDEO_M2M_MPLANE) != 0) { ++ mbc->src_fmt.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; ++ mbc->dst_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; ++ } ++ else if ((caps & V4L2_CAP_VIDEO_M2M) != 0) { ++ mbc->src_fmt.type = V4L2_BUF_TYPE_VIDEO_OUTPUT; ++ mbc->dst_fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; ++ } ++ else { ++ request_err(mbc->dc, "No M2M capabilities (%#x)\n", caps); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++/* One of these per context */ ++struct mediabufs_ctl * mediabufs_ctl_new(void * const dc, const char * vpath, struct pollqueue *const pq) ++{ ++ struct mediabufs_ctl *const mbc = calloc(1, sizeof(*mbc)); ++ ++ if (!mbc) ++ return NULL; ++ ++ mbc->dc = dc; ++ // Default mono planar ++ mbc->pq = pq; ++ pthread_mutex_init(&mbc->lock, NULL); ++ ++ /* Pick a default - could we scan for this? */ ++ if (vpath == NULL) ++ vpath = "/dev/media0"; ++ ++ while ((mbc->vfd = open(vpath, O_RDWR)) == -1) ++ { ++ const int err = errno; ++ if (err != EINTR) { ++ request_err(dc, "Failed to open video dev '%s': %s\n", vpath, strerror(err)); ++ goto fail0; ++ } ++ } ++ ++ if (set_capabilities(mbc)) { ++ request_err(dc, "Bad capabilities for video dev '%s'\n", vpath); ++ goto fail1; ++ } ++ ++ mbc->src = queue_new(mbc->vfd, pq); ++ if (!mbc->src) ++ goto fail1; ++ mbc->dst = queue_new(mbc->vfd, pq); ++ if (!mbc->dst) ++ goto fail2; ++ mbc->pt = polltask_new(mbc->vfd, POLLIN | POLLOUT, mediabufs_poll_cb, mbc); ++ if (!mbc->pt) ++ goto fail3; ++ mbc->this_wlm = ff_weak_link_new(mbc); ++ if (!mbc->this_wlm) ++ goto fail4; ++ ++ /* Cannot add polltask now - polling with nothing pending ++ * generates infinite error polls ++ */ ++ return mbc; ++ ++fail4: ++ polltask_delete(&mbc->pt); ++fail3: ++ queue_delete(mbc->dst); ++fail2: ++ queue_delete(mbc->src); ++fail1: ++ close(mbc->vfd); ++fail0: ++ free(mbc); ++ request_info(dc, "%s: FAILED\n", __func__); ++ return NULL; ++} ++ ++ ++ +diff --git a/libavcodec/v4l2_req_media.h b/libavcodec/v4l2_req_media.h +new file mode 100644 +index 0000000000..75956eb962 +--- /dev/null ++++ b/libavcodec/v4l2_req_media.h +@@ -0,0 +1,145 @@ ++/* ++e.h ++* ++ * Permission is hereby granted, free of charge, to any person obtaining a ++ * copy of this software and associated documentation files (the ++ * "Software"), to deal in the Software without restriction, including ++ * without limitation the rights to use, copy, modify, merge, publish, ++ * distribute, sub license, and/or sell copies of the Software, and to ++ * permit persons to whom the Software is furnished to do so, subject to ++ * the following conditions: ++ * ++ * The above copyright notice and this permission notice (including the ++ * next paragraph) shall be included in all copies or substantial portions ++ * of the Software. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS ++ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. ++ * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ++ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, ++ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE ++ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ++ */ ++ ++#ifndef _MEDIA_H_ ++#define _MEDIA_H_ ++ ++#include ++#include ++ ++struct v4l2_format; ++struct v4l2_fmtdesc; ++struct v4l2_query_ext_ctrl; ++ ++struct pollqueue; ++struct media_request; ++struct media_pool; ++ ++typedef enum media_buf_status { ++ MEDIABUFS_STATUS_SUCCESS = 0, ++ MEDIABUFS_ERROR_OPERATION_FAILED, ++ MEDIABUFS_ERROR_DECODING_ERROR, ++ MEDIABUFS_ERROR_UNSUPPORTED_BUFFERTYPE, ++ MEDIABUFS_ERROR_UNSUPPORTED_RT_FORMAT, ++ MEDIABUFS_ERROR_ALLOCATION_FAILED, ++} MediaBufsStatus; ++ ++struct media_pool * media_pool_new(const char * const media_path, ++ struct pollqueue * const pq, ++ const unsigned int n); ++void media_pool_delete(struct media_pool ** pmp); ++ ++// Obtain a media request ++// Will block if none availible - has a 2sec timeout ++struct media_request * media_request_get(struct media_pool * const mp); ++int media_request_fd(const struct media_request * const req); ++ ++// Start this request ++// Request structure is returned to pool once done ++int media_request_start(struct media_request * const req); ++ ++// Return an *unstarted* media_request to the pool ++// May later be upgraded to allow for aborting a started req ++int media_request_abort(struct media_request ** const preq); ++ ++ ++struct mediabufs_ctl; ++struct qent_src; ++struct qent_dst; ++struct dmabuf_h; ++struct dmabufs_ctl; ++ ++int qent_src_params_set(struct qent_src *const be, const struct timeval * timestamp); ++// prealloc ++int qent_src_alloc(struct qent_src *const be_src, const size_t len, struct dmabufs_ctl * dbsc); ++// dbsc may be NULL if realloc not required ++int qent_src_data_copy(struct qent_src *const be_src, const size_t offset, const void *const src, const size_t len, struct dmabufs_ctl * dbsc); ++const struct dmabuf_h * qent_dst_dmabuf(const struct qent_dst *const be, unsigned int plane); ++int qent_dst_dup_fd(const struct qent_dst *const be, unsigned int plane); ++MediaBufsStatus qent_dst_wait(struct qent_dst *const be); ++void qent_dst_delete(struct qent_dst *const be); ++// Returns a qent_dst to its mbc free Q or deletes it if the mbc is dead ++void qent_dst_free(struct qent_dst ** const pbe_dst); ++ ++const uint8_t * qent_dst_data(struct qent_dst *const be, unsigned int buf_no); ++MediaBufsStatus qent_dst_read_start(struct qent_dst *const be); ++MediaBufsStatus qent_dst_read_stop(struct qent_dst *const be); ++/* Import an fd unattached to any mediabuf */ ++MediaBufsStatus qent_dst_import_fd(struct qent_dst *const be_dst, ++ unsigned int plane, ++ int fd, size_t size); ++ ++MediaBufsStatus mediabufs_start_request(struct mediabufs_ctl *const mbc, ++ struct media_request **const pmreq, ++ struct qent_src **const psrc_be, ++ struct qent_dst *const dst_be, ++ const bool is_final); ++// Get / alloc a dst buffer & associate with a slot ++// * BEWARE * Currently has no alloc limit ++struct qent_dst* mediabufs_dst_qent_alloc(struct mediabufs_ctl *const mbc, ++ struct dmabufs_ctl *const dbsc); ++// Create dst slots without alloc ++MediaBufsStatus mediabufs_dst_slots_create(struct 
mediabufs_ctl *const mbc, unsigned int n); ++ ++MediaBufsStatus mediabufs_stream_on(struct mediabufs_ctl *const mbc); ++MediaBufsStatus mediabufs_stream_off(struct mediabufs_ctl *const mbc); ++const struct v4l2_format *mediabufs_dst_fmt(struct mediabufs_ctl *const mbc); ++ ++typedef int mediabufs_dst_fmt_accept_fn(void * v, const struct v4l2_fmtdesc *fmtdesc); ++ ++MediaBufsStatus mediabufs_dst_fmt_set(struct mediabufs_ctl *const mbc, ++ const unsigned int width, ++ const unsigned int height, ++ mediabufs_dst_fmt_accept_fn *const accept_fn, ++ void *const accept_v); ++struct qent_src *mediabufs_src_qent_get(struct mediabufs_ctl *const mbc); ++void mediabufs_src_qent_abort(struct mediabufs_ctl *const mbc, struct qent_src **const pqe_src); ++ ++int mediabufs_ctl_set_ext_ctrls(struct mediabufs_ctl * mbc, struct media_request * const mreq, ++ struct v4l2_ext_control control_array[], unsigned int n); ++MediaBufsStatus mediabufs_set_ext_ctrl(struct mediabufs_ctl *const mbc, ++ struct media_request * const mreq, ++ unsigned int id, void *data, ++ unsigned int size); ++int mediabufs_ctl_query_ext_ctrls(struct mediabufs_ctl * mbc, struct v4l2_query_ext_ctrl ctrls[], unsigned int n); ++ ++int mediabufs_src_resizable(const struct mediabufs_ctl *const mbc); ++ ++MediaBufsStatus mediabufs_src_fmt_set(struct mediabufs_ctl *const mbc, ++ enum v4l2_buf_type buf_type, ++ const uint32_t pixfmt, ++ const uint32_t width, const uint32_t height, ++ const size_t bufsize); ++ ++MediaBufsStatus mediabufs_src_pool_create(struct mediabufs_ctl *const rw, ++ struct dmabufs_ctl * const dbsc, ++ unsigned int n); ++ ++struct mediabufs_ctl * mediabufs_ctl_new(void * const dc, ++ const char *vpath, struct pollqueue *const pq); ++void mediabufs_ctl_unref(struct mediabufs_ctl **const pmbc); ++struct mediabufs_ctl * mediabufs_ctl_ref(struct mediabufs_ctl *const mbc); ++ + +#endif -diff --git a/libavcodec/v4l2_request.c b/libavcodec/v4l2_request.c +diff --git a/libavcodec/v4l2_req_pollqueue.c b/libavcodec/v4l2_req_pollqueue.c new file mode 100644 -index 0000000000..2306c21836 +index 0000000000..6ef057232b --- /dev/null -+++ b/libavcodec/v4l2_request.c -@@ -0,0 +1,1094 @@ ++++ b/libavcodec/v4l2_req_pollqueue.c +@@ -0,0 +1,280 @@ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "v4l2_req_pollqueue.h" ++#include "v4l2_req_utils.h" ++ ++ ++struct pollqueue; ++ ++struct polltask { ++ struct polltask *next; ++ struct polltask *prev; ++ struct pollqueue *q; ++ ++ int fd; ++ short events; /* 0 => deleted */ ++ ++ void (*fn)(void *v, short revents); ++ void * v; ++ ++ uint64_t timeout; /* 0 => now */ ++}; ++ ++struct pollqueue { ++ pthread_mutex_t lock; ++ ++ struct polltask *head; ++ struct polltask *tail; ++ ++ bool kill; ++ bool no_prod; ++ int prod_fd; ++ struct polltask *prod_pt; ++ pthread_t worker; ++}; ++ ++struct polltask *polltask_new(const int fd, const short events, ++ void (*const fn)(void *v, short revents), ++ void *const v) ++{ ++ struct polltask *pt; ++ ++ if (!events) ++ return NULL; ++ ++ pt = malloc(sizeof(*pt)); ++ if (!pt) ++ return NULL; ++ ++ *pt = (struct polltask){ ++ .next = NULL, ++ .prev = NULL, ++ .fd = fd, ++ .events = events, ++ .fn = fn, ++ .v = v ++ }; ++ return pt; ++} ++ ++static void pollqueue_rem_task(struct pollqueue *const pq, struct polltask *const pt) ++{ ++ if (pt->prev) ++ pt->prev->next = pt->next; ++ else ++ pq->head = pt->next; ++ if (pt->next) ++ pt->next->prev = pt->prev; ++ else ++ pq->tail = pt->prev; 
++ pt->next = NULL; ++ pt->prev = NULL; ++} ++ ++void polltask_delete(struct polltask **const ppt) ++{ ++ struct polltask *const pt = *ppt; ++ ++ if (!pt) ++ return; ++ *ppt = NULL; ++ ++ free(pt); ++} ++ ++static int pollqueue_prod(const struct pollqueue *const pq) ++{ ++ static const uint64_t one = 1; ++ return write(pq->prod_fd, &one, sizeof(one)); ++} ++ ++static uint64_t pollqueue_now(int timeout) ++{ ++ struct timespec now; ++ uint64_t now_ms; ++ ++ if (clock_gettime(CLOCK_MONOTONIC, &now)) ++ return 0; ++ now_ms = (now.tv_nsec / 1000000) + (uint64_t)now.tv_sec * 1000 + timeout; ++ return now_ms ? now_ms : (uint64_t)1; ++} ++ ++void pollqueue_add_task(struct pollqueue *const pq, struct polltask *const pt, ++ const int timeout) ++{ ++ bool prodme; ++ pthread_mutex_lock(&pq->lock); ++ if (pq->tail) ++ pq->tail->next = pt; ++ else ++ pq->head = pt; ++ pt->prev = pq->tail; ++ pt->next = NULL; ++ pt->q = pq; ++ pt->timeout = timeout < 0 ? 0 : pollqueue_now(timeout); ++ pq->tail = pt; ++ prodme = !pq->no_prod; ++ pthread_mutex_unlock(&pq->lock); ++ if (prodme) ++ pollqueue_prod(pq); ++} ++ ++static void *poll_thread(void *v) ++{ ++ struct pollqueue *const pq = v; ++ struct pollfd *a = NULL; ++ size_t asize = 0; ++ ++ pthread_mutex_lock(&pq->lock); ++ do { ++ unsigned int i; ++ unsigned int n = 0; ++ struct polltask *pt; ++ uint64_t now = pollqueue_now(0); ++ int timeout = -1; ++ int rv; ++ ++ for (pt = pq->head; pt; pt = pt->next) { ++ int64_t t; ++ ++ if (n >= asize) { ++ asize = asize ? asize * 2 : 4; ++ a = realloc(a, asize * sizeof(*a)); ++ if (!a) { ++ request_log("Failed to realloc poll array to %d\n", asize); ++ goto fail_locked; ++ } ++ } ++ ++ a[n++] = (struct pollfd){ ++ .fd = pt->fd, ++ .events = pt->events ++ }; ++ ++ t = (int64_t)(pt->timeout - now); ++ if (pt->timeout && t < INT_MAX && ++ (timeout < 0 || (int)t < timeout)) ++ timeout = (t < 0) ? 0 : (int)t; ++ } ++ pthread_mutex_unlock(&pq->lock); ++ ++ if ((rv = poll(a, n, timeout)) == -1) { ++ if (errno != EINTR) { ++ request_log("Poll error: %s\n", strerror(errno)); ++ goto fail_unlocked; ++ } ++ } ++ ++ pthread_mutex_lock(&pq->lock); ++ now = pollqueue_now(0); ++ ++ /* Prodding in this loop is pointless and might lead to ++ * infinite looping ++ */ ++ pq->no_prod = true; ++ for (i = 0, pt = pq->head; i < n; ++i) { ++ struct polltask *const pt_next = pt->next; ++ ++ /* Pending? 
*/ ++ if (a[i].revents || ++ (pt->timeout && (int64_t)(now - pt->timeout) >= 0)) { ++ pollqueue_rem_task(pq, pt); ++ pthread_mutex_unlock(&pq->lock); ++ ++ /* This can add new entries to the Q but as ++ * those are added to the tail our existing ++ * chain remains intact ++ */ ++ pt->fn(pt->v, a[i].revents); ++ ++ pthread_mutex_lock(&pq->lock); ++ } ++ ++ pt = pt_next; ++ } ++ pq->no_prod = false; ++ ++ } while (!pq->kill); ++ ++fail_locked: ++ pthread_mutex_unlock(&pq->lock); ++fail_unlocked: ++ free(a); ++ return NULL; ++} ++ ++static void prod_fn(void *v, short revents) ++{ ++ struct pollqueue *const pq = v; ++ char buf[8]; ++ if (revents) ++ read(pq->prod_fd, buf, 8); ++ if (!pq->kill) ++ pollqueue_add_task(pq, pq->prod_pt, -1); ++} ++ ++struct pollqueue * pollqueue_new(void) ++{ ++ struct pollqueue *pq = malloc(sizeof(*pq)); ++ if (!pq) ++ return NULL; ++ *pq = (struct pollqueue){ ++ .head = NULL, ++ .tail = NULL, ++ .kill = false, ++ .lock = PTHREAD_MUTEX_INITIALIZER, ++ .prod_fd = -1 ++ }; ++ ++ pq->prod_fd = eventfd(0, EFD_NONBLOCK); ++ if (pq->prod_fd == 1) ++ goto fail1; ++ pq->prod_pt = polltask_new(pq->prod_fd, POLLIN, prod_fn, pq); ++ if (!pq->prod_pt) ++ goto fail2; ++ pollqueue_add_task(pq, pq->prod_pt, -1); ++ if (pthread_create(&pq->worker, NULL, poll_thread, pq)) ++ goto fail3; ++ return pq; ++ ++fail3: ++ polltask_delete(&pq->prod_pt); ++fail2: ++ close(pq->prod_fd); ++fail1: ++ free(pq); ++ return NULL; ++} ++ ++void pollqueue_delete(struct pollqueue **const ppq) ++{ ++ struct pollqueue * pq = *ppq; ++ void *rv; ++ ++ if (!pq) ++ return; ++ *ppq = NULL; ++ ++ pthread_mutex_lock(&pq->lock); ++ pq->kill = true; ++ pollqueue_prod(pq); ++ pthread_mutex_unlock(&pq->lock); ++ ++ pthread_join(pq->worker, &rv); ++ polltask_delete(&pq->prod_pt); ++ pthread_mutex_destroy(&pq->lock); ++ close(pq->prod_fd); ++ free(pq); ++} ++ ++ +diff --git a/libavcodec/v4l2_req_pollqueue.h b/libavcodec/v4l2_req_pollqueue.h +new file mode 100644 +index 0000000000..dcb0d80258 +--- /dev/null ++++ b/libavcodec/v4l2_req_pollqueue.h +@@ -0,0 +1,17 @@ ++#ifndef POLLQUEUE_H_ ++#define POLLQUEUE_H_ ++ ++struct polltask; ++struct pollqueue; ++ ++struct polltask *polltask_new(const int fd, const short events, ++ void (*const fn)(void *v, short revents), ++ void *const v); ++void polltask_delete(struct polltask **const ppt); ++ ++void pollqueue_add_task(struct pollqueue *const pq, struct polltask *const pt, ++ const int timeout); ++struct pollqueue * pollqueue_new(void); ++void pollqueue_delete(struct pollqueue **const ppq); ++ ++#endif /* POLLQUEUE_H_ */ +diff --git a/libavcodec/v4l2_req_utils.h b/libavcodec/v4l2_req_utils.h +new file mode 100644 +index 0000000000..9e9a5f7e39 +--- /dev/null ++++ b/libavcodec/v4l2_req_utils.h +@@ -0,0 +1,21 @@ ++#include "libavutil/log.h" ++ ++#define request_log(...) av_log(NULL, AV_LOG_INFO, __VA_ARGS__) ++ ++#define request_err(_ctx, ...) av_log(_ctx, AV_LOG_ERROR, __VA_ARGS__) ++#define request_info(_ctx, ...) av_log(_ctx, AV_LOG_INFO, __VA_ARGS__) ++#define request_debug(_ctx, ...) av_log(_ctx, AV_LOG_DEBUG, __VA_ARGS__) ++ ++static inline char safechar(char c) { ++ return c > 0x20 && c < 0x7f ? 
c : '.'; ++} ++ ++static inline const char * strfourcc(char tbuf[5], uint32_t fcc) { ++ tbuf[0] = safechar((fcc >> 0) & 0xff); ++ tbuf[1] = safechar((fcc >> 8) & 0xff); ++ tbuf[2] = safechar((fcc >> 16) & 0xff); ++ tbuf[3] = safechar((fcc >> 24) & 0xff); ++ tbuf[4] = '\0'; ++ return tbuf; ++} ++ +diff --git a/libavcodec/v4l2_request_hevc.c b/libavcodec/v4l2_request_hevc.c +new file mode 100644 +index 0000000000..f869c4e3d5 +--- /dev/null ++++ b/libavcodec/v4l2_request_hevc.c +@@ -0,0 +1,1192 @@ +/* + * This file is part of FFmpeg. + * @@ -48465,20 +51462,22 @@ index 0000000000..2306c21836 + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+#include -+ +#include "decode.h" -+#include "internal.h" -+#include "v4l2_request.h" -+#include "v4l2_phase.h" ++#include "hevcdec.h" ++#include "hwconfig.h" ++ ++#include "hevc-ctrls.h" ++#include "libavutil/hwcontext_drm.h" ++ ++#include "v4l2_req_devscan.h" ++#include "v4l2_req_dmabufs.h" ++#include "v4l2_req_pollqueue.h" ++#include "v4l2_req_media.h" ++#include "v4l2_req_utils.h" ++ ++#define MAX_SLICES 16 ++ ++#include + +#ifndef DRM_FORMAT_NV15 +#define DRM_FORMAT_NV15 fourcc_code('N', 'V', '1', '5') @@ -48502,1664 +51501,96 @@ index 0000000000..2306c21836 +#define DRM_FORMAT_NV20 fourcc_code('N', 'V', '2', '0') +#endif + -+uint64_t ff_v4l2_request_get_capture_timestamp(AVFrame *frame) -+{ -+ V4L2RequestDescriptor *req = (V4L2RequestDescriptor*)frame->data[0]; -+ return req ? v4l2_timeval_to_ns(&req->capture.buffer.timestamp) : 0; -+} -+ -+int ff_v4l2_request_start_phase_control(AVFrame *frame, struct V4L2PhaseControl * ctrl) -+{ -+ V4L2RequestDescriptor * const req = (V4L2RequestDescriptor*)frame->data[0]; -+ return ff_v4l2_phase_start(&req->phase, ctrl); -+} -+ -+void ff_v4l2_request_abort_phase_control(AVFrame *frame) -+{ -+ if (frame != NULL && frame->data[0] != NULL) { -+ V4L2RequestDescriptor *const req = (V4L2RequestDescriptor *)frame->data[0]; -+ ff_v4l2_phase_abort(&req->phase); -+ } -+} -+ -+int ff_v4l2_request_reset_frame(AVCodecContext *avctx, AVFrame *frame) -+{ -+ V4L2RequestDescriptor *req = (V4L2RequestDescriptor*)frame->data[0]; -+ memset(&req->drm, 0, sizeof(AVDRMFrameDescriptor)); -+ req->output.used = 0; -+ return 0; -+} -+ -+int ff_v4l2_request_append_output_buffer(AVCodecContext *avctx, AVFrame *frame, const uint8_t *data, uint32_t size) -+{ -+ V4L2RequestDescriptor *req = (V4L2RequestDescriptor*)frame->data[0]; -+ if (req->output.used + size + (AV_INPUT_BUFFER_PADDING_SIZE * 4) <= req->output.size) { -+ memcpy(req->output.addr + req->output.used, data, size); -+ req->output.used += size; -+ } else { -+ av_log(avctx, AV_LOG_ERROR, "%s: output.used=%u output.size=%u size=%u\n", __func__, req->output.used, req->output.size, size); -+ } -+ return 0; -+} -+ -+static int v4l2_request_controls(V4L2RequestContext *ctx, int request_fd, unsigned long type, struct v4l2_ext_control *control, int count) -+{ -+ struct v4l2_ext_controls controls = { -+ .controls = control, -+ .count = count, -+ .request_fd = request_fd, -+ .which = (request_fd >= 0) ? 
V4L2_CTRL_WHICH_REQUEST_VAL : 0, -+ }; -+ -+ if (!control || !count) -+ return 0; -+ -+ return ioctl(ctx->video_fd, type, &controls); -+} -+ -+static int v4l2_request_set_controls(V4L2RequestContext *ctx, int request_fd, struct v4l2_ext_control *control, int count) -+{ -+ return v4l2_request_controls(ctx, request_fd, VIDIOC_S_EXT_CTRLS, control, count); -+} -+ -+int ff_v4l2_request_set_controls(AVCodecContext *avctx, struct v4l2_ext_control *control, int count) -+{ -+ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data; -+ int ret; -+ -+ ret = v4l2_request_controls(ctx, -1, VIDIOC_S_EXT_CTRLS, control, count); -+ if (ret < 0) { -+ av_log(avctx, AV_LOG_ERROR, "%s: set controls failed, %s (%d)\n", __func__, strerror(errno), errno); -+ return AVERROR(EINVAL); -+ } -+ -+ return ret; -+} -+ -+int ff_v4l2_request_get_controls(AVCodecContext *avctx, struct v4l2_ext_control *control, int count) -+{ -+ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data; -+ int ret; -+ -+ ret = v4l2_request_controls(ctx, -1, VIDIOC_G_EXT_CTRLS, control, count); -+ if (ret < 0) { -+ av_log(avctx, AV_LOG_ERROR, "%s: get controls failed, %s (%d)\n", __func__, strerror(errno), errno); -+ return AVERROR(EINVAL); -+ } -+ -+ return ret; -+} -+ -+int ff_v4l2_request_query_control(AVCodecContext *avctx, struct v4l2_query_ext_ctrl *control) -+{ -+ int ret; -+ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data; -+ -+ ret = ioctl(ctx->video_fd, VIDIOC_QUERY_EXT_CTRL, control); -+ if (ret < 0) { -+ av_log(avctx, AV_LOG_ERROR, "%s: query control failed, %s (%d)\n", __func__, strerror(errno), errno); -+ return AVERROR(EINVAL); -+ } -+ -+ return 0; -+} -+ -+int ff_v4l2_request_query_control_default_value(AVCodecContext *avctx, uint32_t id) -+{ -+ int ret; -+ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data; -+ struct v4l2_queryctrl control = { -+ .id = id, -+ }; -+ -+ ret = ioctl(ctx->video_fd, VIDIOC_QUERYCTRL, &control); -+ if (ret < 0) { -+ av_log(avctx, AV_LOG_ERROR, "%s: query control failed, %s (%d)\n", __func__, strerror(errno), errno); -+ return AVERROR(EINVAL); -+ } -+ -+ return control.default_value; -+} -+ -+static int v4l2_request_queue_buffer(V4L2RequestContext *ctx, int request_fd, V4L2RequestBuffer *buf, uint32_t flags) -+{ -+ struct v4l2_plane planes[1] = {}; -+ struct v4l2_buffer buffer = { -+ .type = buf->buffer.type, -+ .memory = buf->buffer.memory, -+ .index = buf->index, -+ .timestamp.tv_usec = ctx->timestamp, -+ .bytesused = buf->used, -+ .request_fd = request_fd, -+ .flags = ((request_fd >= 0) ? 
V4L2_BUF_FLAG_REQUEST_FD : 0) | flags, -+ }; -+ -+ buf->buffer.timestamp = buffer.timestamp; -+ -+ if (V4L2_TYPE_IS_MULTIPLANAR(buf->buffer.type)) { -+ planes[0].bytesused = buf->used; -+ buffer.bytesused = 0; -+ buffer.length = 1; -+ buffer.m.planes = planes; -+ } -+ -+ return ioctl(ctx->video_fd, VIDIOC_QBUF, &buffer); -+} -+ -+static int v4l2_request_dequeue_buffer(V4L2RequestContext *ctx, V4L2RequestBuffer *buf) -+{ -+ int ret; -+ struct v4l2_plane planes[1] = {}; -+ struct v4l2_buffer buffer = { -+ .type = buf->buffer.type, -+ .memory = buf->buffer.memory, -+ .index = buf->index, -+ }; -+ -+ if (V4L2_TYPE_IS_MULTIPLANAR(buf->buffer.type)) { -+ buffer.length = 1; -+ buffer.m.planes = planes; -+ } -+ -+ ret = ioctl(ctx->video_fd, VIDIOC_DQBUF, &buffer); -+ if (ret < 0) -+ return ret; -+ -+ buf->buffer.timestamp = buffer.timestamp; -+ buf->buffer.flags = buffer.flags; -+ return 0; -+} -+ -+const uint32_t v4l2_request_capture_pixelformats[] = { -+#if CONFIG_SAND -+ V4L2_PIX_FMT_NV12_COL128, -+ V4L2_PIX_FMT_NV12_10_COL128, -+#endif -+ V4L2_PIX_FMT_NV12, -+#ifdef DRM_FORMAT_MOD_ALLWINNER_TILED -+ V4L2_PIX_FMT_SUNXI_TILED_NV12, -+#endif -+#if defined(V4L2_PIX_FMT_NV15) && defined(DRM_FORMAT_NV15) -+ V4L2_PIX_FMT_NV15, -+#endif -+ V4L2_PIX_FMT_NV16, -+#if defined(V4L2_PIX_FMT_NV20) && defined(DRM_FORMAT_NV20) -+ V4L2_PIX_FMT_NV20, -+#endif -+}; -+ -+static int v4l2_request_set_drm_descriptor(V4L2RequestDescriptor *req, struct v4l2_format *format) -+{ -+ AVDRMFrameDescriptor *desc = &req->drm; -+ AVDRMLayerDescriptor *layer = &desc->layers[0]; -+ uint32_t pixelformat = V4L2_TYPE_IS_MULTIPLANAR(format->type) ? format->fmt.pix_mp.pixelformat : format->fmt.pix.pixelformat; -+ -+ switch (pixelformat) { -+ case V4L2_PIX_FMT_NV12: -+ layer->format = DRM_FORMAT_NV12; -+ desc->objects[0].format_modifier = DRM_FORMAT_MOD_LINEAR; -+ break; -+#if CONFIG_SAND -+ case V4L2_PIX_FMT_NV12_COL128: -+ layer->format = DRM_FORMAT_NV12; -+ desc->objects[0].format_modifier = DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT(format->fmt.pix.bytesperline); -+ break; -+ case V4L2_PIX_FMT_NV12_10_COL128: -+ layer->format = DRM_FORMAT_P030; -+ desc->objects[0].format_modifier = DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT(format->fmt.pix.bytesperline); -+ break; -+#endif -+#ifdef DRM_FORMAT_MOD_ALLWINNER_TILED -+ case V4L2_PIX_FMT_SUNXI_TILED_NV12: -+ layer->format = DRM_FORMAT_NV12; -+ desc->objects[0].format_modifier = DRM_FORMAT_MOD_ALLWINNER_TILED; -+ break; -+#endif -+#if defined(V4L2_PIX_FMT_NV15) && defined(DRM_FORMAT_NV15) -+ case V4L2_PIX_FMT_NV15: -+ layer->format = DRM_FORMAT_NV15; -+ desc->objects[0].format_modifier = DRM_FORMAT_MOD_LINEAR; -+ break; -+#endif -+ case V4L2_PIX_FMT_NV16: -+ layer->format = DRM_FORMAT_NV16; -+ desc->objects[0].format_modifier = DRM_FORMAT_MOD_LINEAR; -+ break; -+#if defined(V4L2_PIX_FMT_NV20) && defined(DRM_FORMAT_NV20) -+ case V4L2_PIX_FMT_NV20: -+ layer->format = DRM_FORMAT_NV20; -+ desc->objects[0].format_modifier = DRM_FORMAT_MOD_LINEAR; -+ break; -+#endif -+ default: -+ return -1; -+ } -+ -+ desc->nb_objects = 1; -+ desc->objects[0].fd = req->capture.fd; -+ desc->objects[0].size = req->capture.size; -+ -+ desc->nb_layers = 1; -+ layer->nb_planes = 2; -+ -+ layer->planes[0].object_index = 0; -+ layer->planes[0].offset = 0; -+ layer->planes[0].pitch = V4L2_TYPE_IS_MULTIPLANAR(format->type) ? 
format->fmt.pix_mp.plane_fmt[0].bytesperline : format->fmt.pix.bytesperline; -+#if CONFIG_SAND -+ if (pixelformat == V4L2_PIX_FMT_NV12_COL128) { -+ layer->planes[1].object_index = 0; -+ layer->planes[1].offset = format->fmt.pix.height * 128; -+ layer->planes[0].pitch = format->fmt.pix.width; -+ layer->planes[1].pitch = format->fmt.pix.width; -+ } -+ else if (pixelformat == V4L2_PIX_FMT_NV12_10_COL128) { -+ layer->planes[1].object_index = 0; -+ layer->planes[1].offset = format->fmt.pix.height * 128; -+ layer->planes[0].pitch = format->fmt.pix.width * 2; // Lies but it keeps DRM import happy -+ layer->planes[1].pitch = format->fmt.pix.width * 2; -+ } -+ else -+#endif -+ { -+ layer->planes[1].object_index = 0; -+ layer->planes[1].offset = layer->planes[0].pitch * (V4L2_TYPE_IS_MULTIPLANAR(format->type) ? format->fmt.pix_mp.height : format->fmt.pix.height); -+ layer->planes[1].pitch = layer->planes[0].pitch; -+ } -+ -+ return 0; -+} -+ -+static int v4l2_request_queue_decode(AVCodecContext *avctx, AVFrame *frame, struct v4l2_ext_control *control, int count, int first_slice, int last_slice) -+{ -+ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data; -+ V4L2RequestDescriptor *req = (V4L2RequestDescriptor*)frame->data[0]; -+ struct timeval tv = { 2, 0 }; -+ fd_set except_fds; -+ int ret; -+ -+ av_log(avctx, AV_LOG_DEBUG, "%s: avctx=%p used=%u controls=%d index=%d fd=%d request_fd=%d first_slice=%d last_slice=%d\n", __func__, avctx, req->output.used, count, req->capture.index, req->capture.fd, req->request_fd, first_slice, last_slice); -+ -+ if (first_slice) -+ ctx->timestamp++; -+ -+ ret = v4l2_request_set_controls(ctx, req->request_fd, control, count); -+ if (ret < 0) { -+ av_log(avctx, AV_LOG_ERROR, "%s: set controls failed for request %d, %s (%d)\n", __func__, req->request_fd, strerror(errno), errno); -+ return -1; -+ } -+ -+ memset(req->output.addr + req->output.used, 0, AV_INPUT_BUFFER_PADDING_SIZE * 4); -+ -+ ret = v4l2_request_queue_buffer(ctx, req->request_fd, &req->output, last_slice ? 0 : V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF); -+ if (ret < 0) { -+ av_log(avctx, AV_LOG_ERROR, "%s: queue output buffer %d failed for request %d, %s (%d)\n", __func__, req->output.index, req->request_fd, strerror(errno), errno); -+ return -1; -+ } -+ -+ if (first_slice) { -+ ret = v4l2_request_queue_buffer(ctx, -1, &req->capture, 0); -+ if (ret < 0) { -+ av_log(avctx, AV_LOG_ERROR, "%s: queue capture buffer %d failed for request %d, %s (%d)\n", __func__, req->capture.index, req->request_fd, strerror(errno), errno); -+ return -1; -+ } -+ } -+ -+ // NOTE: do we need to dequeue when request fails/timeout? -+ -+ // 4. 
queue request and wait -+ ret = ioctl(req->request_fd, MEDIA_REQUEST_IOC_QUEUE, NULL); -+ if (ret < 0) { -+ av_log(avctx, AV_LOG_ERROR, "%s: queue request %d failed, %s (%d)\n", __func__, req->request_fd, strerror(errno), errno); -+ goto fail; -+ } -+ -+ FD_ZERO(&except_fds); -+ FD_SET(req->request_fd, &except_fds); -+ -+ ret = select(req->request_fd + 1, NULL, NULL, &except_fds, &tv); -+ if (ret == 0) { -+ av_log(avctx, AV_LOG_ERROR, "%s: request %d timeout\n", __func__, req->request_fd); -+ goto fail; -+ } else if (ret < 0) { -+ av_log(avctx, AV_LOG_ERROR, "%s: select request %d failed, %s (%d)\n", __func__, req->request_fd, strerror(errno), errno); -+ goto fail; -+ } -+ -+ ret = v4l2_request_dequeue_buffer(ctx, &req->output); -+ if (ret < 0) { -+ av_log(avctx, AV_LOG_ERROR, "%s: dequeue output buffer %d failed for request %d, %s (%d)\n", __func__, req->output.index, req->request_fd, strerror(errno), errno); -+ return -1; -+ } -+ -+ ret = ioctl(req->request_fd, MEDIA_REQUEST_IOC_REINIT, NULL); -+ if (ret < 0) { -+ av_log(avctx, AV_LOG_ERROR, "%s: reinit request %d failed, %s (%d)\n", __func__, req->request_fd, strerror(errno), errno); -+ return -1; -+ } -+ -+ if (last_slice) { -+ if (ff_v4l2_phase_started(&req->phase)) { -+ ff_v4l2_phase_release(&req->phase, 0); -+ ff_v4l2_phase_claim(&req->phase, 1); -+ } -+ -+ ret = v4l2_request_dequeue_buffer(ctx, &req->capture); -+ -+ if (ff_v4l2_phase_started(&req->phase)) { -+ ff_v4l2_phase_release(&req->phase, 1); -+ } -+ -+ if (ret < 0) { -+ av_log(avctx, AV_LOG_ERROR, "%s: dequeue capture buffer %d failed for request %d, %s (%d)\n", __func__, req->capture.index, req->request_fd, strerror(errno), errno); -+ return -1; -+ } -+ } -+ -+ // TODO: check errors -+ // buffer.flags & V4L2_BUF_FLAG_ERROR -+ -+ if (last_slice) -+ return v4l2_request_set_drm_descriptor(req, &ctx->format); -+ -+ return 0; -+ -+fail: -+ ret = v4l2_request_dequeue_buffer(ctx, &req->output); -+ if (ret < 0) -+ av_log(avctx, AV_LOG_ERROR, "%s: dequeue output buffer %d failed for request %d, %s (%d)\n", __func__, req->output.index, req->request_fd, strerror(errno), errno); -+ -+ ret = v4l2_request_dequeue_buffer(ctx, &req->capture); -+ if (ret < 0) -+ av_log(avctx, AV_LOG_ERROR, "%s: dequeue capture buffer %d failed for request %d, %s (%d)\n", __func__, req->capture.index, req->request_fd, strerror(errno), errno); -+ -+ ret = ioctl(req->request_fd, MEDIA_REQUEST_IOC_REINIT, NULL); -+ if (ret < 0) -+ av_log(avctx, AV_LOG_ERROR, "%s: reinit request %d failed, %s (%d)\n", __func__, req->request_fd, strerror(errno), errno); -+ -+ return -1; -+} -+ -+int ff_v4l2_request_decode_slice(AVCodecContext *avctx, AVFrame *frame, struct v4l2_ext_control *control, int count, int first_slice, int last_slice) -+{ -+ V4L2RequestDescriptor *req = (V4L2RequestDescriptor*)frame->data[0]; -+ -+ // fall back to queue each slice as a full frame -+ if ((req->output.capabilities & V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF) != V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF) -+ return v4l2_request_queue_decode(avctx, frame, control, count, 1, 1); -+ -+ return v4l2_request_queue_decode(avctx, frame, control, count, first_slice, last_slice); -+} -+ -+int ff_v4l2_request_decode_frame(AVCodecContext *avctx, AVFrame *frame, struct v4l2_ext_control *control, int count) -+{ -+ return v4l2_request_queue_decode(avctx, frame, control, count, 1, 1); -+} -+ -+static int v4l2_request_try_format(AVCodecContext *avctx, enum v4l2_buf_type type, uint32_t pixelformat) -+{ -+ V4L2RequestContext *ctx = 
avctx->internal->hwaccel_priv_data; -+ struct v4l2_fmtdesc fmtdesc = { -+ .index = 0, -+ .type = type, -+ }; -+ -+ if (V4L2_TYPE_IS_OUTPUT(type)) { -+ struct v4l2_create_buffers buffers = { -+ .count = 0, -+ .memory = V4L2_MEMORY_MMAP, -+ .format.type = type, -+ }; -+ -+ if (ioctl(ctx->video_fd, VIDIOC_CREATE_BUFS, &buffers) < 0) { -+ av_log(avctx, AV_LOG_ERROR, "%s: create buffers failed for type %u, %s (%d)\n", __func__, type, strerror(errno), errno); -+ return -1; -+ } -+ -+ if ((buffers.capabilities & V4L2_BUF_CAP_SUPPORTS_REQUESTS) != V4L2_BUF_CAP_SUPPORTS_REQUESTS) { -+ av_log(avctx, AV_LOG_INFO, "%s: output buffer type do not support requests, capabilities %u\n", __func__, buffers.capabilities); -+ return -1; -+ } -+ } -+ -+ while (ioctl(ctx->video_fd, VIDIOC_ENUM_FMT, &fmtdesc) >= 0) { -+ if (fmtdesc.pixelformat == pixelformat) -+ return 0; -+ -+ fmtdesc.index++; -+ } -+ -+ av_log(avctx, AV_LOG_INFO, "%s: pixelformat %u not supported for type %u\n", __func__, pixelformat, type); -+ return -1; -+} -+ -+static int v4l2_request_set_format(AVCodecContext *avctx, enum v4l2_buf_type type, uint32_t pixelformat, uint32_t buffersize) -+{ -+ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data; -+ struct v4l2_format format = { -+ .type = type, -+ }; -+ -+ if (V4L2_TYPE_IS_MULTIPLANAR(type)) { -+ format.fmt.pix_mp.width = avctx->coded_width; -+ format.fmt.pix_mp.height = avctx->coded_height; -+ format.fmt.pix_mp.pixelformat = pixelformat; -+ format.fmt.pix_mp.plane_fmt[0].sizeimage = buffersize; -+ format.fmt.pix_mp.num_planes = 1; -+ } else { -+ format.fmt.pix.width = avctx->coded_width; -+ format.fmt.pix.height = avctx->coded_height; -+ format.fmt.pix.pixelformat = pixelformat; -+ format.fmt.pix.sizeimage = buffersize; -+ } -+ -+ return ioctl(ctx->video_fd, VIDIOC_S_FMT, &format); -+} -+ -+static int v4l2_request_select_capture_format(AVCodecContext *avctx) -+{ -+ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data; -+ enum v4l2_buf_type type = ctx->format.type; -+ -+#if 0 -+ struct v4l2_format format = { -+ .type = type, -+ }; -+ struct v4l2_fmtdesc fmtdesc = { -+ .index = 0, -+ .type = type, -+ }; -+ uint32_t pixelformat; -+ int i; -+ -+ if (ioctl(ctx->video_fd, VIDIOC_G_FMT, &format) < 0) { -+ av_log(avctx, AV_LOG_ERROR, "%s: get capture format failed, %s (%d)\n", __func__, strerror(errno), errno); -+ return -1; -+ } -+ -+ pixelformat = V4L2_TYPE_IS_MULTIPLANAR(type) ? format.fmt.pix_mp.pixelformat : format.fmt.pix.pixelformat; -+ -+ for (i = 0; i < FF_ARRAY_ELEMS(v4l2_request_capture_pixelformats); i++) { -+ if (pixelformat == v4l2_request_capture_pixelformats[i]) -+ return v4l2_request_set_format(avctx, type, pixelformat, 0); -+ } -+ -+ while (ioctl(ctx->video_fd, VIDIOC_ENUM_FMT, &fmtdesc) >= 0) { -+ for (i = 0; i < FF_ARRAY_ELEMS(v4l2_request_capture_pixelformats); i++) { -+ if (fmtdesc.pixelformat == v4l2_request_capture_pixelformats[i]) -+ return v4l2_request_set_format(avctx, type, fmtdesc.pixelformat, 0); -+ } -+ -+ fmtdesc.index++; -+ } -+#else -+ for (int i = 0; i < FF_ARRAY_ELEMS(v4l2_request_capture_pixelformats); i++) { -+ uint32_t pixelformat = v4l2_request_capture_pixelformats[i]; -+ if (!v4l2_request_try_format(avctx, type, pixelformat)) -+ return v4l2_request_set_format(avctx, type, pixelformat, 0); -+ } ++// V4L2_PIX_FMT_NV12_10_COL128 and V4L2_PIX_FMT_NV12_COL128 should be defined ++// in drm_fourcc.h hopefully will be sometime in the future but until then... 
++#ifndef V4L2_PIX_FMT_NV12_10_COL128 ++#define V4L2_PIX_FMT_NV12_10_COL128 v4l2_fourcc('N', 'C', '3', '0') +#endif + -+ return -1; -+} -+ -+static int v4l2_request_probe_video_device(struct udev_device *device, AVCodecContext *avctx, uint32_t pixelformat, uint32_t buffersize, struct v4l2_ext_control *control, int count) -+{ -+ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data; -+ int ret = AVERROR(EINVAL); -+ struct v4l2_capability capability = {0}; -+ unsigned int capabilities = 0; -+ -+ const char *path = udev_device_get_devnode(device); -+ if (!path) { -+ av_log(avctx, AV_LOG_ERROR, "%s: get video device devnode failed\n", __func__); -+ ret = AVERROR(EINVAL); -+ goto fail; -+ } -+ -+// ctx->video_fd = open(path, O_RDWR | O_NONBLOCK, 0); -+ ctx->video_fd = open(path, O_RDWR, 0); -+ if (ctx->video_fd < 0) { -+ av_log(avctx, AV_LOG_ERROR, "%s: opening %s failed, %s (%d)\n", __func__, path, strerror(errno), errno); -+ ret = AVERROR(EINVAL); -+ goto fail; -+ } -+ -+ ret = ioctl(ctx->video_fd, VIDIOC_QUERYCAP, &capability); -+ if (ret < 0) { -+ av_log(avctx, AV_LOG_ERROR, "%s: get video capability failed, %s (%d)\n", __func__, strerror(errno), errno); -+ ret = AVERROR(EINVAL); -+ goto fail; -+ } -+ -+ if (capability.capabilities & V4L2_CAP_DEVICE_CAPS) -+ capabilities = capability.device_caps; -+ else -+ capabilities = capability.capabilities; -+ -+ av_log(avctx, AV_LOG_DEBUG, "%s: avctx=%p ctx=%p path=%s capabilities=%u\n", __func__, avctx, ctx, path, capabilities); -+ -+ if ((capabilities & V4L2_CAP_STREAMING) != V4L2_CAP_STREAMING) { -+ av_log(avctx, AV_LOG_ERROR, "%s: missing required streaming capability\n", __func__); -+ ret = AVERROR(EINVAL); -+ goto fail; -+ } -+ -+ if ((capabilities & V4L2_CAP_VIDEO_M2M_MPLANE) == V4L2_CAP_VIDEO_M2M_MPLANE) { -+ ctx->output_type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE; -+ ctx->format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; -+ } else if ((capabilities & V4L2_CAP_VIDEO_M2M) == V4L2_CAP_VIDEO_M2M) { -+ ctx->output_type = V4L2_BUF_TYPE_VIDEO_OUTPUT; -+ ctx->format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; -+ } else { -+ av_log(avctx, AV_LOG_ERROR, "%s: missing required mem2mem capability\n", __func__); -+ ret = AVERROR(EINVAL); -+ goto fail; -+ } -+ -+ ret = v4l2_request_try_format(avctx, ctx->output_type, pixelformat); -+ if (ret < 0) { -+ av_log(avctx, AV_LOG_WARNING, "%s: try output format failed\n", __func__); -+ ret = AVERROR(EINVAL); -+ goto fail; -+ } -+ -+ ret = v4l2_request_set_format(avctx, ctx->output_type, pixelformat, buffersize); -+ if (ret < 0) { -+ av_log(avctx, AV_LOG_ERROR, "%s: set output format failed, %s (%d)\n", __func__, strerror(errno), errno); -+ ret = AVERROR(EINVAL); -+ goto fail; -+ } -+ -+ ret = v4l2_request_set_controls(ctx, -1, control, count); -+ if (ret < 0) { -+ av_log(avctx, AV_LOG_ERROR, "%s: set controls failed, %s (%d)\n", __func__, strerror(errno), errno); -+ ret = AVERROR(EINVAL); -+ goto fail; -+ } -+ -+ ret = v4l2_request_select_capture_format(avctx); -+ if (ret < 0) { -+ av_log(avctx, AV_LOG_WARNING, "%s: select capture format failed\n", __func__); -+ ret = AVERROR(EINVAL); -+ goto fail; -+ } -+ -+ return 0; -+ -+fail: -+ if (ctx->video_fd >= 0) { -+ close(ctx->video_fd); -+ ctx->video_fd = -1; -+ } -+ return ret; -+} -+ -+static int v4l2_request_init_context(AVCodecContext *avctx) -+{ -+ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data; -+ int ret; -+ -+ ret = ioctl(ctx->video_fd, VIDIOC_G_FMT, &ctx->format); -+ if (ret < 0) { -+ av_log(avctx, AV_LOG_ERROR, "%s: get capture format failed, %s 
(%d)\n", __func__, strerror(errno), errno); -+ ret = AVERROR(EINVAL); -+ goto fail; -+ } -+ -+ if (V4L2_TYPE_IS_MULTIPLANAR(ctx->format.type)) { -+ av_log(avctx, AV_LOG_DEBUG, "%s: pixelformat=%d width=%u height=%u bytesperline=%u sizeimage=%u num_planes=%u\n", __func__, ctx->format.fmt.pix_mp.pixelformat, ctx->format.fmt.pix_mp.width, ctx->format.fmt.pix_mp.height, ctx->format.fmt.pix_mp.plane_fmt[0].bytesperline, ctx->format.fmt.pix_mp.plane_fmt[0].sizeimage, ctx->format.fmt.pix_mp.num_planes); -+ } else { -+ av_log(avctx, AV_LOG_DEBUG, "%s: pixelformat=%d width=%u height=%u bytesperline=%u sizeimage=%u\n", __func__, ctx->format.fmt.pix.pixelformat, ctx->format.fmt.pix.width, ctx->format.fmt.pix.height, ctx->format.fmt.pix.bytesperline, ctx->format.fmt.pix.sizeimage); -+ } -+ -+ ret = ff_decode_get_hw_frames_ctx(avctx, AV_HWDEVICE_TYPE_DRM); -+ if (ret < 0) -+ goto fail; -+ -+ ret = ioctl(ctx->video_fd, VIDIOC_STREAMON, &ctx->output_type); -+ if (ret < 0) { -+ av_log(avctx, AV_LOG_ERROR, "%s: output stream on failed, %s (%d)\n", __func__, strerror(errno), errno); -+ ret = AVERROR(EINVAL); -+ goto fail; -+ } -+ -+ ret = ioctl(ctx->video_fd, VIDIOC_STREAMON, &ctx->format.type); -+ if (ret < 0) { -+ av_log(avctx, AV_LOG_ERROR, "%s: capture stream on failed, %s (%d)\n", __func__, strerror(errno), errno); -+ ret = AVERROR(EINVAL); -+ goto fail; -+ } -+ -+ return 0; -+ -+fail: -+ ff_v4l2_request_uninit(avctx); -+ return ret; -+} -+ -+static int v4l2_request_probe_media_device(struct udev_device *device, AVCodecContext *avctx, uint32_t pixelformat, uint32_t buffersize, struct v4l2_ext_control *control, int count) -+{ -+ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data; -+ int ret; -+ struct media_device_info device_info = {0}; -+ struct media_v2_topology topology = {0}; -+ struct media_v2_interface *interfaces = NULL; -+ struct udev *udev = udev_device_get_udev(device); -+ struct udev_device *video_device; -+ dev_t devnum; -+ -+ const char *path = udev_device_get_devnode(device); -+ if (!path) { -+ av_log(avctx, AV_LOG_ERROR, "%s: get media device devnode failed\n", __func__); -+ ret = AVERROR(EINVAL); -+ goto fail; -+ } -+ -+ ctx->media_fd = open(path, O_RDWR, 0); -+ if (ctx->media_fd < 0) { -+ av_log(avctx, AV_LOG_ERROR, "%s: opening %s failed, %s (%d)\n", __func__, path, strerror(errno), errno); -+ ret = AVERROR(EINVAL); -+ goto fail; -+ } -+ -+ ret = ioctl(ctx->media_fd, MEDIA_IOC_DEVICE_INFO, &device_info); -+ if (ret < 0) { -+ av_log(avctx, AV_LOG_ERROR, "%s: get media device info failed, %s (%d)\n", __func__, strerror(errno), errno); -+ ret = AVERROR(EINVAL); -+ goto fail; -+ } -+ -+ av_log(avctx, AV_LOG_DEBUG, "%s: avctx=%p ctx=%p path=%s driver=%s\n", __func__, avctx, ctx, path, device_info.driver); -+ -+ ret = ioctl(ctx->media_fd, MEDIA_IOC_G_TOPOLOGY, &topology); -+ if (ret < 0) { -+ av_log(avctx, AV_LOG_ERROR, "%s: get media topology failed, %s (%d)\n", __func__, strerror(errno), errno); -+ ret = AVERROR(EINVAL); -+ goto fail; -+ } -+ -+ if (topology.num_interfaces <= 0) { -+ av_log(avctx, AV_LOG_ERROR, "%s: media device has no interfaces\n", __func__); -+ ret = AVERROR(EINVAL); -+ goto fail; -+ } -+ -+ interfaces = av_mallocz(topology.num_interfaces * sizeof(struct media_v2_interface)); -+ if (!interfaces) { -+ av_log(avctx, AV_LOG_ERROR, "%s: allocating media interface struct failed\n", __func__); -+ ret = AVERROR(ENOMEM); -+ goto fail; -+ } -+ -+ topology.ptr_interfaces = (__u64)(uintptr_t)interfaces; -+ ret = ioctl(ctx->media_fd, MEDIA_IOC_G_TOPOLOGY, &topology); -+ 
if (ret < 0) { -+ av_log(avctx, AV_LOG_ERROR, "%s: get media topology failed, %s (%d)\n", __func__, strerror(errno), errno); -+ ret = AVERROR(EINVAL); -+ goto fail; -+ } -+ -+ ret = AVERROR(EINVAL); -+ for (int i = 0; i < topology.num_interfaces; i++) { -+ if (interfaces[i].intf_type != MEDIA_INTF_T_V4L_VIDEO) -+ continue; -+ -+ devnum = makedev(interfaces[i].devnode.major, interfaces[i].devnode.minor); -+ video_device = udev_device_new_from_devnum(udev, 'c', devnum); -+ if (!video_device) { -+ av_log(avctx, AV_LOG_ERROR, "%s: video_device=%p\n", __func__, video_device); -+ continue; -+ } -+ -+ ret = v4l2_request_probe_video_device(video_device, avctx, pixelformat, buffersize, control, count); -+ udev_device_unref(video_device); -+ -+ if (!ret) -+ break; -+ } -+ -+ av_freep(&interfaces); -+ return ret; -+ -+fail: -+ av_freep(&interfaces); -+ if (ctx->media_fd >= 0) { -+ close(ctx->media_fd); -+ ctx->media_fd = -1; -+ } -+ return ret; -+} -+ -+int ff_v4l2_request_init(AVCodecContext *avctx, uint32_t pixelformat, uint32_t buffersize, struct v4l2_ext_control *control, int count) -+{ -+ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data; -+ int ret = AVERROR(EINVAL); -+ struct udev *udev; -+ struct udev_enumerate *enumerate; -+ struct udev_list_entry *devices; -+ struct udev_list_entry *entry; -+ struct udev_device *device; -+ -+ av_log(avctx, AV_LOG_DEBUG, "%s: avctx=%p hw_device_ctx=%p hw_frames_ctx=%p\n", __func__, avctx, avctx->hw_device_ctx, avctx->hw_frames_ctx); -+ -+ ctx->media_fd = -1; -+ ctx->video_fd = -1; -+ ctx->timestamp = 0; -+ -+ udev = udev_new(); -+ if (!udev) { -+ av_log(avctx, AV_LOG_ERROR, "%s: allocating udev context failed\n", __func__); -+ ret = AVERROR(ENOMEM); -+ goto fail; -+ } -+ -+ enumerate = udev_enumerate_new(udev); -+ if (!enumerate) { -+ av_log(avctx, AV_LOG_ERROR, "%s: allocating udev enumerator failed\n", __func__); -+ ret = AVERROR(ENOMEM); -+ goto fail; -+ } -+ -+ udev_enumerate_add_match_subsystem(enumerate, "media"); -+ udev_enumerate_scan_devices(enumerate); -+ -+ devices = udev_enumerate_get_list_entry(enumerate); -+ udev_list_entry_foreach(entry, devices) { -+ const char *path = udev_list_entry_get_name(entry); -+ if (!path) -+ continue; -+ -+ device = udev_device_new_from_syspath(udev, path); -+ if (!device) -+ continue; -+ -+ ret = v4l2_request_probe_media_device(device, avctx, pixelformat, buffersize, control, count); -+ udev_device_unref(device); -+ -+ if (!ret) -+ break; -+ } -+ -+ udev_enumerate_unref(enumerate); -+ -+ if (!ret) -+ ret = v4l2_request_init_context(avctx); -+ -+fail: -+ udev_unref(udev); -+ return ret; -+} -+ -+int ff_v4l2_request_uninit(AVCodecContext *avctx) -+{ -+ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data; -+ int ret; -+ -+ av_log(avctx, AV_LOG_DEBUG, "%s: avctx=%p ctx=%p\n", __func__, avctx, ctx); -+ -+ if (ctx->video_fd >= 0) { -+ ret = ioctl(ctx->video_fd, VIDIOC_STREAMOFF, &ctx->output_type); -+ if (ret < 0) -+ av_log(avctx, AV_LOG_ERROR, "%s: output stream off failed, %s (%d)\n", __func__, strerror(errno), errno); -+ -+ ret = ioctl(ctx->video_fd, VIDIOC_STREAMOFF, &ctx->format.type); -+ if (ret < 0) -+ av_log(avctx, AV_LOG_ERROR, "%s: capture stream off failed, %s (%d)\n", __func__, strerror(errno), errno); -+ } -+ -+ if (avctx->hw_frames_ctx) { -+ AVHWFramesContext *hwfc = (AVHWFramesContext*)avctx->hw_frames_ctx->data; -+ av_buffer_pool_flush(hwfc->pool); -+ } -+ -+ if (ctx->video_fd >= 0) -+ close(ctx->video_fd); -+ -+ if (ctx->media_fd >= 0) -+ close(ctx->media_fd); -+ -+ return 0; -+} -+ 
-+static int v4l2_request_buffer_alloc(AVCodecContext *avctx, V4L2RequestBuffer *buf, enum v4l2_buf_type type) -+{ -+ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data; -+ int ret; -+ struct v4l2_plane planes[1] = {}; -+ struct v4l2_create_buffers buffers = { -+ .count = 1, -+ .memory = V4L2_MEMORY_MMAP, -+ .format.type = type, -+ }; -+ -+ av_log(avctx, AV_LOG_DEBUG, "%s: avctx=%p buf=%p type=%u\n", __func__, avctx, buf, type); -+ -+ ret = ioctl(ctx->video_fd, VIDIOC_G_FMT, &buffers.format); -+ if (ret < 0) { -+ av_log(avctx, AV_LOG_ERROR, "%s: get format failed for type %u, %s (%d)\n", __func__, type, strerror(errno), errno); -+ return ret; -+ } -+ -+ if (V4L2_TYPE_IS_MULTIPLANAR(buffers.format.type)) { -+ av_log(avctx, AV_LOG_DEBUG, "%s: pixelformat=%d width=%u height=%u bytesperline=%u sizeimage=%u num_planes=%u\n", __func__, buffers.format.fmt.pix_mp.pixelformat, buffers.format.fmt.pix_mp.width, buffers.format.fmt.pix_mp.height, buffers.format.fmt.pix_mp.plane_fmt[0].bytesperline, buffers.format.fmt.pix_mp.plane_fmt[0].sizeimage, buffers.format.fmt.pix_mp.num_planes); -+ } else { -+ av_log(avctx, AV_LOG_DEBUG, "%s: pixelformat=%d width=%u height=%u bytesperline=%u sizeimage=%u\n", __func__, buffers.format.fmt.pix.pixelformat, buffers.format.fmt.pix.width, buffers.format.fmt.pix.height, buffers.format.fmt.pix.bytesperline, buffers.format.fmt.pix.sizeimage); -+ } -+ -+ ret = ioctl(ctx->video_fd, VIDIOC_CREATE_BUFS, &buffers); -+ if (ret < 0) { -+ av_log(avctx, AV_LOG_ERROR, "%s: create buffers failed for type %u, %s (%d)\n", __func__, type, strerror(errno), errno); -+ return ret; -+ } -+ -+ if (V4L2_TYPE_IS_MULTIPLANAR(type)) { -+ buf->width = buffers.format.fmt.pix_mp.width; -+ buf->height = buffers.format.fmt.pix_mp.height; -+ buf->size = buffers.format.fmt.pix_mp.plane_fmt[0].sizeimage; -+ buf->buffer.length = 1; -+ buf->buffer.m.planes = planes; -+ } else { -+ buf->width = buffers.format.fmt.pix.width; -+ buf->height = buffers.format.fmt.pix.height; -+ buf->size = buffers.format.fmt.pix.sizeimage; -+ } -+ -+ buf->index = buffers.index; -+ buf->capabilities = buffers.capabilities; -+ buf->used = 0; -+ -+ buf->buffer.type = type; -+ buf->buffer.memory = V4L2_MEMORY_MMAP; -+ buf->buffer.index = buf->index; -+ -+ ret = ioctl(ctx->video_fd, VIDIOC_QUERYBUF, &buf->buffer); -+ if (ret < 0) { -+ av_log(avctx, AV_LOG_ERROR, "%s: query buffer %d failed, %s (%d)\n", __func__, buf->index, strerror(errno), errno); -+ return ret; -+ } -+ -+ if (V4L2_TYPE_IS_OUTPUT(type)) { -+ void *addr = mmap(NULL, buf->size, PROT_READ | PROT_WRITE, MAP_SHARED, ctx->video_fd, V4L2_TYPE_IS_MULTIPLANAR(type) ? 
buf->buffer.m.planes[0].m.mem_offset : buf->buffer.m.offset); -+ if (addr == MAP_FAILED) { -+ av_log(avctx, AV_LOG_ERROR, "%s: mmap failed, %s (%d)\n", __func__, strerror(errno), errno); -+ return -1; -+ } -+ -+ buf->addr = (uint8_t*)addr; -+ } else { -+ struct v4l2_exportbuffer exportbuffer = { -+ .type = type, -+ .index = buf->index, -+ .flags = O_RDONLY, -+ }; -+ -+ ret = ioctl(ctx->video_fd, VIDIOC_EXPBUF, &exportbuffer); -+ if (ret < 0) { -+ av_log(avctx, AV_LOG_ERROR, "%s: export buffer %d failed, %s (%d)\n", __func__, buf->index, strerror(errno), errno); -+ return ret; -+ } -+ -+ buf->fd = exportbuffer.fd; -+ } -+ -+ av_log(avctx, AV_LOG_DEBUG, "%s: buf=%p index=%d fd=%d addr=%p width=%u height=%u size=%u\n", __func__, buf, buf->index, buf->fd, buf->addr, buf->width, buf->height, buf->size); -+ return 0; -+} -+ -+static void v4l2_request_buffer_free(V4L2RequestBuffer *buf) -+{ -+ av_log(NULL, AV_LOG_DEBUG, "%s: buf=%p index=%d fd=%d addr=%p width=%u height=%u size=%u\n", __func__, buf, buf->index, buf->fd, buf->addr, buf->width, buf->height, buf->size); -+ -+ if (buf->addr) -+ munmap(buf->addr, buf->size); -+ -+ if (buf->fd >= 0) -+ close(buf->fd); -+} -+ -+static void v4l2_request_frame_free(void *opaque, uint8_t *data) -+{ -+ AVCodecContext *avctx = opaque; -+ V4L2RequestDescriptor *req = (V4L2RequestDescriptor*)data; -+ -+ av_log(NULL, AV_LOG_DEBUG, "%s: avctx=%p data=%p request_fd=%d\n", __func__, avctx, data, req->request_fd); -+ -+ if (req->request_fd >= 0) -+ close(req->request_fd); -+ -+ v4l2_request_buffer_free(&req->capture); -+ v4l2_request_buffer_free(&req->output); -+ -+ av_free(data); -+} -+ -+static AVBufferRef *v4l2_request_frame_alloc(void *opaque, int size) -+{ -+ AVCodecContext *avctx = opaque; -+ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data; -+ V4L2RequestDescriptor *req; -+ AVBufferRef *ref; -+ uint8_t *data; -+ int ret; -+ -+ data = av_mallocz(size); -+ if (!data) -+ return NULL; -+ -+ av_log(avctx, AV_LOG_DEBUG, "%s: avctx=%p size=%d data=%p\n", __func__, avctx, size, data); -+ -+ ref = av_buffer_create(data, size, v4l2_request_frame_free, avctx, 0); -+ if (!ref) { -+ av_freep(&data); -+ return NULL; -+ } -+ -+ req = (V4L2RequestDescriptor*)data; -+ req->request_fd = -1; -+ req->output.fd = -1; -+ req->capture.fd = -1; -+ -+ ret = v4l2_request_buffer_alloc(avctx, &req->output, ctx->output_type); -+ if (ret < 0) { -+ av_buffer_unref(&ref); -+ return NULL; -+ } -+ -+ ret = v4l2_request_buffer_alloc(avctx, &req->capture, ctx->format.type); -+ if (ret < 0) { -+ av_buffer_unref(&ref); -+ return NULL; -+ } -+ -+ ret = ioctl(ctx->media_fd, MEDIA_IOC_REQUEST_ALLOC, &req->request_fd); -+ if (ret < 0) { -+ av_log(avctx, AV_LOG_ERROR, "%s: request alloc failed, %s (%d)\n", __func__, strerror(errno), errno); -+ av_buffer_unref(&ref); -+ return NULL; -+ } -+ -+ av_log(avctx, AV_LOG_DEBUG, "%s: avctx=%p size=%d data=%p request_fd=%d\n", __func__, avctx, size, data, req->request_fd); -+ return ref; -+} -+ -+static void v4l2_request_pool_free(void *opaque) -+{ -+ av_log(NULL, AV_LOG_DEBUG, "%s: opaque=%p\n", __func__, opaque); -+} -+ -+static void v4l2_request_hwframe_ctx_free(AVHWFramesContext *hwfc) -+{ -+ av_log(NULL, AV_LOG_DEBUG, "%s: hwfc=%p pool=%p\n", __func__, hwfc, hwfc->pool); -+ -+ av_buffer_pool_flush(hwfc->pool); -+ av_buffer_pool_uninit(&hwfc->pool); -+} -+ -+int ff_v4l2_request_frame_params(AVCodecContext *avctx, AVBufferRef *hw_frames_ctx) -+{ -+ V4L2RequestContext *ctx = avctx->internal->hwaccel_priv_data; -+ AVHWFramesContext *hwfc = 
(AVHWFramesContext*)hw_frames_ctx->data; -+ -+ hwfc->format = AV_PIX_FMT_DRM_PRIME; -+ hwfc->sw_format = AV_PIX_FMT_NV12; -+ if (V4L2_TYPE_IS_MULTIPLANAR(ctx->format.type)) { -+ hwfc->width = ctx->format.fmt.pix_mp.width; -+ hwfc->height = ctx->format.fmt.pix_mp.height; -+ } else { -+ hwfc->width = ctx->format.fmt.pix.width; -+ hwfc->height = ctx->format.fmt.pix.height; -+#if CONFIG_SAND -+ if (ctx->format.fmt.pix.pixelformat == V4L2_PIX_FMT_NV12_COL128) { -+ hwfc->sw_format = AV_PIX_FMT_RPI4_8; -+ } -+ else if (ctx->format.fmt.pix.pixelformat == V4L2_PIX_FMT_NV12_10_COL128) { -+ hwfc->sw_format = AV_PIX_FMT_RPI4_10; -+ } ++#ifndef V4L2_PIX_FMT_NV12_COL128 ++#define V4L2_PIX_FMT_NV12_COL128 v4l2_fourcc('N', 'C', '1', '2') /* 12 Y/CbCr 4:2:0 128 pixel wide column */ +#endif -+ } + -+ hwfc->pool = av_buffer_pool_init2(sizeof(V4L2RequestDescriptor), avctx, v4l2_request_frame_alloc, v4l2_request_pool_free); -+ if (!hwfc->pool) -+ return AVERROR(ENOMEM); -+ -+ hwfc->free = v4l2_request_hwframe_ctx_free; -+ -+ hwfc->initial_pool_size = 1; -+ -+ switch (avctx->codec_id) { -+ case AV_CODEC_ID_VP9: -+ hwfc->initial_pool_size += 8; -+ break; -+ case AV_CODEC_ID_VP8: -+ hwfc->initial_pool_size += 3; -+ break; -+ default: -+ hwfc->initial_pool_size += 2; -+ } -+ -+ av_log(avctx, AV_LOG_DEBUG, "%s: avctx=%p ctx=%p hw_frames_ctx=%p hwfc=%p pool=%p width=%d height=%d initial_pool_size=%d\n", __func__, avctx, ctx, hw_frames_ctx, hwfc, hwfc->pool, hwfc->width, hwfc->height, hwfc->initial_pool_size); -+ -+ return 0; -+} -diff --git a/libavcodec/v4l2_request.h b/libavcodec/v4l2_request.h -new file mode 100644 -index 0000000000..20b56cfbfb ---- /dev/null -+++ b/libavcodec/v4l2_request.h -@@ -0,0 +1,96 @@ -+/* -+ * This file is part of FFmpeg. -+ * -+ * FFmpeg is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU Lesser General Public -+ * License as published by the Free Software Foundation; either -+ * version 2.1 of the License, or (at your option) any later version. -+ * -+ * FFmpeg is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * Lesser General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU Lesser General Public -+ * License along with FFmpeg; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA -+ */ -+ -+#ifndef AVCODEC_V4L2_REQUEST_H -+#define AVCODEC_V4L2_REQUEST_H -+ -+#include -+ -+#include "libavutil/hwcontext_drm.h" -+#include "v4l2_phase.h" -+ -+typedef struct V4L2RequestContext { -+ int video_fd; -+ int media_fd; -+ enum v4l2_buf_type output_type; -+ struct v4l2_format format; -+ int timestamp; -+} V4L2RequestContext; -+ -+typedef struct V4L2RequestBuffer { -+ int index; -+ int fd; -+ uint8_t *addr; -+ uint32_t width; -+ uint32_t height; -+ uint32_t size; -+ uint32_t used; -+ uint32_t capabilities; -+ struct v4l2_buffer buffer; -+} V4L2RequestBuffer; -+ -+struct V4l2PhaseControl; -+ -+typedef struct V4L2PhaseEnv { -+ struct V4L2PhaseEnv * next; -+ struct V4L2PhaseControl * ctrl; -+ unsigned int order; -+} V4L2PhaseEnv; -+ -+typedef struct V4L2RequestDescriptor { ++// Attached to buf[0] in frame ++// Pooled in hwcontext so generally create once - 1/frame ++typedef struct V4L2MediaReqDescriptor { + AVDRMFrameDescriptor drm; -+ int request_fd; -+ V4L2RequestBuffer output; -+ V4L2RequestBuffer capture; + -+ // Phase control -+ V4L2PhaseInfo phase; -+} V4L2RequestDescriptor; -+ -+uint64_t ff_v4l2_request_get_capture_timestamp(AVFrame *frame); -+ -+// Sets phase control on this frame & gives it an order -+int ff_v4l2_request_start_phase_control(AVFrame *frame, struct V4L2PhaseControl * phase); -+ -+// Had error - release all phases -+void ff_v4l2_request_abort_phase_control(AVFrame *frame); -+ -+ -+int ff_v4l2_request_reset_frame(AVCodecContext *avctx, AVFrame *frame); -+ -+int ff_v4l2_request_append_output_buffer(AVCodecContext *avctx, AVFrame *frame, const uint8_t *data, uint32_t size); -+ -+int ff_v4l2_request_set_controls(AVCodecContext *avctx, struct v4l2_ext_control *control, int count); -+ -+int ff_v4l2_request_get_controls(AVCodecContext *avctx, struct v4l2_ext_control *control, int count); -+ -+int ff_v4l2_request_query_control(AVCodecContext *avctx, struct v4l2_query_ext_ctrl *control); -+ -+int ff_v4l2_request_query_control_default_value(AVCodecContext *avctx, uint32_t id); -+ -+int ff_v4l2_request_decode_slice(AVCodecContext *avctx, AVFrame *frame, struct v4l2_ext_control *control, int count, int first_slice, int last_slice); -+ -+int ff_v4l2_request_decode_frame(AVCodecContext *avctx, AVFrame *frame, struct v4l2_ext_control *control, int count); -+ -+int ff_v4l2_request_init(AVCodecContext *avctx, uint32_t pixelformat, uint32_t buffersize, struct v4l2_ext_control *control, int count); -+ -+int ff_v4l2_request_uninit(AVCodecContext *avctx); -+ -+int ff_v4l2_request_frame_params(AVCodecContext *avctx, AVBufferRef *hw_frames_ctx); -+ -+#endif /* AVCODEC_V4L2_REQUEST_H */ -diff --git a/libavcodec/v4l2_request_h264.c b/libavcodec/v4l2_request_h264.c -new file mode 100644 -index 0000000000..d6332c01c7 ---- /dev/null -+++ b/libavcodec/v4l2_request_h264.c -@@ -0,0 +1,456 @@ -+/* -+ * This file is part of FFmpeg. -+ * -+ * FFmpeg is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU Lesser General Public -+ * License as published by the Free Software Foundation; either -+ * version 2.1 of the License, or (at your option) any later version. 
-+ * -+ * FFmpeg is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * Lesser General Public License for more details. -+ * -+ * You should have received a copy of the GNU Lesser General Public -+ * License along with FFmpeg; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA -+ */ -+ -+#include "h264dec.h" -+#include "hwconfig.h" -+#include "v4l2_request.h" -+#include "h264-ctrls.h" -+ -+typedef struct V4L2RequestControlsH264 { -+ struct v4l2_ctrl_h264_sps sps; -+ struct v4l2_ctrl_h264_pps pps; -+ struct v4l2_ctrl_h264_scaling_matrix scaling_matrix; -+ struct v4l2_ctrl_h264_decode_params decode_params; -+ struct v4l2_ctrl_h264_slice_params slice_params; -+ struct v4l2_ctrl_h264_pred_weights pred_weights; -+ int pred_weights_required; -+ int first_slice; -+ int num_slices; -+} V4L2RequestControlsH264; -+ -+typedef struct V4L2RequestContextH264 { -+ V4L2RequestContext base; -+ int decode_mode; -+ int start_code; -+} V4L2RequestContextH264; -+ -+static uint8_t nalu_slice_start_code[] = { 0x00, 0x00, 0x01 }; -+ -+static void fill_weight_factors(struct v4l2_h264_weight_factors *factors, int list, const H264SliceContext *sl) -+{ -+ for (int i = 0; i < sl->ref_count[list]; i++) { -+ if (sl->pwt.luma_weight_flag[list]) { -+ factors->luma_weight[i] = sl->pwt.luma_weight[i][list][0]; -+ factors->luma_offset[i] = sl->pwt.luma_weight[i][list][1]; -+ } else { -+ factors->luma_weight[i] = 1 << sl->pwt.luma_log2_weight_denom; -+ factors->luma_offset[i] = 0; -+ } -+ for (int j = 0; j < 2; j++) { -+ if (sl->pwt.chroma_weight_flag[list]) { -+ factors->chroma_weight[i][j] = sl->pwt.chroma_weight[i][list][j][0]; -+ factors->chroma_offset[i][j] = sl->pwt.chroma_weight[i][list][j][1]; -+ } else { -+ factors->chroma_weight[i][j] = 1 << sl->pwt.chroma_log2_weight_denom; -+ factors->chroma_offset[i][j] = 0; -+ } -+ } -+ } -+} -+ -+static void fill_dpb_entry(struct v4l2_h264_dpb_entry *entry, const H264Picture *pic) -+{ -+ entry->reference_ts = ff_v4l2_request_get_capture_timestamp(pic->f); -+ entry->pic_num = pic->pic_id; -+ entry->frame_num = pic->frame_num; -+ entry->fields = pic->reference & V4L2_H264_FRAME_REF; -+ entry->flags = V4L2_H264_DPB_ENTRY_FLAG_VALID; -+ if (entry->fields) -+ entry->flags |= V4L2_H264_DPB_ENTRY_FLAG_ACTIVE; -+ if (pic->long_ref) -+ entry->flags |= V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM; -+ if (pic->field_picture) -+ entry->flags |= V4L2_H264_DPB_ENTRY_FLAG_FIELD; -+ if (pic->field_poc[0] != INT_MAX) -+ entry->top_field_order_cnt = pic->field_poc[0]; -+ if (pic->field_poc[1] != INT_MAX) -+ entry->bottom_field_order_cnt = pic->field_poc[1]; -+} -+ -+static void fill_dpb(struct v4l2_ctrl_h264_decode_params *decode, const H264Context *h) -+{ -+ int entries = 0; -+ -+ for (int i = 0; i < h->short_ref_count; i++) { -+ const H264Picture *pic = h->short_ref[i]; -+ if (pic && (pic->field_poc[0] != INT_MAX || pic->field_poc[1] != INT_MAX)) -+ fill_dpb_entry(&decode->dpb[entries++], pic); -+ } -+ -+ if (!h->long_ref_count) -+ return; -+ -+ for (int i = 0; i < FF_ARRAY_ELEMS(h->long_ref); i++) { -+ const H264Picture *pic = h->long_ref[i]; -+ if (pic && (pic->field_poc[0] != INT_MAX || pic->field_poc[1] != INT_MAX)) -+ fill_dpb_entry(&decode->dpb[entries++], pic); -+ } -+} -+ -+static void fill_ref_list(struct v4l2_h264_reference *reference, struct v4l2_ctrl_h264_decode_params *decode, const 
H264Ref *ref) -+{ ++ // Media + uint64_t timestamp; ++ struct qent_dst * qe_dst; + -+ if (!ref->parent) -+ return; -+ -+ timestamp = ff_v4l2_request_get_capture_timestamp(ref->parent->f); -+ -+ for (uint8_t i = 0; i < FF_ARRAY_ELEMS(decode->dpb); i++) { -+ struct v4l2_h264_dpb_entry *entry = &decode->dpb[i]; -+ if ((entry->flags & V4L2_H264_DPB_ENTRY_FLAG_VALID) && -+ entry->reference_ts == timestamp) { -+ reference->fields = ref->reference & V4L2_H264_FRAME_REF; -+ reference->index = i; -+ return; -+ } -+ } -+} -+ -+static void fill_sps(struct v4l2_ctrl_h264_sps *ctrl, const H264Context *h) -+{ -+ const SPS *sps = h->ps.sps; -+ -+ *ctrl = (struct v4l2_ctrl_h264_sps) { -+ .profile_idc = sps->profile_idc, -+ .constraint_set_flags = sps->constraint_set_flags, -+ .level_idc = sps->level_idc, -+ .seq_parameter_set_id = sps->sps_id, -+ .chroma_format_idc = sps->chroma_format_idc, -+ .bit_depth_luma_minus8 = sps->bit_depth_luma - 8, -+ .bit_depth_chroma_minus8 = sps->bit_depth_chroma - 8, -+ .log2_max_frame_num_minus4 = sps->log2_max_frame_num - 4, -+ .pic_order_cnt_type = sps->poc_type, -+ .log2_max_pic_order_cnt_lsb_minus4 = sps->log2_max_poc_lsb - 4, -+ .max_num_ref_frames = sps->ref_frame_count, -+ .num_ref_frames_in_pic_order_cnt_cycle = sps->poc_cycle_length, -+ .offset_for_non_ref_pic = sps->offset_for_non_ref_pic, -+ .offset_for_top_to_bottom_field = sps->offset_for_top_to_bottom_field, -+ .pic_width_in_mbs_minus1 = h->mb_width - 1, -+ .pic_height_in_map_units_minus1 = sps->frame_mbs_only_flag ? h->mb_height - 1 : h->mb_height / 2 - 1, -+ }; -+ -+ if (sps->poc_cycle_length > 0 && sps->poc_cycle_length <= 255) -+ memcpy(ctrl->offset_for_ref_frame, sps->offset_for_ref_frame, sps->poc_cycle_length * sizeof(ctrl->offset_for_ref_frame[0])); -+ -+ if (sps->residual_color_transform_flag) -+ ctrl->flags |= V4L2_H264_SPS_FLAG_SEPARATE_COLOUR_PLANE; -+ if (sps->transform_bypass) -+ ctrl->flags |= V4L2_H264_SPS_FLAG_QPPRIME_Y_ZERO_TRANSFORM_BYPASS; -+ if (sps->delta_pic_order_always_zero_flag) -+ ctrl->flags |= V4L2_H264_SPS_FLAG_DELTA_PIC_ORDER_ALWAYS_ZERO; -+ if (sps->gaps_in_frame_num_allowed_flag) -+ ctrl->flags |= V4L2_H264_SPS_FLAG_GAPS_IN_FRAME_NUM_VALUE_ALLOWED; -+ if (sps->frame_mbs_only_flag) -+ ctrl->flags |= V4L2_H264_SPS_FLAG_FRAME_MBS_ONLY; -+ if (sps->mb_aff) -+ ctrl->flags |= V4L2_H264_SPS_FLAG_MB_ADAPTIVE_FRAME_FIELD; -+ if (sps->direct_8x8_inference_flag) -+ ctrl->flags |= V4L2_H264_SPS_FLAG_DIRECT_8X8_INFERENCE; -+} -+ -+static void fill_pps(struct v4l2_ctrl_h264_pps *ctrl, const H264Context *h) -+{ -+ const SPS *sps = h->ps.sps; -+ const PPS *pps = h->ps.pps; -+ const H264SliceContext *sl = &h->slice_ctx[0]; -+ int qp_bd_offset = 6 * (sps->bit_depth_luma - 8); -+ -+ *ctrl = (struct v4l2_ctrl_h264_pps) { -+ .pic_parameter_set_id = sl->pps_id, -+ .seq_parameter_set_id = pps->sps_id, -+ .num_slice_groups_minus1 = pps->slice_group_count - 1, -+ .num_ref_idx_l0_default_active_minus1 = pps->ref_count[0] - 1, -+ .num_ref_idx_l1_default_active_minus1 = pps->ref_count[1] - 1, -+ .weighted_bipred_idc = pps->weighted_bipred_idc, -+ .pic_init_qp_minus26 = pps->init_qp - 26 - qp_bd_offset, -+ .pic_init_qs_minus26 = pps->init_qs - 26 - qp_bd_offset, -+ .chroma_qp_index_offset = pps->chroma_qp_index_offset[0], -+ .second_chroma_qp_index_offset = pps->chroma_qp_index_offset[1], -+ }; -+ -+ if (pps->cabac) -+ ctrl->flags |= V4L2_H264_PPS_FLAG_ENTROPY_CODING_MODE; -+ if (pps->pic_order_present) -+ ctrl->flags |= V4L2_H264_PPS_FLAG_BOTTOM_FIELD_PIC_ORDER_IN_FRAME_PRESENT; -+ if 
(pps->weighted_pred) -+ ctrl->flags |= V4L2_H264_PPS_FLAG_WEIGHTED_PRED; -+ if (pps->deblocking_filter_parameters_present) -+ ctrl->flags |= V4L2_H264_PPS_FLAG_DEBLOCKING_FILTER_CONTROL_PRESENT; -+ if (pps->constrained_intra_pred) -+ ctrl->flags |= V4L2_H264_PPS_FLAG_CONSTRAINED_INTRA_PRED; -+ if (pps->redundant_pic_cnt_present) -+ ctrl->flags |= V4L2_H264_PPS_FLAG_REDUNDANT_PIC_CNT_PRESENT; -+ if (pps->transform_8x8_mode) -+ ctrl->flags |= V4L2_H264_PPS_FLAG_TRANSFORM_8X8_MODE; -+ /* FFmpeg always provide a scaling matrix */ -+ ctrl->flags |= V4L2_H264_PPS_FLAG_SCALING_MATRIX_PRESENT; -+} -+ -+static int v4l2_request_h264_start_frame(AVCodecContext *avctx, -+ av_unused const uint8_t *buffer, -+ av_unused uint32_t size) -+{ -+ const H264Context *h = avctx->priv_data; -+ const PPS *pps = h->ps.pps; -+ const SPS *sps = h->ps.sps; -+ const H264SliceContext *sl = &h->slice_ctx[0]; -+ V4L2RequestControlsH264 *controls = h->cur_pic_ptr->hwaccel_picture_private; -+ -+ fill_sps(&controls->sps, h); -+ fill_pps(&controls->pps, h); -+ -+ memcpy(controls->scaling_matrix.scaling_list_4x4, pps->scaling_matrix4, sizeof(controls->scaling_matrix.scaling_list_4x4)); -+ memcpy(controls->scaling_matrix.scaling_list_8x8[0], pps->scaling_matrix8[0], sizeof(controls->scaling_matrix.scaling_list_8x8[0])); -+ memcpy(controls->scaling_matrix.scaling_list_8x8[1], pps->scaling_matrix8[3], sizeof(controls->scaling_matrix.scaling_list_8x8[1])); -+ -+ if (sps->chroma_format_idc == 3) { -+ memcpy(controls->scaling_matrix.scaling_list_8x8[2], pps->scaling_matrix8[1], sizeof(controls->scaling_matrix.scaling_list_8x8[2])); -+ memcpy(controls->scaling_matrix.scaling_list_8x8[3], pps->scaling_matrix8[4], sizeof(controls->scaling_matrix.scaling_list_8x8[3])); -+ memcpy(controls->scaling_matrix.scaling_list_8x8[4], pps->scaling_matrix8[2], sizeof(controls->scaling_matrix.scaling_list_8x8[4])); -+ memcpy(controls->scaling_matrix.scaling_list_8x8[5], pps->scaling_matrix8[5], sizeof(controls->scaling_matrix.scaling_list_8x8[5])); -+ } -+ -+ controls->decode_params = (struct v4l2_ctrl_h264_decode_params) { -+ .nal_ref_idc = h->nal_ref_idc, -+ .frame_num = h->poc.frame_num, -+ .top_field_order_cnt = h->cur_pic_ptr->field_poc[0] != INT_MAX ? h->cur_pic_ptr->field_poc[0] : 0, -+ .bottom_field_order_cnt = h->cur_pic_ptr->field_poc[1] != INT_MAX ? h->cur_pic_ptr->field_poc[1] : 0, -+ .idr_pic_id = sl->idr_pic_id, -+ .pic_order_cnt_lsb = sl->poc_lsb, -+ .delta_pic_order_cnt_bottom = sl->delta_poc_bottom, -+ .delta_pic_order_cnt0 = sl->delta_poc[0], -+ .delta_pic_order_cnt1 = sl->delta_poc[1], -+ /* Size in bits of dec_ref_pic_marking() syntax element. */ -+ .dec_ref_pic_marking_bit_size = sl->ref_pic_marking_size_in_bits, -+ /* Size in bits of pic order count syntax. 
*/ -+ .pic_order_cnt_bit_size = sl->pic_order_cnt_bit_size, -+ .slice_group_change_cycle = 0, /* slice group not supported by FFmpeg */ -+ }; -+ -+ if (h->picture_idr) -+ controls->decode_params.flags |= V4L2_H264_DECODE_PARAM_FLAG_IDR_PIC; -+ if (FIELD_PICTURE(h)) -+ controls->decode_params.flags |= V4L2_H264_DECODE_PARAM_FLAG_FIELD_PIC; -+ if (h->picture_structure == PICT_BOTTOM_FIELD) -+ controls->decode_params.flags |= V4L2_H264_DECODE_PARAM_FLAG_BOTTOM_FIELD; -+ -+ fill_dpb(&controls->decode_params, h); -+ -+ controls->first_slice = !FIELD_PICTURE(h) || h->first_field; -+ controls->num_slices = 0; -+ -+ return ff_v4l2_request_reset_frame(avctx, h->cur_pic_ptr->f); -+} -+ -+static int v4l2_request_h264_queue_decode(AVCodecContext *avctx, int last_slice) -+{ -+ const H264Context *h = avctx->priv_data; -+ V4L2RequestControlsH264 *controls = h->cur_pic_ptr->hwaccel_picture_private; -+ V4L2RequestContextH264 *ctx = avctx->internal->hwaccel_priv_data; -+ -+ struct v4l2_ext_control control[] = { -+ { -+ .id = V4L2_CID_MPEG_VIDEO_H264_SPS, -+ .ptr = &controls->sps, -+ .size = sizeof(controls->sps), -+ }, -+ { -+ .id = V4L2_CID_MPEG_VIDEO_H264_PPS, -+ .ptr = &controls->pps, -+ .size = sizeof(controls->pps), -+ }, -+ { -+ .id = V4L2_CID_MPEG_VIDEO_H264_SCALING_MATRIX, -+ .ptr = &controls->scaling_matrix, -+ .size = sizeof(controls->scaling_matrix), -+ }, -+ { -+ .id = V4L2_CID_MPEG_VIDEO_H264_DECODE_PARAMS, -+ .ptr = &controls->decode_params, -+ .size = sizeof(controls->decode_params), -+ }, -+ { -+ .id = V4L2_CID_MPEG_VIDEO_H264_SLICE_PARAMS, -+ .ptr = &controls->slice_params, -+ .size = sizeof(controls->slice_params), -+ }, -+ { -+ .id = V4L2_CID_MPEG_VIDEO_H264_PRED_WEIGHTS, -+ .ptr = &controls->pred_weights, -+ .size = sizeof(controls->pred_weights), -+ }, -+ }; -+ -+ if (ctx->decode_mode == V4L2_MPEG_VIDEO_H264_DECODE_MODE_SLICE_BASED) { -+ int count = FF_ARRAY_ELEMS(control) - (controls->pred_weights_required ? 0 : 1); -+ return ff_v4l2_request_decode_slice(avctx, h->cur_pic_ptr->f, control, count, controls->first_slice, last_slice); -+ } -+ -+ return ff_v4l2_request_decode_frame(avctx, h->cur_pic_ptr->f, control, FF_ARRAY_ELEMS(control) - 2); -+} -+ -+static int v4l2_request_h264_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size) -+{ -+ const H264Context *h = avctx->priv_data; -+ const PPS *pps = h->ps.pps; -+ const H264SliceContext *sl = &h->slice_ctx[0]; -+ V4L2RequestControlsH264 *controls = h->cur_pic_ptr->hwaccel_picture_private; -+ V4L2RequestContextH264 *ctx = avctx->internal->hwaccel_priv_data; -+ int i, ret, count; -+ -+ if (ctx->decode_mode == V4L2_MPEG_VIDEO_H264_DECODE_MODE_SLICE_BASED && controls->num_slices) { -+ ret = v4l2_request_h264_queue_decode(avctx, 0); -+ if (ret) -+ return ret; -+ -+ ff_v4l2_request_reset_frame(avctx, h->cur_pic_ptr->f); -+ controls->first_slice = 0; -+ } -+ -+ if (ctx->start_code == V4L2_MPEG_VIDEO_H264_START_CODE_ANNEX_B) { -+ ret = ff_v4l2_request_append_output_buffer(avctx, h->cur_pic_ptr->f, nalu_slice_start_code, 3); -+ if (ret) -+ return ret; -+ } -+ -+ ret = ff_v4l2_request_append_output_buffer(avctx, h->cur_pic_ptr->f, buffer, size); -+ if (ret) -+ return ret; -+ -+ if (ctx->decode_mode != V4L2_MPEG_VIDEO_H264_DECODE_MODE_SLICE_BASED) -+ return 0; -+ -+ controls->slice_params = (struct v4l2_ctrl_h264_slice_params) { -+ /* Offset in bits to slice_data() from the beginning of this slice. 
*/ -+ .header_bit_size = get_bits_count(&sl->gb), -+ -+ .first_mb_in_slice = sl->first_mb_addr, -+ -+ .slice_type = ff_h264_get_slice_type(sl), -+ .colour_plane_id = 0, /* separate colour plane not supported by FFmpeg */ -+ .redundant_pic_cnt = sl->redundant_pic_count, -+ .cabac_init_idc = sl->cabac_init_idc, -+ .slice_qp_delta = sl->qscale - pps->init_qp, -+ .slice_qs_delta = 0, /* not implemented by FFmpeg */ -+ .disable_deblocking_filter_idc = sl->deblocking_filter < 2 ? !sl->deblocking_filter : sl->deblocking_filter, -+ .slice_alpha_c0_offset_div2 = sl->slice_alpha_c0_offset / 2, -+ .slice_beta_offset_div2 = sl->slice_beta_offset / 2, -+ .num_ref_idx_l0_active_minus1 = sl->list_count > 0 ? sl->ref_count[0] - 1 : 0, -+ .num_ref_idx_l1_active_minus1 = sl->list_count > 1 ? sl->ref_count[1] - 1 : 0, -+ }; -+ -+ if (sl->slice_type == AV_PICTURE_TYPE_B && sl->direct_spatial_mv_pred) -+ controls->slice_params.flags |= V4L2_H264_SLICE_FLAG_DIRECT_SPATIAL_MV_PRED; -+ /* V4L2_H264_SLICE_FLAG_SP_FOR_SWITCH: not implemented by FFmpeg */ -+ -+ controls->pred_weights_required = V4L2_H264_CTRL_PRED_WEIGHTS_REQUIRED(&controls->pps, &controls->slice_params); -+ if (controls->pred_weights_required) { -+ controls->pred_weights.chroma_log2_weight_denom = sl->pwt.chroma_log2_weight_denom; -+ controls->pred_weights.luma_log2_weight_denom = sl->pwt.luma_log2_weight_denom; -+ } -+ -+ count = sl->list_count > 0 ? sl->ref_count[0] : 0; -+ for (i = 0; i < count; i++) -+ fill_ref_list(&controls->slice_params.ref_pic_list0[i], &controls->decode_params, &sl->ref_list[0][i]); -+ if (count && controls->pred_weights_required) -+ fill_weight_factors(&controls->pred_weights.weight_factors[0], 0, sl); -+ -+ count = sl->list_count > 1 ? sl->ref_count[1] : 0; -+ for (i = 0; i < count; i++) -+ fill_ref_list(&controls->slice_params.ref_pic_list1[i], &controls->decode_params, &sl->ref_list[1][i]); -+ if (count && controls->pred_weights_required) -+ fill_weight_factors(&controls->pred_weights.weight_factors[1], 1, sl); -+ -+ controls->num_slices++; -+ return 0; -+} -+ -+static int v4l2_request_h264_end_frame(AVCodecContext *avctx) -+{ -+ const H264Context *h = avctx->priv_data; -+ return v4l2_request_h264_queue_decode(avctx, !FIELD_PICTURE(h) || !h->first_field); -+} -+ -+static int v4l2_request_h264_set_controls(AVCodecContext *avctx) -+{ -+ V4L2RequestContextH264 *ctx = avctx->internal->hwaccel_priv_data; -+ -+ struct v4l2_ext_control control[] = { -+ { .id = V4L2_CID_MPEG_VIDEO_H264_DECODE_MODE, }, -+ { .id = V4L2_CID_MPEG_VIDEO_H264_START_CODE, }, -+ }; -+ -+ ctx->decode_mode = ff_v4l2_request_query_control_default_value(avctx, V4L2_CID_MPEG_VIDEO_H264_DECODE_MODE); -+ if (ctx->decode_mode != V4L2_MPEG_VIDEO_H264_DECODE_MODE_SLICE_BASED && -+ ctx->decode_mode != V4L2_MPEG_VIDEO_H264_DECODE_MODE_FRAME_BASED) { -+ av_log(avctx, AV_LOG_ERROR, "%s: unsupported decode mode, %d\n", __func__, ctx->decode_mode); -+ return AVERROR(EINVAL); -+ } -+ -+ ctx->start_code = ff_v4l2_request_query_control_default_value(avctx, V4L2_CID_MPEG_VIDEO_H264_START_CODE); -+ if (ctx->start_code != V4L2_MPEG_VIDEO_H264_START_CODE_NONE && -+ ctx->start_code != V4L2_MPEG_VIDEO_H264_START_CODE_ANNEX_B) { -+ av_log(avctx, AV_LOG_ERROR, "%s: unsupported start code, %d\n", __func__, ctx->start_code); -+ return AVERROR(EINVAL); -+ } -+ -+ control[0].value = ctx->decode_mode; -+ control[1].value = ctx->start_code; -+ -+ return ff_v4l2_request_set_controls(avctx, control, FF_ARRAY_ELEMS(control)); -+} -+ -+static int v4l2_request_h264_init(AVCodecContext 
*avctx) -+{ -+ const H264Context *h = avctx->priv_data; -+ struct v4l2_ctrl_h264_sps sps; -+ int ret; -+ -+ struct v4l2_ext_control control[] = { -+ { -+ .id = V4L2_CID_MPEG_VIDEO_H264_SPS, -+ .ptr = &sps, -+ .size = sizeof(sps), -+ }, -+ }; -+ -+ fill_sps(&sps, h); -+ -+ ret = ff_v4l2_request_init(avctx, V4L2_PIX_FMT_H264_SLICE, 4 * 1024 * 1024, control, FF_ARRAY_ELEMS(control)); -+ if (ret) -+ return ret; -+ -+ return v4l2_request_h264_set_controls(avctx); -+} -+ -+const AVHWAccel ff_h264_v4l2request_hwaccel = { -+ .name = "h264_v4l2request", -+ .type = AVMEDIA_TYPE_VIDEO, -+ .id = AV_CODEC_ID_H264, -+ .pix_fmt = AV_PIX_FMT_DRM_PRIME, -+ .start_frame = v4l2_request_h264_start_frame, -+ .decode_slice = v4l2_request_h264_decode_slice, -+ .end_frame = v4l2_request_h264_end_frame, -+ .frame_priv_data_size = sizeof(V4L2RequestControlsH264), -+ .init = v4l2_request_h264_init, -+ .uninit = ff_v4l2_request_uninit, -+ .priv_data_size = sizeof(V4L2RequestContextH264), -+ .frame_params = ff_v4l2_request_frame_params, -+ .caps_internal = HWACCEL_CAP_ASYNC_SAFE, -+}; -diff --git a/libavcodec/v4l2_request_hevc.c b/libavcodec/v4l2_request_hevc.c -new file mode 100644 -index 0000000000..2e21145328 ---- /dev/null -+++ b/libavcodec/v4l2_request_hevc.c -@@ -0,0 +1,675 @@ -+/* -+ * This file is part of FFmpeg. -+ * -+ * FFmpeg is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU Lesser General Public -+ * License as published by the Free Software Foundation; either -+ * version 2.1 of the License, or (at your option) any later version. -+ * -+ * FFmpeg is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * Lesser General Public License for more details. -+ * -+ * You should have received a copy of the GNU Lesser General Public -+ * License along with FFmpeg; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA -+ */ -+ -+#include "decode.h" -+#include "hevcdec.h" -+#include "hwconfig.h" -+#include "v4l2_request.h" -+#include "hevc-ctrls.h" -+#include "v4l2_phase.h" -+ -+#define MAX_SLICES 16 ++ // Decode only - should be NULL by the time we emit the frame ++ struct media_request *req; ++ struct qent_src *qe_src; ++} V4L2MediaReqDescriptor; + ++// Attached to frame - has no constructor/destructor so state only +typedef struct V4L2RequestControlsHEVC { + struct v4l2_ctrl_hevc_sps sps; + struct v4l2_ctrl_hevc_pps pps; + struct v4l2_ctrl_hevc_scaling_matrix scaling_matrix; + struct v4l2_ctrl_hevc_slice_params slice_params[MAX_SLICES]; + int first_slice; ++ int dst_qed; + int num_slices; //TODO: this should be in control +} V4L2RequestControlsHEVC; + ++// 1 per decoder +typedef struct V4L2RequestContextHEVC { -+ V4L2RequestContext base; ++// V4L2RequestContext base; ++ unsigned int timestamp; // ?? 
maybe uint64_t ++ + int decode_mode; + int start_code; + int max_slices; + -+ unsigned int order; -+ V4L2PhaseControl * pctrl; ++ struct devscan *devscan; ++ struct dmabufs_ctl *dbufs; ++ struct pollqueue *pq; ++ struct media_pool * mpool; ++ struct mediabufs_ctl *mbufs; +} V4L2RequestContextHEVC; + ++// Attached to frame - has a free function - my have a shorter lifespan than the frame ++// I haven't really sussed it ++typedef struct V4L2ReqFrameDataPrivHEVC { ++} V4L2ReqFrameDataPrivHEVC; ++ +static uint8_t nalu_slice_start_code[] = { 0x00, 0x00, 0x01 }; + -+static void v4l2_request_hevc_fill_pred_table(const HEVCContext *h, struct v4l2_hevc_pred_weight_table *table) ++static size_t bit_buf_size(unsigned int w, unsigned int h, unsigned int bits_minus8) ++{ ++ const size_t wxh = w * h; ++ size_t bits_alloc; ++ ++ /* Annex A gives a min compression of 2 @ lvl 3.1 ++ * (wxh <= 983040) and min 4 thereafter but avoid ++ * the odity of 983041 having a lower limit than ++ * 983040. ++ * Multiply by 3/2 for 4:2:0 ++ */ ++ bits_alloc = wxh < 983040 ? wxh * 3 / 4 : ++ wxh < 983040 * 2 ? 983040 * 3 / 4 : ++ wxh * 3 / 8; ++ /* Allow for bit depth */ ++ bits_alloc += (bits_alloc * bits_minus8) / 8; ++ return bits_alloc; ++} ++ ++static inline uint64_t frame_capture_dpb(const AVFrame * const frame) ++{ ++ const V4L2MediaReqDescriptor *const rd = (V4L2MediaReqDescriptor *)frame->data[0]; ++ return rd->timestamp; ++} ++ ++static inline void frame_set_capture_dpb(AVFrame * const frame, const uint64_t dpb_stamp) ++{ ++ V4L2MediaReqDescriptor *const rd = (V4L2MediaReqDescriptor *)frame->data[0]; ++ rd->timestamp = dpb_stamp; ++} ++ ++static void fill_pred_table(const HEVCContext *h, struct v4l2_hevc_pred_weight_table *table) +{ + int32_t luma_weight_denom, chroma_weight_denom; + const SliceHeader *sh = &h->sh; @@ -50206,19 +51637,19 @@ index 0000000000..2e21145328 + + for (i = 0; i < h->rps[ST_CURR_BEF].nb_refs; i++) { + frame = h->rps[ST_CURR_BEF].ref[i]; -+ if (frame && timestamp == ff_v4l2_request_get_capture_timestamp(frame->frame)) ++ if (frame && timestamp == frame_capture_dpb(frame->frame)) + return V4L2_HEVC_DPB_ENTRY_RPS_ST_CURR_BEFORE; + } + + for (i = 0; i < h->rps[ST_CURR_AFT].nb_refs; i++) { + frame = h->rps[ST_CURR_AFT].ref[i]; -+ if (frame && timestamp == ff_v4l2_request_get_capture_timestamp(frame->frame)) ++ if (frame && timestamp == frame_capture_dpb(frame->frame)) + return V4L2_HEVC_DPB_ENTRY_RPS_ST_CURR_AFTER; + } + + for (i = 0; i < h->rps[LT_CURR].nb_refs; i++) { + frame = h->rps[LT_CURR].ref[i]; -+ if (frame && timestamp == ff_v4l2_request_get_capture_timestamp(frame->frame)) ++ if (frame && timestamp == frame_capture_dpb(frame->frame)) + return V4L2_HEVC_DPB_ENTRY_RPS_LT_CURR; + } + @@ -50233,7 +51664,7 @@ index 0000000000..2e21145328 + if (!frame) + return 0; + -+ timestamp = ff_v4l2_request_get_capture_timestamp(frame->frame); ++ timestamp = frame_capture_dpb(frame->frame); + + for (uint8_t i = 0; i < slice_params->num_active_dpb_entries; i++) { + struct v4l2_hevc_dpb_entry *entry = &slice_params->dpb[i]; @@ -50262,7 +51693,7 @@ index 0000000000..2e21145328 + return b; +} + -+static void v4l2_request_hevc_fill_slice_params(const HEVCContext *h, ++static void fill_slice_params(const HEVCContext *h, + struct v4l2_ctrl_hevc_slice_params *slice_params) +{ + const HEVCFrame *pic = h->ref; @@ -50339,7 +51770,7 @@ index 0000000000..2e21145328 + if (frame != pic && (frame->flags & (HEVC_FRAME_FLAG_LONG_REF | HEVC_FRAME_FLAG_SHORT_REF))) { + struct v4l2_hevc_dpb_entry *entry = 
&slice_params->dpb[entries++]; + -+ entry->timestamp = ff_v4l2_request_get_capture_timestamp(frame->frame); ++ entry->timestamp = frame_capture_dpb(frame->frame); + entry->rps = find_frame_rps_type(h, entry->timestamp); + entry->field_pic = frame->frame->interlaced_frame; + @@ -50363,7 +51794,7 @@ index 0000000000..2e21145328 + slice_params->ref_idx_l1[i] = get_ref_pic_index(h, rpl->ref[i], slice_params); + } + -+ v4l2_request_hevc_fill_pred_table(h, &slice_params->pred_weight_table); ++ fill_pred_table(h, &slice_params->pred_weight_table); + + slice_params->num_entry_point_offsets = sh->num_entry_point_offsets; + if (slice_params->num_entry_point_offsets > 256) { @@ -50432,6 +51863,148 @@ index 0000000000..2e21145328 + ctrl->flags |= V4L2_HEVC_SPS_FLAG_STRONG_INTRA_SMOOTHING_ENABLED; +} + ++static void fill_scaling_matrix(const ScalingList * const sl, ++ struct v4l2_ctrl_hevc_scaling_matrix * const sm) ++{ ++ unsigned int i; ++ ++ for (i = 0; i < 6; i++) { ++ unsigned int j; ++ ++ for (j = 0; j < 16; j++) ++ sm->scaling_list_4x4[i][j] = sl->sl[0][i][j]; ++ for (j = 0; j < 64; j++) { ++ sm->scaling_list_8x8[i][j] = sl->sl[1][i][j]; ++ sm->scaling_list_16x16[i][j] = sl->sl[2][i][j]; ++ if (i < 2) ++ sm->scaling_list_32x32[i][j] = sl->sl[3][i * 3][j]; ++ } ++ sm->scaling_list_dc_coef_16x16[i] = sl->sl_dc[0][i]; ++ if (i < 2) ++ sm->scaling_list_dc_coef_32x32[i] = sl->sl_dc[1][i * 3]; ++ } ++} ++ ++static void fill_pps(const HEVCPPS * const pps, struct v4l2_ctrl_hevc_pps * const ctrl) ++{ ++ uint64_t flags = 0; ++ ++ if (pps->dependent_slice_segments_enabled_flag) ++ flags |= V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT; ++ ++ if (pps->output_flag_present_flag) ++ flags |= V4L2_HEVC_PPS_FLAG_OUTPUT_FLAG_PRESENT; ++ ++ if (pps->sign_data_hiding_flag) ++ flags |= V4L2_HEVC_PPS_FLAG_SIGN_DATA_HIDING_ENABLED; ++ ++ if (pps->cabac_init_present_flag) ++ flags |= V4L2_HEVC_PPS_FLAG_CABAC_INIT_PRESENT; ++ ++ if (pps->constrained_intra_pred_flag) ++ flags |= V4L2_HEVC_PPS_FLAG_CONSTRAINED_INTRA_PRED; ++ ++ if (pps->transform_skip_enabled_flag) ++ flags |= V4L2_HEVC_PPS_FLAG_TRANSFORM_SKIP_ENABLED; ++ ++ if (pps->cu_qp_delta_enabled_flag) ++ flags |= V4L2_HEVC_PPS_FLAG_CU_QP_DELTA_ENABLED; ++ ++ if (pps->pic_slice_level_chroma_qp_offsets_present_flag) ++ flags |= V4L2_HEVC_PPS_FLAG_PPS_SLICE_CHROMA_QP_OFFSETS_PRESENT; ++ ++ if (pps->weighted_pred_flag) ++ flags |= V4L2_HEVC_PPS_FLAG_WEIGHTED_PRED; ++ ++ if (pps->weighted_bipred_flag) ++ flags |= V4L2_HEVC_PPS_FLAG_WEIGHTED_BIPRED; ++ ++ if (pps->transquant_bypass_enable_flag) ++ flags |= V4L2_HEVC_PPS_FLAG_TRANSQUANT_BYPASS_ENABLED; ++ ++ if (pps->tiles_enabled_flag) ++ flags |= V4L2_HEVC_PPS_FLAG_TILES_ENABLED; ++ ++ if (pps->entropy_coding_sync_enabled_flag) ++ flags |= V4L2_HEVC_PPS_FLAG_ENTROPY_CODING_SYNC_ENABLED; ++ ++ if (pps->loop_filter_across_tiles_enabled_flag) ++ flags |= V4L2_HEVC_PPS_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED; ++ ++ if (pps->seq_loop_filter_across_slices_enabled_flag) ++ flags |= V4L2_HEVC_PPS_FLAG_PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED; ++ ++ if (pps->deblocking_filter_override_enabled_flag) ++ flags |= V4L2_HEVC_PPS_FLAG_DEBLOCKING_FILTER_OVERRIDE_ENABLED; ++ ++ if (pps->disable_dbf) ++ flags |= V4L2_HEVC_PPS_FLAG_PPS_DISABLE_DEBLOCKING_FILTER; ++ ++ if (pps->lists_modification_present_flag) ++ flags |= V4L2_HEVC_PPS_FLAG_LISTS_MODIFICATION_PRESENT; ++ ++ if (pps->slice_header_extension_present_flag) ++ flags |= V4L2_HEVC_PPS_FLAG_SLICE_SEGMENT_HEADER_EXTENSION_PRESENT; ++ ++ /* ISO/IEC 23008-2, ITU-T Rec. 
H.265: Picture parameter set */ ++ *ctrl = (struct v4l2_ctrl_hevc_pps) { ++ .num_extra_slice_header_bits = pps->num_extra_slice_header_bits, ++ .init_qp_minus26 = pps->pic_init_qp_minus26, ++ .diff_cu_qp_delta_depth = pps->diff_cu_qp_delta_depth, ++ .pps_cb_qp_offset = pps->cb_qp_offset, ++ .pps_cr_qp_offset = pps->cr_qp_offset, ++ .pps_beta_offset_div2 = pps->beta_offset / 2, ++ .pps_tc_offset_div2 = pps->tc_offset / 2, ++ .log2_parallel_merge_level_minus2 = pps->log2_parallel_merge_level - 2, ++ .flags = flags ++ }; ++ ++ ++ if (pps->tiles_enabled_flag) { ++ ctrl->num_tile_columns_minus1 = pps->num_tile_columns - 1; ++ ctrl->num_tile_rows_minus1 = pps->num_tile_rows - 1; ++ ++ for (int i = 0; i < pps->num_tile_columns; i++) ++ ctrl->column_width_minus1[i] = pps->column_width[i] - 1; ++ ++ for (int i = 0; i < pps->num_tile_rows; i++) ++ ctrl->row_height_minus1[i] = pps->row_height[i] - 1; ++ } ++} ++ ++// Called before finally returning the frame to the user ++// Set corrupt flag here as this is actually the frame structure that ++// is going to the user (in MT land each thread has its own pool) ++static int frame_post_process(void *logctx, AVFrame *frame) ++{ ++ V4L2MediaReqDescriptor *rd = (V4L2MediaReqDescriptor*)frame->data[0]; ++ ++// av_log(NULL, AV_LOG_INFO, "%s\n", __func__); ++ frame->flags &= ~AV_FRAME_FLAG_CORRUPT; ++ if (rd->qe_dst) { ++ MediaBufsStatus stat = qent_dst_wait(rd->qe_dst); ++ if (stat != MEDIABUFS_STATUS_SUCCESS) { ++ av_log(logctx, AV_LOG_ERROR, "%s: Decode fail\n", __func__); ++ frame->flags |= AV_FRAME_FLAG_CORRUPT; ++ } ++ } ++ ++ return 0; ++} ++ ++static inline struct timeval cvt_timestamp_to_tv(const unsigned int t) ++{ ++ return (struct timeval){ ++ .tv_usec = t % 1000000, ++ .tv_sec = t / 1000000 ++ }; ++} ++ ++static inline uint64_t cvt_timestamp_to_dpb(const unsigned int t) ++{ ++ return (uint64_t)t * 1000; ++} ++ +static int v4l2_request_hevc_start_frame(AVCodecContext *avctx, + av_unused const uint8_t *buffer, + av_unused uint32_t size) @@ -50444,125 +52017,149 @@ index 0000000000..2e21145328 + sps->scaling_list_enable_flag ? 
+ &sps->scaling_list : NULL; + V4L2RequestControlsHEVC *controls = h->ref->hwaccel_picture_private; ++ V4L2MediaReqDescriptor *const rd = (V4L2MediaReqDescriptor *)h->ref->frame->data[0]; + V4L2RequestContextHEVC * const ctx = avctx->internal->hwaccel_priv_data; -+ int rv; + ++// av_log(NULL, AV_LOG_INFO, "%s\n", __func__); + fill_sps(&controls->sps, h); + -+ if (sl) { -+ for (int i = 0; i < 6; i++) { -+ for (int j = 0; j < 16; j++) -+ controls->scaling_matrix.scaling_list_4x4[i][j] = sl->sl[0][i][j]; -+ for (int j = 0; j < 64; j++) { -+ controls->scaling_matrix.scaling_list_8x8[i][j] = sl->sl[1][i][j]; -+ controls->scaling_matrix.scaling_list_16x16[i][j] = sl->sl[2][i][j]; -+ if (i < 2) -+ controls->scaling_matrix.scaling_list_32x32[i][j] = sl->sl[3][i * 3][j]; -+ } -+ controls->scaling_matrix.scaling_list_dc_coef_16x16[i] = sl->sl_dc[0][i]; -+ if (i < 2) -+ controls->scaling_matrix.scaling_list_dc_coef_32x32[i] = sl->sl_dc[1][i * 3]; ++ if (sl) ++ fill_scaling_matrix(sl, &controls->scaling_matrix); ++ ++ fill_pps(h->ps.pps, &controls->pps); ++ ++ controls->first_slice = 1; ++ controls->dst_qed = 0; ++ controls->num_slices = 0; ++ ctx->timestamp++; ++ ++// if ((rv = ff_v4l2_request_reset_frame(avctx, h->ref->frame)) != 0) ++// return rv; ++ ++ { ++ FrameDecodeData * const fdd = (FrameDecodeData*)h->ref->frame->private_ref->data; ++ fdd->post_process = frame_post_process; ++ } ++ ++ // qe_dst needs to be bound to the data buffer and only returned when that is ++ if (!rd->qe_dst) ++ { ++ if ((rd->qe_dst = mediabufs_dst_qent_alloc(ctx->mbufs, ctx->dbufs)) == NULL) { ++ av_log(avctx, AV_LOG_ERROR, "%s: Failed to get dst buffer\n", __func__); ++ return AVERROR(ENOMEM); + } + } + -+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: Picture parameter set */ -+ controls->pps = (struct v4l2_ctrl_hevc_pps) { -+ .num_extra_slice_header_bits = pps->num_extra_slice_header_bits, -+ .init_qp_minus26 = pps->pic_init_qp_minus26, -+ .diff_cu_qp_delta_depth = pps->diff_cu_qp_delta_depth, -+ .pps_cb_qp_offset = pps->cb_qp_offset, -+ .pps_cr_qp_offset = pps->cr_qp_offset, -+ .pps_beta_offset_div2 = pps->beta_offset / 2, -+ .pps_tc_offset_div2 = pps->tc_offset / 2, -+ .log2_parallel_merge_level_minus2 = pps->log2_parallel_merge_level - 2, -+ }; ++// ff_v4l2_request_start_phase_control(h->ref->frame, ctx->pctrl); + -+ if (pps->dependent_slice_segments_enabled_flag) -+ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT; -+ -+ if (pps->output_flag_present_flag) -+ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_OUTPUT_FLAG_PRESENT; -+ -+ if (pps->sign_data_hiding_flag) -+ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_SIGN_DATA_HIDING_ENABLED; -+ -+ if (pps->cabac_init_present_flag) -+ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_CABAC_INIT_PRESENT; -+ -+ if (pps->constrained_intra_pred_flag) -+ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_CONSTRAINED_INTRA_PRED; -+ -+ if (pps->transform_skip_enabled_flag) -+ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_TRANSFORM_SKIP_ENABLED; -+ -+ if (pps->cu_qp_delta_enabled_flag) -+ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_CU_QP_DELTA_ENABLED; -+ -+ if (pps->pic_slice_level_chroma_qp_offsets_present_flag) -+ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_PPS_SLICE_CHROMA_QP_OFFSETS_PRESENT; -+ -+ if (pps->weighted_pred_flag) -+ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_WEIGHTED_PRED; -+ -+ if (pps->weighted_bipred_flag) -+ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_WEIGHTED_BIPRED; -+ -+ if (pps->transquant_bypass_enable_flag) -+ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_TRANSQUANT_BYPASS_ENABLED; 
-+ -+ if (pps->tiles_enabled_flag) -+ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_TILES_ENABLED; -+ -+ if (pps->entropy_coding_sync_enabled_flag) -+ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_ENTROPY_CODING_SYNC_ENABLED; -+ -+ if (pps->loop_filter_across_tiles_enabled_flag) -+ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED; -+ -+ if (pps->seq_loop_filter_across_slices_enabled_flag) -+ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED; -+ -+ if (pps->deblocking_filter_override_enabled_flag) -+ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_DEBLOCKING_FILTER_OVERRIDE_ENABLED; -+ -+ if (pps->disable_dbf) -+ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_PPS_DISABLE_DEBLOCKING_FILTER; -+ -+ if (pps->lists_modification_present_flag) -+ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_LISTS_MODIFICATION_PRESENT; -+ -+ if (pps->slice_header_extension_present_flag) -+ controls->pps.flags |= V4L2_HEVC_PPS_FLAG_SLICE_SEGMENT_HEADER_EXTENSION_PRESENT; -+ -+ if (pps->tiles_enabled_flag) { -+ controls->pps.num_tile_columns_minus1 = pps->num_tile_columns - 1; -+ controls->pps.num_tile_rows_minus1 = pps->num_tile_rows - 1; -+ -+ for (int i = 0; i < pps->num_tile_columns; i++) -+ controls->pps.column_width_minus1[i] = pps->column_width[i] - 1; -+ -+ for (int i = 0; i < pps->num_tile_rows; i++) -+ controls->pps.row_height_minus1[i] = pps->row_height[i] - 1; -+ } -+ -+ controls->first_slice = 1; -+ controls->num_slices = 0; -+ -+ if ((rv = ff_v4l2_request_reset_frame(avctx, h->ref->frame)) != 0) -+ return rv; -+ -+ ff_v4l2_request_start_phase_control(h->ref->frame, ctx->pctrl); -+ -+ ff_thread_finish_setup(avctx); // Allow next thread to enter rpi_hevc_start_frame ++// ff_thread_finish_setup(avctx); // Allow next thread to enter rpi_hevc_start_frame + + return 0; +} + -+static int v4l2_request_hevc_queue_decode(AVCodecContext *avctx, int last_slice) ++// Object fd & size will be zapped by this & need setting later ++static int drm_from_format(AVDRMFrameDescriptor * const desc, const struct v4l2_format * const format) ++{ ++ AVDRMLayerDescriptor *layer = &desc->layers[0]; ++ unsigned int width; ++ unsigned int height; ++ unsigned int bpl; ++ uint32_t pixelformat; ++ ++ if (V4L2_TYPE_IS_MULTIPLANAR(format->type)) { ++ width = format->fmt.pix_mp.width; ++ height = format->fmt.pix_mp.height; ++ pixelformat = format->fmt.pix_mp.pixelformat; ++ bpl = format->fmt.pix_mp.plane_fmt[0].bytesperline; ++ } ++ else { ++ width = format->fmt.pix.width; ++ height = format->fmt.pix.height; ++ pixelformat = format->fmt.pix.pixelformat; ++ bpl = format->fmt.pix.bytesperline; ++ } ++ ++ switch (pixelformat) { ++ case V4L2_PIX_FMT_NV12: ++ layer->format = DRM_FORMAT_NV12; ++ desc->objects[0].format_modifier = DRM_FORMAT_MOD_LINEAR; ++ break; ++#if CONFIG_SAND ++ case V4L2_PIX_FMT_NV12_COL128: ++ layer->format = DRM_FORMAT_NV12; ++ desc->objects[0].format_modifier = DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT(bpl); ++ break; ++ case V4L2_PIX_FMT_NV12_10_COL128: ++ layer->format = DRM_FORMAT_P030; ++ desc->objects[0].format_modifier = DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT(bpl); ++ break; ++#endif ++#ifdef DRM_FORMAT_MOD_ALLWINNER_TILED ++ case V4L2_PIX_FMT_SUNXI_TILED_NV12: ++ layer->format = DRM_FORMAT_NV12; ++ desc->objects[0].format_modifier = DRM_FORMAT_MOD_ALLWINNER_TILED; ++ break; ++#endif ++#if defined(V4L2_PIX_FMT_NV15) && defined(DRM_FORMAT_NV15) ++ case V4L2_PIX_FMT_NV15: ++ layer->format = DRM_FORMAT_NV15; ++ desc->objects[0].format_modifier = DRM_FORMAT_MOD_LINEAR; ++ break; 
++#endif ++ case V4L2_PIX_FMT_NV16: ++ layer->format = DRM_FORMAT_NV16; ++ desc->objects[0].format_modifier = DRM_FORMAT_MOD_LINEAR; ++ break; ++#if defined(V4L2_PIX_FMT_NV20) && defined(DRM_FORMAT_NV20) ++ case V4L2_PIX_FMT_NV20: ++ layer->format = DRM_FORMAT_NV20; ++ desc->objects[0].format_modifier = DRM_FORMAT_MOD_LINEAR; ++ break; ++#endif ++ default: ++ return -1; ++ } ++ ++ desc->nb_objects = 1; ++ desc->objects[0].fd = -1; ++ desc->objects[0].size = 0; ++ ++ desc->nb_layers = 1; ++ layer->nb_planes = 2; ++ ++ layer->planes[0].object_index = 0; ++ layer->planes[0].offset = 0; ++ layer->planes[0].pitch = bpl; ++#if CONFIG_SAND ++ if (pixelformat == V4L2_PIX_FMT_NV12_COL128) { ++ layer->planes[1].object_index = 0; ++ layer->planes[1].offset = height * 128; ++ layer->planes[0].pitch = width; ++ layer->planes[1].pitch = width; ++ } ++ else if (pixelformat == V4L2_PIX_FMT_NV12_10_COL128) { ++ layer->planes[1].object_index = 0; ++ layer->planes[1].offset = height * 128; ++ layer->planes[0].pitch = width * 2; // Lies but it keeps DRM import happy ++ layer->planes[1].pitch = width * 2; ++ } ++ else ++#endif ++ { ++ layer->planes[1].object_index = 0; ++ layer->planes[1].offset = layer->planes[0].pitch * height; ++ layer->planes[1].pitch = layer->planes[0].pitch; ++ } ++ ++ return 0; ++} ++ ++static int set_req_ctls(AVCodecContext *avctx, struct media_request * const mreq) +{ + const HEVCContext *h = avctx->priv_data; + V4L2RequestControlsHEVC *controls = h->ref->hwaccel_picture_private; + V4L2RequestContextHEVC *ctx = avctx->internal->hwaccel_priv_data; ++ int rv; + + struct v4l2_ext_control control[] = { + { @@ -50576,93 +52173,129 @@ index 0000000000..2e21145328 + .size = sizeof(controls->pps), + }, + { -+ .id = V4L2_CID_MPEG_VIDEO_HEVC_SCALING_MATRIX, -+ .ptr = &controls->scaling_matrix, -+ .size = sizeof(controls->scaling_matrix), -+ }, -+ { + .id = V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS, + .ptr = &controls->slice_params, + .size = sizeof(controls->slice_params[0]) * FFMAX(FFMIN(controls->num_slices, MAX_SLICES), ctx->max_slices), + }, ++ // *** Make optional ++ { ++ .id = V4L2_CID_MPEG_VIDEO_HEVC_SCALING_MATRIX, ++ .ptr = &controls->scaling_matrix, ++ .size = sizeof(controls->scaling_matrix), ++ }, + }; + -+ if (ctx->decode_mode == V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_SLICE_BASED) -+ return ff_v4l2_request_decode_slice(avctx, h->ref->frame, control, FF_ARRAY_ELEMS(control), controls->first_slice, last_slice); ++ rv = mediabufs_ctl_set_ext_ctrls(ctx->mbufs, mreq, control, FF_ARRAY_ELEMS(control)); ++// return ff_v4l2_request_decode_frame(avctx, h->ref->frame, control, FF_ARRAY_ELEMS(control)); + -+ return ff_v4l2_request_decode_frame(avctx, h->ref->frame, control, FF_ARRAY_ELEMS(control)); ++ return rv; +} + +static int v4l2_request_hevc_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size) +{ + const HEVCContext *h = avctx->priv_data; + V4L2RequestControlsHEVC *controls = h->ref->hwaccel_picture_private; -+ V4L2RequestContextHEVC *ctx = avctx->internal->hwaccel_priv_data; -+ V4L2RequestDescriptor *req = (V4L2RequestDescriptor*)h->ref->frame->data[0]; -+ int ret, slice = FFMIN(controls->num_slices, MAX_SLICES - 1); ++ V4L2RequestContextHEVC * const ctx = avctx->internal->hwaccel_priv_data; ++ V4L2MediaReqDescriptor * const rd = (V4L2MediaReqDescriptor*)h->ref->frame->data[0]; ++ int slice = FFMIN(controls->num_slices, MAX_SLICES - 1); + int bcount = get_bits_count(&h->HEVClc->gb); + uint32_t boff = (ptr_from_index(buffer, bcount/8 + 1) - (buffer + bcount/8 + 1)) * 8 + 
bcount; + -+ if (ctx->decode_mode == V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_SLICE_BASED && slice) { -+ ret = v4l2_request_hevc_queue_decode(avctx, 0); -+ if (ret) -+ return ret; ++ if (!controls->first_slice) { ++ MediaBufsStatus stat; + -+ ff_v4l2_request_reset_frame(avctx, h->ref->frame); -+ slice = controls->num_slices = 0; -+ controls->first_slice = 0; ++ // Dispatch previous slice ++ stat = mediabufs_start_request(ctx->mbufs, &rd->req, &rd->qe_src, ++ controls->dst_qed ? NULL : rd->qe_dst, 0); ++ if (stat != MEDIABUFS_STATUS_SUCCESS) { ++ av_log(avctx, AV_LOG_ERROR, "%s: Failed to start request\n", __func__); ++ return AVERROR_UNKNOWN; ++ } ++ controls->dst_qed = 1; ++ } ++ controls->first_slice = 0; ++ ++ // Get new req ++ if ((rd->req = media_request_get(ctx->mpool)) == NULL) { ++ av_log(avctx, AV_LOG_ERROR, "%s: Failed to alloc media request\n", __func__); ++ return AVERROR_UNKNOWN; + } + -+ v4l2_request_hevc_fill_slice_params(h, &controls->slice_params[slice]); ++ fill_slice_params(h, controls->slice_params + slice); ++ ++ controls->slice_params[slice].bit_size = size * 8; //FIXME ++ controls->slice_params[slice].data_bit_offset = boff; //FIXME ++ ++ controls->num_slices++; ++ if (set_req_ctls(avctx, rd->req)) { ++ av_log(avctx, AV_LOG_ERROR, "%s: Failed to set ext ctrl slice params\n", __func__); ++ return AVERROR_UNKNOWN; ++ } ++ controls->num_slices = 0; ++ ++ if ((rd->qe_src = mediabufs_src_qent_get(ctx->mbufs)) == NULL) { ++ av_log(avctx, AV_LOG_ERROR, "%s: Failed to get src buffer\n", __func__); ++ return AVERROR(ENOMEM); ++ } ++ ++ if (qent_src_data_copy(rd->qe_src, 0, buffer, size, ctx->dbufs) != 0) { ++ av_log(avctx, AV_LOG_ERROR, "%s: Failed data copy\n", __func__); ++ return AVERROR(ENOMEM); ++ } ++ ++ { ++ struct timeval tv = cvt_timestamp_to_tv(ctx->timestamp); ++ frame_set_capture_dpb(h->ref->frame, cvt_timestamp_to_dpb(ctx->timestamp)); ++ qent_src_params_set(rd->qe_src, &tv); ++ } ++ ++ fill_slice_params(h, &controls->slice_params[slice]); + + if (ctx->start_code == V4L2_MPEG_VIDEO_HEVC_START_CODE_ANNEX_B) { + // ?? Do we really not need the nal type ?? 
-+ ret = ff_v4l2_request_append_output_buffer(avctx, h->ref->frame, nalu_slice_start_code, 3); -+ if (ret) -+ return ret; ++ av_log(avctx, AV_LOG_ERROR, "%s: NIF\n", __func__); + } -+ boff += req->output.used * 8; -+ -+ ret = ff_v4l2_request_append_output_buffer(avctx, h->ref->frame, buffer, size); -+ if (ret) -+ return ret; -+ -+ controls->slice_params[slice].bit_size = req->output.used * 8; //FIXME -+ controls->slice_params[slice].data_bit_offset = boff; //FIXME -+ controls->num_slices++; + return 0; +} + -+static void v4l2_request_hevc_abort_frame(AVCodecContext * const avctx) { -+ const HEVCContext *h = avctx->priv_data; ++static void v4l2_request_hevc_abort_frame(AVCodecContext * const avctx) ++{ ++ const HEVCContext * const h = avctx->priv_data; ++ V4L2MediaReqDescriptor * const rd = (V4L2MediaReqDescriptor*)h->ref->frame->data[0]; ++ V4L2RequestContextHEVC * const ctx = avctx->internal->hwaccel_priv_data; + -+ if (h->ref != NULL) -+ ff_v4l2_request_abort_phase_control(h->ref->frame); ++ media_request_abort(&rd->req); ++ mediabufs_src_qent_abort(ctx->mbufs, &rd->qe_src); +} + +static int v4l2_request_hevc_end_frame(AVCodecContext *avctx) +{ -+ int rv = v4l2_request_hevc_queue_decode(avctx, 1); -+ if (rv < 0) -+ v4l2_request_hevc_abort_frame(avctx); -+ return rv; -+} ++ const HEVCContext * const h = avctx->priv_data; ++ V4L2MediaReqDescriptor *rd = (V4L2MediaReqDescriptor*)h->ref->frame->data[0]; ++ V4L2RequestContextHEVC *ctx = avctx->internal->hwaccel_priv_data; ++ MediaBufsStatus stat; ++ V4L2RequestControlsHEVC *controls = h->ref->hwaccel_picture_private; ++// av_log(NULL, AV_LOG_INFO, "%s\n", __func__); + -+// Called before finally returning the frame to the user -+// Set corrupt flag here as this is actually the frame structure that -+// is going to the user (in MT land each thread has its own pool) -+static int v4l2_request_post_process(void *logctx, AVFrame *frame) -+{ -+ V4L2RequestDescriptor *req = (V4L2RequestDescriptor*)frame->data[0]; -+ if (req) { -+ av_log(logctx, AV_LOG_DEBUG, "%s: flags=%#x, ts=%ld.%06ld\n", __func__, req->capture.buffer.flags, -+ req->capture.buffer.timestamp.tv_sec, req->capture.buffer.timestamp.tv_usec); -+ frame->flags = (req->capture.buffer.flags & V4L2_BUF_FLAG_ERROR) == 0 ? 0 : AV_FRAME_FLAG_CORRUPT; ++ // Dispatch previous slice ++ stat = mediabufs_start_request(ctx->mbufs, &rd->req, &rd->qe_src, ++ controls->dst_qed ? 
NULL : rd->qe_dst, 1); ++ if (stat != MEDIABUFS_STATUS_SUCCESS) { ++ av_log(avctx, AV_LOG_ERROR, "%s: Failed to start request\n", __func__); ++ return AVERROR_UNKNOWN; + } + ++ ff_thread_finish_setup(avctx); // Allow next thread to enter rpi_hevc_start_frame ++ ++ // Set the drm_prime desriptor ++ drm_from_format(&rd->drm, mediabufs_dst_fmt(ctx->mbufs)); ++ rd->drm.objects[0].fd = dmabuf_fd(qent_dst_dmabuf(rd->qe_dst, 0)); ++ rd->drm.objects[0].size = dmabuf_size(qent_dst_dmabuf(rd->qe_dst, 0)); ++ + return 0; +} + ++#if 0 +static int v4l2_request_hevc_alloc_frame(AVCodecContext * avctx, AVFrame *frame) +{ + int ret; @@ -50676,11 +52309,6 @@ index 0000000000..2e21145328 + if (ret < 0) + goto fail; + -+ { -+ FrameDecodeData *fdd = (FrameDecodeData*)frame->private_ref->data; -+ fdd->post_process = v4l2_request_post_process; -+ } -+ + return 0; + +fail: @@ -50691,90 +52319,312 @@ index 0000000000..2e21145328 + + return ret; +} ++#endif + -+static int v4l2_request_hevc_set_controls(AVCodecContext *avctx) ++static int set_controls(AVCodecContext *avctx) +{ + V4L2RequestContextHEVC *ctx = avctx->internal->hwaccel_priv_data; + int ret; + -+ struct v4l2_ext_control control[] = { ++ struct v4l2_query_ext_ctrl querys[] = { ++ { .id = V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE, }, ++ { .id = V4L2_CID_MPEG_VIDEO_HEVC_START_CODE, }, ++ { .id = V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS, }, ++ }; ++ ++ struct v4l2_ext_control ctrls[] = { + { .id = V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE, }, + { .id = V4L2_CID_MPEG_VIDEO_HEVC_START_CODE, }, + }; -+ struct v4l2_query_ext_ctrl slice_params = { -+ .id = V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS, -+ }; + -+ ctx->decode_mode = ff_v4l2_request_query_control_default_value(avctx, V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE); ++ mediabufs_ctl_query_ext_ctrls(ctx->mbufs, querys, FF_ARRAY_ELEMS(querys)); ++ ++ ctx->decode_mode = querys[0].default_value; ++ + if (ctx->decode_mode != V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_SLICE_BASED && + ctx->decode_mode != V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_FRAME_BASED) { + av_log(avctx, AV_LOG_ERROR, "%s: unsupported decode mode, %d\n", __func__, ctx->decode_mode); + return AVERROR(EINVAL); + } + -+ ctx->start_code = ff_v4l2_request_query_control_default_value(avctx, V4L2_CID_MPEG_VIDEO_HEVC_START_CODE); ++ ctx->start_code = querys[1].default_value; + if (ctx->start_code != V4L2_MPEG_VIDEO_HEVC_START_CODE_NONE && + ctx->start_code != V4L2_MPEG_VIDEO_HEVC_START_CODE_ANNEX_B) { + av_log(avctx, AV_LOG_ERROR, "%s: unsupported start code, %d\n", __func__, ctx->start_code); + return AVERROR(EINVAL); + } + -+ ret = ff_v4l2_request_query_control(avctx, &slice_params); -+ if (ret) -+ return ret; -+ -+ ctx->max_slices = slice_params.elems; ++ ctx->max_slices = querys[2].elems; + if (ctx->max_slices > MAX_SLICES) { + av_log(avctx, AV_LOG_ERROR, "%s: unsupported max slices, %d\n", __func__, ctx->max_slices); + return AVERROR(EINVAL); + } + -+ control[0].value = ctx->decode_mode; -+ control[1].value = ctx->start_code; ++ ctrls[0].value = ctx->decode_mode; ++ ctrls[1].value = ctx->start_code; + -+ return ff_v4l2_request_set_controls(avctx, control, FF_ARRAY_ELEMS(control)); ++ ret = mediabufs_ctl_set_ext_ctrls(ctx->mbufs, NULL, ctrls, FF_ARRAY_ELEMS(ctrls)); ++ return !ret ? 
0 : AVERROR(-ret); +} + +static int v4l2_request_hevc_uninit(AVCodecContext *avctx) +{ + V4L2RequestContextHEVC * const ctx = avctx->internal->hwaccel_priv_data; -+ ff_v4l2_phase_control_deletez(&ctx->pctrl); -+ return ff_v4l2_request_uninit(avctx); ++ ++ mediabufs_ctl_unref(&ctx->mbufs); ++ media_pool_delete(&ctx->mpool); ++ pollqueue_delete(&ctx->pq); ++ dmabufs_ctl_delete(&ctx->dbufs); ++ devscan_delete(&ctx->devscan); ++ ++// ff_v4l2_phase_control_deletez(&ctx->pctrl); ++ ++// if (avctx->hw_frames_ctx) { ++// AVHWFramesContext *hwfc = (AVHWFramesContext*)avctx->hw_frames_ctx->data; ++// av_buffer_pool_flush(hwfc->pool); ++// } ++ return 0; ++} ++ ++static int dst_fmt_accept_cb(void * v, const struct v4l2_fmtdesc *fmtdesc) ++{ ++ AVCodecContext *const avctx = v; ++ const HEVCContext *const h = avctx->priv_data; ++ ++ if (h->ps.sps->bit_depth == 8) { ++ if (fmtdesc->pixelformat == V4L2_PIX_FMT_NV12_COL128 || ++ fmtdesc->pixelformat == V4L2_PIX_FMT_NV12) { ++ return 1; ++ } ++ } ++ else if (h->ps.sps->bit_depth == 10) { ++ if (fmtdesc->pixelformat == V4L2_PIX_FMT_NV12_10_COL128) { ++ return 1; ++ } ++ } ++ return 0; +} + +static int v4l2_request_hevc_init(AVCodecContext *avctx) +{ + const HEVCContext *h = avctx->priv_data; + V4L2RequestContextHEVC * const ctx = avctx->internal->hwaccel_priv_data; -+ struct v4l2_ctrl_hevc_sps sps; ++ const HEVCSPS * const sps = h->ps.sps; ++ struct v4l2_ctrl_hevc_sps ctrl_sps; + int ret; ++ const struct decdev * decdev; ++ uint32_t src_pix_fmt = V4L2_PIX_FMT_HEVC_SLICE; ++ size_t src_size; + -+ struct v4l2_ext_control control[] = { -+ { -+ .id = V4L2_CID_MPEG_VIDEO_HEVC_SPS, -+ .ptr = &sps, -+ .size = sizeof(sps), -+ }, -+ }; ++ if ((ret = devscan_build(avctx, &ctx->devscan)) != 0) { ++ av_log(avctx, AV_LOG_ERROR, "Failed to find any V4L2 devices\n"); ++ return (AVERROR(-ret)); ++ } ++ if ((decdev = devscan_find(ctx->devscan, src_pix_fmt)) == NULL) ++ { ++ av_log(avctx, AV_LOG_ERROR, "Failed to find a V4L2 device for H265\n"); ++ goto fail0; ++ } ++ av_log(avctx, AV_LOG_INFO, "Trying V4L2 devices: %s,%s\n", ++ decdev_media_path(decdev), decdev_video_path(decdev)); + -+ if ((ctx->pctrl = ff_v4l2_phase_control_new(2)) == NULL) -+ return AVERROR(ENOMEM); ++ if ((ctx->dbufs = dmabufs_ctl_new()) == NULL) { ++ av_log(avctx, AV_LOG_ERROR, "Unable to open dmabufs\n"); ++ goto fail0; ++ } + -+ fill_sps(&sps, h); ++ if ((ctx->pq = pollqueue_new()) == NULL) { ++ av_log(avctx, AV_LOG_ERROR, "Unable to create pollqueue\n"); ++ goto fail1; ++ } + -+ ret = ff_v4l2_request_init(avctx, V4L2_PIX_FMT_HEVC_SLICE, 4 * 1024 * 1024, control, FF_ARRAY_ELEMS(control)); ++ if ((ctx->mpool = media_pool_new(decdev_media_path(decdev), ctx->pq, 4)) == NULL) { ++ av_log(avctx, AV_LOG_ERROR, "Unable to create media pool\n"); ++ goto fail2; ++ } ++ ++ if ((ctx->mbufs = mediabufs_ctl_new(avctx, decdev_video_path(decdev), ctx->pq)) == NULL) { ++ av_log(avctx, AV_LOG_ERROR, "Unable to create media controls\n"); ++ goto fail3; ++ } ++ ++ fill_sps(&ctrl_sps, h); ++ ++ // Ask for an initial bitbuf size of max size / 4 ++ // We will realloc if we need more ++ // Must use sps->h/w as avctx contains cropped size ++ src_size = bit_buf_size(sps->width, sps->height, sps->bit_depth - 8); ++ if (mediabufs_src_resizable(ctx->mbufs)) ++ src_size /= 4; ++ if (mediabufs_src_fmt_set(ctx->mbufs, decdev_src_type(decdev), src_pix_fmt, ++ sps->width, sps->height, src_size)) { ++ char tbuf1[5]; ++ av_log(avctx, AV_LOG_ERROR, "Failed to set source format: %s %dx%d\n", strfourcc(tbuf1, src_pix_fmt), sps->width, 
sps->height); ++ goto fail4; ++ } ++ ++ if (mediabufs_set_ext_ctrl(ctx->mbufs, NULL, V4L2_CID_MPEG_VIDEO_HEVC_SPS, &ctrl_sps, sizeof(ctrl_sps))) { ++ av_log(avctx, AV_LOG_ERROR, "Failed to set initial SPS\n"); ++ goto fail4; ++ } ++ ++ if (mediabufs_dst_fmt_set(ctx->mbufs, sps->width, sps->height, dst_fmt_accept_cb, avctx)) { ++ char tbuf1[5]; ++ av_log(avctx, AV_LOG_ERROR, "Failed to set destination format: %s %dx%d\n", strfourcc(tbuf1, src_pix_fmt), sps->width, sps->height); ++ goto fail4; ++ } ++ ++ if (mediabufs_src_pool_create(ctx->mbufs, ctx->dbufs, 6)) { ++ av_log(avctx, AV_LOG_ERROR, "Failed to create source pool\n"); ++ goto fail4; ++ } ++ ++ if (mediabufs_dst_slots_create(ctx->mbufs, 1)) { ++ av_log(avctx, AV_LOG_ERROR, "Failed to create destination slots\n"); ++ goto fail4; ++ } ++ ++ if (mediabufs_stream_on(ctx->mbufs)) { ++ av_log(avctx, AV_LOG_ERROR, "Failed stream on\n"); ++ goto fail4; ++ } ++ ++ ret = ff_decode_get_hw_frames_ctx(avctx, AV_HWDEVICE_TYPE_DRM); + if (ret) + return ret; + -+ return v4l2_request_hevc_set_controls(avctx); ++ return set_controls(avctx); ++ ++fail4: ++ mediabufs_ctl_unref(&ctx->mbufs); ++fail3: ++ media_pool_delete(&ctx->mpool); ++fail2: ++ pollqueue_delete(&ctx->pq); ++fail1: ++ dmabufs_ctl_delete(&ctx->dbufs); ++fail0: ++ devscan_delete(&ctx->devscan); ++ return AVERROR(ENOMEM); +} + ++ ++ ++ ++static void v4l2_req_frame_free(void *opaque, uint8_t *data) ++{ ++ AVCodecContext *avctx = opaque; ++ V4L2MediaReqDescriptor * const rd = (V4L2MediaReqDescriptor*)data; ++ ++ av_log(NULL, AV_LOG_DEBUG, "%s: avctx=%p data=%p\n", __func__, avctx, data); ++ ++ qent_dst_free(&rd->qe_dst); ++ ++ // We don't expect req or qe_src to be set ++ if (rd->req || rd->qe_src) ++ av_log(NULL, AV_LOG_ERROR, "%s: qe_src %p or req %p not NULL\n", __func__, rd->req, rd->qe_src); ++ ++ av_free(data); ++} ++ ++static AVBufferRef *v4l2_req_frame_alloc(void *opaque, int size) ++{ ++ AVCodecContext *avctx = opaque; ++// V4L2RequestContextHEVC *ctx = avctx->internal->hwaccel_priv_data; ++// V4L2MediaReqDescriptor *req; ++ AVBufferRef *ref; ++ uint8_t *data; ++// int ret; ++ ++ data = av_mallocz(size); ++ if (!data) ++ return NULL; ++ ++ av_log(avctx, AV_LOG_DEBUG, "%s: avctx=%p size=%d data=%p\n", __func__, avctx, size, data); ++ ref = av_buffer_create(data, size, v4l2_req_frame_free, avctx, 0); ++ if (!ref) { ++ av_freep(&data); ++ return NULL; ++ } ++ return ref; ++} ++ ++static void v4l2_req_pool_free(void *opaque) ++{ ++ av_log(NULL, AV_LOG_DEBUG, "%s: opaque=%p\n", __func__, opaque); ++} ++ ++static void v4l2_req_hwframe_ctx_free(AVHWFramesContext *hwfc) ++{ ++ av_log(NULL, AV_LOG_DEBUG, "%s: hwfc=%p pool=%p\n", __func__, hwfc, hwfc->pool); ++ ++ av_buffer_pool_uninit(&hwfc->pool); ++} ++ ++static int v4l2_req_hevc_frame_params(AVCodecContext *avctx, AVBufferRef *hw_frames_ctx) ++{ ++ V4L2RequestContextHEVC *ctx = avctx->internal->hwaccel_priv_data; ++ AVHWFramesContext *hwfc = (AVHWFramesContext*)hw_frames_ctx->data; ++ const struct v4l2_format *vfmt = mediabufs_dst_fmt(ctx->mbufs); ++ ++ hwfc->format = AV_PIX_FMT_DRM_PRIME; ++ hwfc->sw_format = AV_PIX_FMT_NV12; ++ if (V4L2_TYPE_IS_MULTIPLANAR(vfmt->type)) { ++ hwfc->width = vfmt->fmt.pix_mp.width; ++ hwfc->height = vfmt->fmt.pix_mp.height; ++#if CONFIG_SAND ++ if (vfmt->fmt.pix_mp.pixelformat == V4L2_PIX_FMT_NV12_COL128) { ++ hwfc->sw_format = AV_PIX_FMT_RPI4_8; ++ } ++ else if (vfmt->fmt.pix_mp.pixelformat == V4L2_PIX_FMT_NV12_10_COL128) { ++ hwfc->sw_format = AV_PIX_FMT_RPI4_10; ++ } ++#endif ++ } else { ++ hwfc->width 
= vfmt->fmt.pix.width; ++ hwfc->height = vfmt->fmt.pix.height; ++#if CONFIG_SAND ++ if (vfmt->fmt.pix.pixelformat == V4L2_PIX_FMT_NV12_COL128) { ++ hwfc->sw_format = AV_PIX_FMT_RPI4_8; ++ } ++ else if (vfmt->fmt.pix.pixelformat == V4L2_PIX_FMT_NV12_10_COL128) { ++ hwfc->sw_format = AV_PIX_FMT_RPI4_10; ++ } ++#endif ++ } ++ ++ hwfc->pool = av_buffer_pool_init2(sizeof(V4L2MediaReqDescriptor), avctx, v4l2_req_frame_alloc, v4l2_req_pool_free); ++ if (!hwfc->pool) ++ return AVERROR(ENOMEM); ++ ++ hwfc->free = v4l2_req_hwframe_ctx_free; ++ ++ hwfc->initial_pool_size = 1; ++ ++ switch (avctx->codec_id) { ++ case AV_CODEC_ID_VP9: ++ hwfc->initial_pool_size += 8; ++ break; ++ case AV_CODEC_ID_VP8: ++ hwfc->initial_pool_size += 3; ++ break; ++ default: ++ hwfc->initial_pool_size += 2; ++ } ++ ++ av_log(avctx, AV_LOG_DEBUG, "%s: avctx=%p ctx=%p hw_frames_ctx=%p hwfc=%p pool=%p width=%d height=%d initial_pool_size=%d\n", __func__, avctx, ctx, hw_frames_ctx, hwfc, hwfc->pool, hwfc->width, hwfc->height, hwfc->initial_pool_size); ++ ++ return 0; ++} ++ ++ ++ +const AVHWAccel ff_hevc_v4l2request_hwaccel = { + .name = "hevc_v4l2request", + .type = AVMEDIA_TYPE_VIDEO, + .id = AV_CODEC_ID_HEVC, + .pix_fmt = AV_PIX_FMT_DRM_PRIME, -+ .alloc_frame = v4l2_request_hevc_alloc_frame, ++// .alloc_frame = v4l2_request_hevc_alloc_frame, + .start_frame = v4l2_request_hevc_start_frame, + .decode_slice = v4l2_request_hevc_decode_slice, + .end_frame = v4l2_request_hevc_end_frame, @@ -50783,1436 +52633,158 @@ index 0000000000..2e21145328 + .init = v4l2_request_hevc_init, + .uninit = v4l2_request_hevc_uninit, + .priv_data_size = sizeof(V4L2RequestContextHEVC), -+ .frame_params = ff_v4l2_request_frame_params, ++ .frame_params = v4l2_req_hevc_frame_params, + .caps_internal = HWACCEL_CAP_ASYNC_SAFE | HWACCEL_CAP_MT_SAFE, +}; -diff --git a/libavcodec/v4l2_request_mpeg2.c b/libavcodec/v4l2_request_mpeg2.c +diff --git a/libavcodec/weak_link.c b/libavcodec/weak_link.c new file mode 100644 -index 0000000000..bc251a6fd2 +index 0000000000..83ce7c0653 --- /dev/null -+++ b/libavcodec/v4l2_request_mpeg2.c -@@ -0,0 +1,155 @@ -+/* -+ * This file is part of FFmpeg. -+ * -+ * FFmpeg is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU Lesser General Public -+ * License as published by the Free Software Foundation; either -+ * version 2.1 of the License, or (at your option) any later version. -+ * -+ * FFmpeg is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * Lesser General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU Lesser General Public -+ * License along with FFmpeg; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA -+ */ ++++ b/libavcodec/weak_link.c +@@ -0,0 +1,100 @@ ++#include ++#include ++#include ++#include "weak_link.h" + -+#include "hwconfig.h" -+#include "mpegvideo.h" -+#include "v4l2_request.h" -+#include "mpeg2-ctrls.h" -+ -+typedef struct V4L2RequestControlsMPEG2 { -+ struct v4l2_ctrl_mpeg2_slice_params slice_params; -+ struct v4l2_ctrl_mpeg2_quantization quantization; -+} V4L2RequestControlsMPEG2; -+ -+static int v4l2_request_mpeg2_start_frame(AVCodecContext *avctx, -+ av_unused const uint8_t *buffer, -+ av_unused uint32_t size) -+{ -+ const MpegEncContext *s = avctx->priv_data; -+ V4L2RequestControlsMPEG2 *controls = s->current_picture_ptr->hwaccel_picture_private; -+ V4L2RequestDescriptor *req = (V4L2RequestDescriptor*)s->current_picture_ptr->f->data[0]; -+ -+ controls->slice_params = (struct v4l2_ctrl_mpeg2_slice_params) { -+ .bit_size = 0, -+ .data_bit_offset = 0, -+ -+ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Slice */ -+ .quantiser_scale_code = s->qscale >> 1, -+ -+ .sequence = { -+ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence header */ -+ .horizontal_size = s->width, -+ .vertical_size = s->height, -+ .vbv_buffer_size = req->output.size, -+ -+ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Sequence extension */ -+ .profile_and_level_indication = 0, -+ .progressive_sequence = s->progressive_sequence, -+ .chroma_format = s->chroma_format, -+ }, -+ -+ .picture = { -+ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture header */ -+ .picture_coding_type = s->pict_type, -+ -+ /* ISO/IEC 13818-2, ITU-T Rec. H.262: Picture coding extension */ -+ .f_code[0][0] = s->mpeg_f_code[0][0], -+ .f_code[0][1] = s->mpeg_f_code[0][1], -+ .f_code[1][0] = s->mpeg_f_code[1][0], -+ .f_code[1][1] = s->mpeg_f_code[1][1], -+ .intra_dc_precision = s->intra_dc_precision, -+ .picture_structure = s->picture_structure, -+ .top_field_first = s->top_field_first, -+ .frame_pred_frame_dct = s->frame_pred_frame_dct, -+ .concealment_motion_vectors = s->concealment_motion_vectors, -+ .q_scale_type = s->q_scale_type, -+ .intra_vlc_format = s->intra_vlc_format, -+ .alternate_scan = s->alternate_scan, -+ .repeat_first_field = s->repeat_first_field, -+ .progressive_frame = s->progressive_frame, -+ }, -+ }; -+ -+ switch (s->pict_type) { -+ case AV_PICTURE_TYPE_B: -+ controls->slice_params.backward_ref_ts = ff_v4l2_request_get_capture_timestamp(s->next_picture.f); -+ // fall-through -+ case AV_PICTURE_TYPE_P: -+ controls->slice_params.forward_ref_ts = ff_v4l2_request_get_capture_timestamp(s->last_picture.f); -+ } -+ -+ controls->quantization = (struct v4l2_ctrl_mpeg2_quantization) { -+ /* ISO/IEC 13818-2, ITU-T Rec. 
H.262: Quant matrix extension */ -+ .load_intra_quantiser_matrix = 1, -+ .load_non_intra_quantiser_matrix = 1, -+ .load_chroma_intra_quantiser_matrix = 1, -+ .load_chroma_non_intra_quantiser_matrix = 1, -+ }; -+ -+ for (int i = 0; i < 64; i++) { -+ int n = s->idsp.idct_permutation[ff_zigzag_direct[i]]; -+ controls->quantization.intra_quantiser_matrix[i] = s->intra_matrix[n]; -+ controls->quantization.non_intra_quantiser_matrix[i] = s->inter_matrix[n]; -+ controls->quantization.chroma_intra_quantiser_matrix[i] = s->chroma_intra_matrix[n]; -+ controls->quantization.chroma_non_intra_quantiser_matrix[i] = s->chroma_inter_matrix[n]; -+ } -+ -+ return ff_v4l2_request_reset_frame(avctx, s->current_picture_ptr->f); -+} -+ -+static int v4l2_request_mpeg2_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size) -+{ -+ const MpegEncContext *s = avctx->priv_data; -+ -+ return ff_v4l2_request_append_output_buffer(avctx, s->current_picture_ptr->f, buffer, size); -+} -+ -+static int v4l2_request_mpeg2_end_frame(AVCodecContext *avctx) -+{ -+ const MpegEncContext *s = avctx->priv_data; -+ V4L2RequestControlsMPEG2 *controls = s->current_picture_ptr->hwaccel_picture_private; -+ V4L2RequestDescriptor *req = (V4L2RequestDescriptor*)s->current_picture_ptr->f->data[0]; -+ -+ struct v4l2_ext_control control[] = { -+ { -+ .id = V4L2_CID_MPEG_VIDEO_MPEG2_SLICE_PARAMS, -+ .ptr = &controls->slice_params, -+ .size = sizeof(controls->slice_params), -+ }, -+ { -+ .id = V4L2_CID_MPEG_VIDEO_MPEG2_QUANTIZATION, -+ .ptr = &controls->quantization, -+ .size = sizeof(controls->quantization), -+ }, -+ }; -+ -+ controls->slice_params.bit_size = req->output.used * 8; -+ -+ return ff_v4l2_request_decode_frame(avctx, s->current_picture_ptr->f, control, FF_ARRAY_ELEMS(control)); -+} -+ -+static int v4l2_request_mpeg2_init(AVCodecContext *avctx) -+{ -+ return ff_v4l2_request_init(avctx, V4L2_PIX_FMT_MPEG2_SLICE, 1024 * 1024, NULL, 0); -+} -+ -+const AVHWAccel ff_mpeg2_v4l2request_hwaccel = { -+ .name = "mpeg2_v4l2request", -+ .type = AVMEDIA_TYPE_VIDEO, -+ .id = AV_CODEC_ID_MPEG2VIDEO, -+ .pix_fmt = AV_PIX_FMT_DRM_PRIME, -+ .start_frame = v4l2_request_mpeg2_start_frame, -+ .decode_slice = v4l2_request_mpeg2_decode_slice, -+ .end_frame = v4l2_request_mpeg2_end_frame, -+ .frame_priv_data_size = sizeof(V4L2RequestControlsMPEG2), -+ .init = v4l2_request_mpeg2_init, -+ .uninit = ff_v4l2_request_uninit, -+ .priv_data_size = sizeof(V4L2RequestContext), -+ .frame_params = ff_v4l2_request_frame_params, -+ .caps_internal = HWACCEL_CAP_ASYNC_SAFE, ++struct ff_weak_link_master { ++ atomic_int ref_count; /* 0 is single ref for easier atomics */ ++ pthread_rwlock_t lock; ++ void * ptr; +}; -diff --git a/libavcodec/v4l2_request_vp8.c b/libavcodec/v4l2_request_vp8.c ++ ++static inline struct ff_weak_link_master * weak_link_x(struct ff_weak_link_client * c) ++{ ++ return (struct ff_weak_link_master *)c; ++} ++ ++struct ff_weak_link_master * ff_weak_link_new(void * p) ++{ ++ struct ff_weak_link_master * w = malloc(sizeof(*w)); ++ if (!w) ++ return NULL; ++ w->ptr = p; ++ if (pthread_rwlock_init(&w->lock, NULL)) { ++ free(w); ++ return NULL; ++ } ++ return w; ++} ++ ++static void weak_link_do_unref(struct ff_weak_link_master * const w) ++{ ++ int n = atomic_fetch_sub(&w->ref_count, 1); ++ if (n) ++ return; ++ ++ pthread_rwlock_destroy(&w->lock); ++ free(w); ++} ++ ++// Unref & break link ++void ff_weak_link_break(struct ff_weak_link_master ** ppLink) ++{ ++ struct ff_weak_link_master * const w = *ppLink; ++ if (!w) ++ return; ++ ++ 
*ppLink = NULL; ++ pthread_rwlock_wrlock(&w->lock); ++ w->ptr = NULL; ++ pthread_rwlock_unlock(&w->lock); ++ ++ weak_link_do_unref(w); ++} ++ ++struct ff_weak_link_client* ff_weak_link_ref(struct ff_weak_link_master * w) ++{ ++ atomic_fetch_add(&w->ref_count, 1); ++ return (struct ff_weak_link_client*)w; ++} ++ ++void ff_weak_link_unref(struct ff_weak_link_client ** ppLink) ++{ ++ struct ff_weak_link_master * const w = weak_link_x(*ppLink); ++ if (!w) ++ return; ++ ++ *ppLink = NULL; ++ weak_link_do_unref(w); ++} ++ ++void * ff_weak_link_lock(struct ff_weak_link_client ** ppLink) ++{ ++ struct ff_weak_link_master * const w = weak_link_x(*ppLink); ++ ++ if (!w) ++ return NULL; ++ ++ if (pthread_rwlock_rdlock(&w->lock)) ++ goto broken; ++ ++ if (w->ptr) ++ return w->ptr; ++ ++ pthread_rwlock_unlock(&w->lock); ++ ++broken: ++ *ppLink = NULL; ++ weak_link_do_unref(w); ++ return NULL; ++} ++ ++// Ignores a NULL c (so can be on the return path of both broken & live links) ++void ff_weak_link_unlock(struct ff_weak_link_client * c) ++{ ++ struct ff_weak_link_master * const w = weak_link_x(c); ++ if (w) ++ pthread_rwlock_unlock(&w->lock); ++} ++ ++ +diff --git a/libavcodec/weak_link.h b/libavcodec/weak_link.h new file mode 100644 -index 0000000000..ea2c55fa2f +index 0000000000..415b6a27a0 --- /dev/null -+++ b/libavcodec/v4l2_request_vp8.c -@@ -0,0 +1,181 @@ -+/* -+ * This file is part of FFmpeg. -+ * -+ * FFmpeg is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU Lesser General Public -+ * License as published by the Free Software Foundation; either -+ * version 2.1 of the License, or (at your option) any later version. -+ * -+ * FFmpeg is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * Lesser General Public License for more details. -+ * -+ * You should have received a copy of the GNU Lesser General Public -+ * License along with FFmpeg; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA -+ */ ++++ b/libavcodec/weak_link.h +@@ -0,0 +1,23 @@ ++struct ff_weak_link_master; ++struct ff_weak_link_client; + -+#include "hwconfig.h" -+#include "v4l2_request.h" -+#include "vp8.h" -+#include "vp8-ctrls.h" ++struct ff_weak_link_master * ff_weak_link_new(void * p); ++void ff_weak_link_break(struct ff_weak_link_master ** ppLink); + -+typedef struct V4L2RequestControlsVP8 { -+ struct v4l2_ctrl_vp8_frame_header ctrl; -+} V4L2RequestControlsVP8; ++struct ff_weak_link_client* ff_weak_link_ref(struct ff_weak_link_master * w); ++void ff_weak_link_unref(struct ff_weak_link_client ** ppLink); + -+static int v4l2_request_vp8_start_frame(AVCodecContext *avctx, -+ av_unused const uint8_t *buffer, -+ av_unused uint32_t size) -+{ -+ const VP8Context *s = avctx->priv_data; -+ V4L2RequestControlsVP8 *controls = s->framep[VP56_FRAME_CURRENT]->hwaccel_picture_private; ++// Returns NULL if link broken - in this case it will also zap ++// *ppLink and unref the weak_link. 
++// Returns NULL if *ppLink is NULL (so a link once broken stays broken) ++// ++// The above does mean that there is a race if this is called simultainiously ++// by two threads using the same weak_link_client (so don't do that) ++void * ff_weak_link_lock(struct ff_weak_link_client ** ppLink); ++void ff_weak_link_unlock(struct ff_weak_link_client * c); + -+ memset(&controls->ctrl, 0, sizeof(controls->ctrl)); -+ return ff_v4l2_request_reset_frame(avctx, s->framep[VP56_FRAME_CURRENT]->tf.f); -+} + -+static int v4l2_request_vp8_end_frame(AVCodecContext *avctx) -+{ -+ const VP8Context *s = avctx->priv_data; -+ V4L2RequestControlsVP8 *controls = s->framep[VP56_FRAME_CURRENT]->hwaccel_picture_private; -+ struct v4l2_ext_control control[] = { -+ { -+ .id = V4L2_CID_MPEG_VIDEO_VP8_FRAME_HEADER, -+ .ptr = &controls->ctrl, -+ .size = sizeof(controls->ctrl), -+ }, -+ }; + -+ return ff_v4l2_request_decode_frame(avctx, s->framep[VP56_FRAME_CURRENT]->tf.f, -+ control, FF_ARRAY_ELEMS(control)); -+} + -+static int v4l2_request_vp8_decode_slice(AVCodecContext *avctx, -+ const uint8_t *buffer, -+ uint32_t size) -+{ -+ const VP8Context *s = avctx->priv_data; -+ V4L2RequestControlsVP8 *controls = s->framep[VP56_FRAME_CURRENT]->hwaccel_picture_private; -+ struct v4l2_ctrl_vp8_frame_header *hdr = &controls->ctrl; -+ const uint8_t *data = buffer + 3 + 7 * s->keyframe; -+ unsigned int i, j, k; + -+ hdr->version = s->profile & 0x3; -+ hdr->width = avctx->width; -+ hdr->height = avctx->height; -+ /* FIXME: set ->xx_scale */ -+ hdr->prob_skip_false = s->prob->mbskip; -+ hdr->prob_intra = s->prob->intra; -+ hdr->prob_gf = s->prob->golden; -+ hdr->prob_last = s->prob->last; -+ hdr->first_part_size = s->header_partition_size; -+ hdr->first_part_header_bits = (8 * (s->coder_state_at_header_end.input - data) - -+ s->coder_state_at_header_end.bit_count - 8); -+ hdr->num_dct_parts = s->num_coeff_partitions; -+ for (i = 0; i < 8; i++) -+ hdr->dct_part_sizes[i] = s->coeff_partition_size[i]; + -+ hdr->coder_state.range = s->coder_state_at_header_end.range; -+ hdr->coder_state.value = s->coder_state_at_header_end.value; -+ hdr->coder_state.bit_count = s->coder_state_at_header_end.bit_count; -+ if (s->framep[VP56_FRAME_PREVIOUS]) -+ hdr->last_frame_ts = ff_v4l2_request_get_capture_timestamp(s->framep[VP56_FRAME_PREVIOUS]->tf.f); -+ if (s->framep[VP56_FRAME_GOLDEN]) -+ hdr->golden_frame_ts = ff_v4l2_request_get_capture_timestamp(s->framep[VP56_FRAME_GOLDEN]->tf.f); -+ if (s->framep[VP56_FRAME_GOLDEN2]) -+ hdr->alt_frame_ts = ff_v4l2_request_get_capture_timestamp(s->framep[VP56_FRAME_GOLDEN2]->tf.f); -+ hdr->flags |= s->invisible ? 0 : V4L2_VP8_FRAME_HEADER_FLAG_SHOW_FRAME; -+ hdr->flags |= s->mbskip_enabled ? V4L2_VP8_FRAME_HEADER_FLAG_MB_NO_SKIP_COEFF : 0; -+ hdr->flags |= (s->profile & 0x4) ? V4L2_VP8_FRAME_HEADER_FLAG_EXPERIMENTAL : 0; -+ hdr->flags |= s->keyframe ? V4L2_VP8_FRAME_HEADER_FLAG_KEY_FRAME : 0; -+ hdr->flags |= s->sign_bias[VP56_FRAME_GOLDEN] ? V4L2_VP8_FRAME_HEADER_FLAG_SIGN_BIAS_GOLDEN : 0; -+ hdr->flags |= s->sign_bias[VP56_FRAME_GOLDEN2] ? V4L2_VP8_FRAME_HEADER_FLAG_SIGN_BIAS_ALT : 0; -+ hdr->segment_header.flags |= s->segmentation.enabled ? V4L2_VP8_SEGMENT_HEADER_FLAG_ENABLED : 0; -+ hdr->segment_header.flags |= s->segmentation.update_map ? V4L2_VP8_SEGMENT_HEADER_FLAG_UPDATE_MAP : 0; -+ hdr->segment_header.flags |= s->segmentation.update_feature_data ? V4L2_VP8_SEGMENT_HEADER_FLAG_UPDATE_FEATURE_DATA : 0; -+ hdr->segment_header.flags |= s->segmentation.absolute_vals ? 
0 : V4L2_VP8_SEGMENT_HEADER_FLAG_DELTA_VALUE_MODE; -+ for (i = 0; i < 4; i++) { -+ hdr->segment_header.quant_update[i] = s->segmentation.base_quant[i]; -+ hdr->segment_header.lf_update[i] = s->segmentation.filter_level[i]; -+ } -+ -+ for (i = 0; i < 3; i++) -+ hdr->segment_header.segment_probs[i] = s->prob->segmentid[i]; -+ -+ hdr->lf_header.level = s->filter.level; -+ hdr->lf_header.sharpness_level = s->filter.sharpness; -+ hdr->lf_header.flags |= s->lf_delta.enabled ? V4L2_VP8_LF_HEADER_ADJ_ENABLE : 0; -+ hdr->lf_header.flags |= s->lf_delta.update ? V4L2_VP8_LF_HEADER_DELTA_UPDATE : 0; -+ hdr->lf_header.flags |= s->filter.simple ? V4L2_VP8_LF_FILTER_TYPE_SIMPLE : 0; -+ for (i = 0; i < 4; i++) { -+ hdr->lf_header.ref_frm_delta[i] = s->lf_delta.ref[i]; -+ hdr->lf_header.mb_mode_delta[i] = s->lf_delta.mode[i + MODE_I4x4]; -+ } -+ -+ // Probabilites -+ if (s->keyframe) { -+ static const uint8_t keyframe_y_mode_probs[4] = { -+ 145, 156, 163, 128 -+ }; -+ static const uint8_t keyframe_uv_mode_probs[3] = { -+ 142, 114, 183 -+ }; -+ -+ memcpy(hdr->entropy_header.y_mode_probs, keyframe_y_mode_probs, 4); -+ memcpy(hdr->entropy_header.uv_mode_probs, keyframe_uv_mode_probs, 3); -+ } else { -+ for (i = 0; i < 4; i++) -+ hdr->entropy_header.y_mode_probs[i] = s->prob->pred16x16[i]; -+ for (i = 0; i < 3; i++) -+ hdr->entropy_header.uv_mode_probs[i] = s->prob->pred8x8c[i]; -+ } -+ for (i = 0; i < 2; i++) -+ for (j = 0; j < 19; j++) -+ hdr->entropy_header.mv_probs[i][j] = s->prob->mvc[i][j]; -+ -+ for (i = 0; i < 4; i++) { -+ for (j = 0; j < 8; j++) { -+ static const int coeff_bands_inverse[8] = { -+ 0, 1, 2, 3, 5, 6, 4, 15 -+ }; -+ int coeff_pos = coeff_bands_inverse[j]; -+ -+ for (k = 0; k < 3; k++) { -+ memcpy(hdr->entropy_header.coeff_probs[i][j][k], -+ s->prob->token[i][coeff_pos][k], 11); -+ } -+ } -+ } -+ -+ hdr->quant_header.y_ac_qi = s->quant.yac_qi; -+ hdr->quant_header.y_dc_delta = s->quant.ydc_delta; -+ hdr->quant_header.y2_dc_delta = s->quant.y2dc_delta; -+ hdr->quant_header.y2_ac_delta = s->quant.y2ac_delta; -+ hdr->quant_header.uv_dc_delta = s->quant.uvdc_delta; -+ hdr->quant_header.uv_ac_delta = s->quant.uvac_delta; -+ -+ return ff_v4l2_request_append_output_buffer(avctx, s->framep[VP56_FRAME_CURRENT]->tf.f, buffer, size); -+} -+ -+static int v4l2_request_vp8_init(AVCodecContext *avctx) -+{ -+ return ff_v4l2_request_init(avctx, V4L2_PIX_FMT_VP8_FRAME, 2 * 1024 * 1024, NULL, 0); -+} -+ -+const AVHWAccel ff_vp8_v4l2request_hwaccel = { -+ .name = "vp8_v4l2request", -+ .type = AVMEDIA_TYPE_VIDEO, -+ .id = AV_CODEC_ID_VP8, -+ .pix_fmt = AV_PIX_FMT_DRM_PRIME, -+ .start_frame = v4l2_request_vp8_start_frame, -+ .decode_slice = v4l2_request_vp8_decode_slice, -+ .end_frame = v4l2_request_vp8_end_frame, -+ .frame_priv_data_size = sizeof(V4L2RequestControlsVP8), -+ .init = v4l2_request_vp8_init, -+ .uninit = ff_v4l2_request_uninit, -+ .priv_data_size = sizeof(V4L2RequestContext), -+ .frame_params = ff_v4l2_request_frame_params, -+ .caps_internal = HWACCEL_CAP_ASYNC_SAFE, -+}; -diff --git a/libavcodec/v4l2_request_vp9.c b/libavcodec/v4l2_request_vp9.c -new file mode 100644 -index 0000000000..2e10b7ad1a ---- /dev/null -+++ b/libavcodec/v4l2_request_vp9.c -@@ -0,0 +1,353 @@ -+/* -+ * This file is part of FFmpeg. -+ * -+ * FFmpeg is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU Lesser General Public -+ * License as published by the Free Software Foundation; either -+ * version 2.1 of the License, or (at your option) any later version. 
-+ * -+ * FFmpeg is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ * Lesser General Public License for more details. -+ * -+ * You should have received a copy of the GNU Lesser General Public -+ * License along with FFmpeg; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA -+ */ -+ -+#include "hwconfig.h" -+#include "v4l2_request.h" -+#include "vp9dec.h" -+#include "vp9-ctrls.h" -+ -+typedef struct V4L2RequestControlsVP9 { -+ struct v4l2_ctrl_vp9_frame_decode_params decode_params; -+} V4L2RequestControlsVP9; -+ -+static const uint8_t ff_to_v4l2_intramode[] = { -+ [VERT_PRED] = V4L2_VP9_INTRA_PRED_MODE_V, -+ [HOR_PRED] = V4L2_VP9_INTRA_PRED_MODE_H, -+ [DC_PRED] = V4L2_VP9_INTRA_PRED_MODE_DC, -+ [DIAG_DOWN_LEFT_PRED] = V4L2_VP9_INTRA_PRED_MODE_D45, -+ [DIAG_DOWN_RIGHT_PRED] = V4L2_VP9_INTRA_PRED_MODE_D135, -+ [VERT_RIGHT_PRED] = V4L2_VP9_INTRA_PRED_MODE_D117, -+ [HOR_DOWN_PRED] = V4L2_VP9_INTRA_PRED_MODE_D153, -+ [VERT_LEFT_PRED] = V4L2_VP9_INTRA_PRED_MODE_D63, -+ [HOR_UP_PRED] = V4L2_VP9_INTRA_PRED_MODE_D207, -+ [TM_VP8_PRED] = V4L2_VP9_INTRA_PRED_MODE_TM, -+}; -+ -+static int v4l2_request_vp9_set_frame_ctx(AVCodecContext *avctx, unsigned int id) -+{ -+ VP9Context *s = avctx->priv_data; -+ struct v4l2_ctrl_vp9_frame_ctx fctx = {}; -+ struct v4l2_ext_control control[] = { -+ { -+ .id = V4L2_CID_MPEG_VIDEO_VP9_FRAME_CONTEXT(id), -+ .ptr = &fctx, -+ .size = sizeof(fctx), -+ }, -+ }; -+ -+ memcpy(fctx.probs.tx8, s->prob_ctx[id].p.tx8p, sizeof(s->prob_ctx[id].p.tx8p)); -+ memcpy(fctx.probs.tx16, s->prob_ctx[id].p.tx16p, sizeof(s->prob_ctx[id].p.tx16p)); -+ memcpy(fctx.probs.tx32, s->prob_ctx[id].p.tx32p, sizeof(s->prob_ctx[id].p.tx32p)); -+ memcpy(fctx.probs.coef, s->prob_ctx[id].coef, sizeof(s->prob_ctx[id].coef)); -+ memcpy(fctx.probs.skip, s->prob_ctx[id].p.skip, sizeof(s->prob_ctx[id].p.skip)); -+ memcpy(fctx.probs.inter_mode, s->prob_ctx[id].p.mv_mode, sizeof(s->prob_ctx[id].p.mv_mode)); -+ memcpy(fctx.probs.interp_filter, s->prob_ctx[id].p.filter, sizeof(s->prob_ctx[id].p.filter)); -+ memcpy(fctx.probs.is_inter, s->prob_ctx[id].p.intra, sizeof(s->prob_ctx[id].p.intra)); -+ memcpy(fctx.probs.comp_mode, s->prob_ctx[id].p.comp, sizeof(s->prob_ctx[id].p.comp)); -+ memcpy(fctx.probs.single_ref, s->prob_ctx[id].p.single_ref, sizeof(s->prob_ctx[id].p.single_ref)); -+ memcpy(fctx.probs.comp_ref, s->prob_ctx[id].p.comp_ref, sizeof(s->prob_ctx[id].p.comp_ref)); -+ memcpy(fctx.probs.y_mode, s->prob_ctx[id].p.y_mode, sizeof(s->prob_ctx[id].p.y_mode)); -+ for (unsigned i = 0; i < 10; i++) -+ memcpy(fctx.probs.uv_mode[ff_to_v4l2_intramode[i]], s->prob_ctx[id].p.uv_mode[i], sizeof(s->prob_ctx[id].p.uv_mode[0])); -+ for (unsigned i = 0; i < 4; i++) -+ memcpy(fctx.probs.partition[i * 4], s->prob_ctx[id].p.partition[3 - i], sizeof(s->prob_ctx[id].p.partition[0])); -+ memcpy(fctx.probs.mv.joint, s->prob_ctx[id].p.mv_joint, sizeof(s->prob_ctx[id].p.mv_joint)); -+ for (unsigned i = 0; i < 2; i++) { -+ fctx.probs.mv.sign[i] = s->prob_ctx[id].p.mv_comp[i].sign; -+ memcpy(fctx.probs.mv.class[i], s->prob_ctx[id].p.mv_comp[i].classes, sizeof(s->prob_ctx[id].p.mv_comp[0].classes)); -+ fctx.probs.mv.class0_bit[i] = s->prob_ctx[id].p.mv_comp[i].class0; -+ memcpy(fctx.probs.mv.bits[i], s->prob_ctx[id].p.mv_comp[i].bits, sizeof(s->prob_ctx[id].p.mv_comp[0].bits)); -+ memcpy(fctx.probs.mv.class0_fr[i], 
s->prob_ctx[id].p.mv_comp[i].class0_fp, sizeof(s->prob_ctx[id].p.mv_comp[0].class0_fp)); -+ memcpy(fctx.probs.mv.fr[i], s->prob_ctx[id].p.mv_comp[i].fp, sizeof(s->prob_ctx[id].p.mv_comp[0].fp)); -+ fctx.probs.mv.class0_hp[i] = s->prob_ctx[id].p.mv_comp[i].class0_hp; -+ fctx.probs.mv.hp[i] = s->prob_ctx[id].p.mv_comp[i].hp; -+ } -+ -+ return ff_v4l2_request_set_controls(avctx, control, FF_ARRAY_ELEMS(control)); -+} -+ -+static int v4l2_request_vp9_get_frame_ctx(AVCodecContext *avctx, unsigned int id) -+{ -+ VP9Context *s = avctx->priv_data; -+ struct v4l2_ctrl_vp9_frame_ctx fctx = {}; -+ struct v4l2_ext_control control[] = { -+ { -+ .id = V4L2_CID_MPEG_VIDEO_VP9_FRAME_CONTEXT(id), -+ .ptr = &fctx, -+ .size = sizeof(fctx), -+ }, -+ }; -+ -+ int ret = ff_v4l2_request_get_controls(avctx, control, FF_ARRAY_ELEMS(control)); -+ if (ret) -+ return ret; -+ -+ memcpy(s->prob_ctx[id].p.tx8p, fctx.probs.tx8, sizeof(s->prob_ctx[id].p.tx8p)); -+ memcpy(s->prob_ctx[id].p.tx16p, fctx.probs.tx16, sizeof(s->prob_ctx[id].p.tx16p)); -+ memcpy(s->prob_ctx[id].p.tx32p, fctx.probs.tx32, sizeof(s->prob_ctx[id].p.tx32p)); -+ memcpy(s->prob_ctx[id].coef, fctx.probs.coef, sizeof(s->prob_ctx[id].coef)); -+ memcpy(s->prob_ctx[id].p.skip, fctx.probs.skip, sizeof(s->prob_ctx[id].p.skip)); -+ memcpy(s->prob_ctx[id].p.mv_mode, fctx.probs.inter_mode, sizeof(s->prob_ctx[id].p.mv_mode)); -+ memcpy(s->prob_ctx[id].p.filter, fctx.probs.interp_filter, sizeof(s->prob_ctx[id].p.filter)); -+ memcpy(s->prob_ctx[id].p.intra, fctx.probs.is_inter, sizeof(s->prob_ctx[id].p.intra)); -+ memcpy(s->prob_ctx[id].p.comp, fctx.probs.comp_mode, sizeof(s->prob_ctx[id].p.comp)); -+ memcpy(s->prob_ctx[id].p.single_ref, fctx.probs.single_ref, sizeof(s->prob_ctx[id].p.single_ref)); -+ memcpy(s->prob_ctx[id].p.comp_ref, fctx.probs.comp_ref, sizeof(s->prob_ctx[id].p.comp_ref)); -+ memcpy(s->prob_ctx[id].p.y_mode, fctx.probs.y_mode, sizeof(s->prob_ctx[id].p.y_mode)); -+ for (unsigned i = 0; i < 10; i++) -+ memcpy(s->prob_ctx[id].p.uv_mode[i], fctx.probs.uv_mode[ff_to_v4l2_intramode[i]], sizeof(s->prob_ctx[id].p.uv_mode[0])); -+ for (unsigned i = 0; i < 4; i++) -+ memcpy(s->prob_ctx[id].p.partition[3 - i], fctx.probs.partition[i * 4], sizeof(s->prob_ctx[id].p.partition[0])); -+ memcpy(s->prob_ctx[id].p.mv_joint, fctx.probs.mv.joint, sizeof(s->prob_ctx[id].p.mv_joint)); -+ for (unsigned i = 0; i < 2; i++) { -+ s->prob_ctx[id].p.mv_comp[i].sign = fctx.probs.mv.sign[i]; -+ memcpy(s->prob_ctx[id].p.mv_comp[i].classes, fctx.probs.mv.class[i], sizeof(s->prob_ctx[id].p.mv_comp[0].classes)); -+ s->prob_ctx[id].p.mv_comp[i].class0 = fctx.probs.mv.class0_bit[i]; -+ memcpy(s->prob_ctx[id].p.mv_comp[i].bits, fctx.probs.mv.bits[i], sizeof(s->prob_ctx[id].p.mv_comp[0].bits)); -+ memcpy(s->prob_ctx[id].p.mv_comp[i].class0_fp, fctx.probs.mv.class0_fr[i], sizeof(s->prob_ctx[id].p.mv_comp[0].class0_fp)); -+ memcpy(s->prob_ctx[id].p.mv_comp[i].fp, fctx.probs.mv.fr[i], sizeof(s->prob_ctx[id].p.mv_comp[0].fp)); -+ s->prob_ctx[id].p.mv_comp[i].class0_hp = fctx.probs.mv.class0_hp[i]; -+ s->prob_ctx[id].p.mv_comp[i].hp = fctx.probs.mv.hp[i]; -+ } -+ -+ return 0; -+} -+ -+static int v4l2_request_vp9_start_frame(AVCodecContext *avctx, -+ av_unused const uint8_t *buffer, -+ av_unused uint32_t size) -+{ -+ const VP9Context *s = avctx->priv_data; -+ const VP9Frame *f = &s->s.frames[CUR_FRAME]; -+ V4L2RequestControlsVP9 *controls = f->hwaccel_picture_private; -+ struct v4l2_ctrl_vp9_frame_decode_params *dec_params = &controls->decode_params; -+ int ret; -+ -+ if (s->s.h.keyframe || 
s->s.h.errorres || (s->s.h.intraonly && s->s.h.resetctx == 3)) { -+ for (unsigned i = 0; i < 4; i++) { -+ ret = v4l2_request_vp9_set_frame_ctx(avctx, i); -+ if (ret) -+ return ret; -+ } -+ } else if (s->s.h.intraonly && s->s.h.resetctx == 2) { -+ ret = v4l2_request_vp9_set_frame_ctx(avctx, s->s.h.framectxid); -+ if (ret) -+ return ret; -+ } -+ -+ if (s->s.h.keyframe) -+ dec_params->flags |= V4L2_VP9_FRAME_FLAG_KEY_FRAME; -+ if (!s->s.h.invisible) -+ dec_params->flags |= V4L2_VP9_FRAME_FLAG_SHOW_FRAME; -+ if (s->s.h.errorres) -+ dec_params->flags |= V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT; -+ if (s->s.h.intraonly) -+ dec_params->flags |= V4L2_VP9_FRAME_FLAG_INTRA_ONLY; -+ if (!s->s.h.keyframe && s->s.h.highprecisionmvs) -+ dec_params->flags |= V4L2_VP9_FRAME_FLAG_ALLOW_HIGH_PREC_MV; -+ if (s->s.h.refreshctx) -+ dec_params->flags |= V4L2_VP9_FRAME_FLAG_REFRESH_FRAME_CTX; -+ if (s->s.h.parallelmode) -+ dec_params->flags |= V4L2_VP9_FRAME_FLAG_PARALLEL_DEC_MODE; -+ if (s->ss_h) -+ dec_params->flags |= V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING; -+ if (s->ss_v) -+ dec_params->flags |= V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING; -+ if (avctx->color_range == AVCOL_RANGE_JPEG) -+ dec_params->flags |= V4L2_VP9_FRAME_FLAG_COLOR_RANGE_FULL_SWING; -+ -+ dec_params->compressed_header_size = s->s.h.compressed_header_size; -+ dec_params->uncompressed_header_size = s->s.h.uncompressed_header_size; -+ dec_params->profile = s->s.h.profile; -+ dec_params->reset_frame_context = s->s.h.resetctx > 0 ? s->s.h.resetctx - 1 : 0; -+ dec_params->frame_context_idx = s->s.h.framectxid; -+ dec_params->bit_depth = s->s.h.bpp; -+ -+ dec_params->interpolation_filter = s->s.h.filtermode ^ (s->s.h.filtermode <= 1); -+ dec_params->tile_cols_log2 = s->s.h.tiling.log2_tile_cols; -+ dec_params->tile_rows_log2 = s->s.h.tiling.log2_tile_rows; -+ dec_params->tx_mode = s->s.h.txfmmode; -+ dec_params->reference_mode = s->s.h.comppredmode; -+ dec_params->frame_width_minus_1 = s->w - 1; -+ dec_params->frame_height_minus_1 = s->h - 1; -+ //dec_params->render_width_minus_1 = avctx->width - 1; -+ //dec_params->render_height_minus_1 = avctx->height - 1; -+ -+ for (unsigned i = 0; i < 3; i++) { -+ const ThreadFrame *ref = &s->s.refs[s->s.h.refidx[i]]; -+ if (ref->f && ref->f->buf[0]) -+ dec_params->refs[i] = ff_v4l2_request_get_capture_timestamp(ref->f); -+ } -+ -+ if (s->s.h.lf_delta.enabled) -+ dec_params->lf.flags |= V4L2_VP9_LOOP_FILTER_FLAG_DELTA_ENABLED; -+ if (s->s.h.lf_delta.updated) -+ dec_params->lf.flags |= V4L2_VP9_LOOP_FILTER_FLAG_DELTA_UPDATE; -+ -+ dec_params->lf.level = s->s.h.filter.level; -+ dec_params->lf.sharpness = s->s.h.filter.sharpness; -+ for (unsigned i = 0; i < 4; i++) -+ dec_params->lf.ref_deltas[i] = s->s.h.lf_delta.ref[i]; -+ for (unsigned i = 0; i < 2; i++) -+ dec_params->lf.mode_deltas[i] = s->s.h.lf_delta.mode[i]; -+ for (unsigned i = 0; i < 8; i++) { -+ for (unsigned j = 0; j < 4; j++) -+ memcpy(dec_params->lf.level_lookup[i][j], s->s.h.segmentation.feat[i].lflvl[j], sizeof(dec_params->lf.level_lookup[0][0])); -+ } -+ -+ dec_params->quant.base_q_idx = s->s.h.yac_qi; -+ dec_params->quant.delta_q_y_dc = s->s.h.ydc_qdelta; -+ dec_params->quant.delta_q_uv_dc = s->s.h.uvdc_qdelta; -+ dec_params->quant.delta_q_uv_ac = s->s.h.uvac_qdelta; -+ -+ if (s->s.h.segmentation.enabled) -+ dec_params->seg.flags |= V4L2_VP9_SEGMENTATION_FLAG_ENABLED; -+ if (s->s.h.segmentation.update_map) -+ dec_params->seg.flags |= V4L2_VP9_SEGMENTATION_FLAG_UPDATE_MAP; -+ if (s->s.h.segmentation.temporal) -+ dec_params->seg.flags |= 
V4L2_VP9_SEGMENTATION_FLAG_TEMPORAL_UPDATE; -+ if (s->s.h.segmentation.update_data) -+ dec_params->seg.flags |= V4L2_VP9_SEGMENTATION_FLAG_UPDATE_DATA; -+ if (s->s.h.segmentation.absolute_vals) -+ dec_params->seg.flags |= V4L2_VP9_SEGMENTATION_FLAG_ABS_OR_DELTA_UPDATE; -+ -+ for (unsigned i = 0; i < 7; i++) -+ dec_params->seg.tree_probs[i] = s->s.h.segmentation.prob[i]; -+ -+ if (s->s.h.segmentation.temporal) { -+ for (unsigned i = 0; i < 3; i++) -+ dec_params->seg.pred_probs[i] = s->s.h.segmentation.pred_prob[i]; -+ } else { -+ memset(dec_params->seg.pred_probs, 255, sizeof(dec_params->seg.pred_probs)); -+ } -+ -+ for (unsigned i = 0; i < 8; i++) { -+ if (s->s.h.segmentation.feat[i].q_enabled) { -+ dec_params->seg.feature_enabled[i] |= 1 << V4L2_VP9_SEGMENT_FEATURE_QP_DELTA; -+ dec_params->seg.feature_data[i][V4L2_VP9_SEGMENT_FEATURE_QP_DELTA] = s->s.h.segmentation.feat[i].q_val; -+ } -+ -+ if (s->s.h.segmentation.feat[i].lf_enabled) { -+ dec_params->seg.feature_enabled[i] |= 1 << V4L2_VP9_SEGMENT_FEATURE_LF; -+ dec_params->seg.feature_data[i][V4L2_VP9_SEGMENT_FEATURE_LF] = s->s.h.segmentation.feat[i].lf_val; -+ } -+ -+ if (s->s.h.segmentation.feat[i].ref_enabled) { -+ dec_params->seg.feature_enabled[i] |= 1 << V4L2_VP9_SEGMENT_FEATURE_REF_FRAME; -+ dec_params->seg.feature_data[i][V4L2_VP9_SEGMENT_FEATURE_REF_FRAME] = s->s.h.segmentation.feat[i].ref_val; -+ } -+ -+ if (s->s.h.segmentation.feat[i].skip_enabled) -+ dec_params->seg.feature_enabled[i] |= 1 << V4L2_VP9_SEGMENT_FEATURE_SKIP; -+ } -+ -+ memcpy(dec_params->probs.tx8, s->prob.p.tx8p, sizeof(s->prob.p.tx8p)); -+ memcpy(dec_params->probs.tx16, s->prob.p.tx16p, sizeof(s->prob.p.tx16p)); -+ memcpy(dec_params->probs.tx32, s->prob.p.tx32p, sizeof(s->prob.p.tx32p)); -+ for (unsigned i = 0; i < 4; i++) { -+ for (unsigned j = 0; j < 2; j++) { -+ for (unsigned k = 0; k < 2; k++) { -+ for (unsigned l = 0; l < 6; l++) { -+ for (unsigned m = 0; m < 6; m++) { -+ memcpy(dec_params->probs.coef[i][j][k][l][m], s->prob.coef[i][j][k][l][m], sizeof(dec_params->probs.coef[0][0][0][0][0])); -+ } -+ } -+ } -+ } -+ } -+ memcpy(dec_params->probs.skip, s->prob.p.skip, sizeof(s->prob.p.skip)); -+ memcpy(dec_params->probs.inter_mode, s->prob.p.mv_mode, sizeof(s->prob.p.mv_mode)); -+ memcpy(dec_params->probs.interp_filter, s->prob.p.filter, sizeof(s->prob.p.filter)); -+ memcpy(dec_params->probs.is_inter, s->prob.p.intra, sizeof(s->prob.p.intra)); -+ memcpy(dec_params->probs.comp_mode, s->prob.p.comp, sizeof(s->prob.p.comp)); -+ memcpy(dec_params->probs.single_ref, s->prob.p.single_ref, sizeof(s->prob.p.single_ref)); -+ memcpy(dec_params->probs.comp_ref, s->prob.p.comp_ref, sizeof(s->prob.p.comp_ref)); -+ memcpy(dec_params->probs.y_mode, s->prob.p.y_mode, sizeof(s->prob.p.y_mode)); -+ for (unsigned i = 0; i < 10; i++) -+ memcpy(dec_params->probs.uv_mode[ff_to_v4l2_intramode[i]], s->prob.p.uv_mode[i], sizeof(s->prob.p.uv_mode[0])); -+ for (unsigned i = 0; i < 4; i++) -+ memcpy(dec_params->probs.partition[i * 4], s->prob.p.partition[3 - i], sizeof(s->prob.p.partition[0])); -+ memcpy(dec_params->probs.mv.joint, s->prob.p.mv_joint, sizeof(s->prob.p.mv_joint)); -+ for (unsigned i = 0; i < 2; i++) { -+ dec_params->probs.mv.sign[i] = s->prob.p.mv_comp[i].sign; -+ memcpy(dec_params->probs.mv.class[i], s->prob.p.mv_comp[i].classes, sizeof(s->prob.p.mv_comp[0].classes)); -+ dec_params->probs.mv.class0_bit[i] = s->prob.p.mv_comp[i].class0; -+ memcpy(dec_params->probs.mv.bits[i], s->prob.p.mv_comp[i].bits, sizeof(s->prob.p.mv_comp[0].bits)); -+ 
memcpy(dec_params->probs.mv.class0_fr[i], s->prob.p.mv_comp[i].class0_fp, sizeof(s->prob.p.mv_comp[0].class0_fp)); -+ memcpy(dec_params->probs.mv.fr[i], s->prob.p.mv_comp[i].fp, sizeof(s->prob.p.mv_comp[0].fp)); -+ dec_params->probs.mv.class0_hp[i] = s->prob.p.mv_comp[i].class0_hp; -+ dec_params->probs.mv.hp[i] = s->prob.p.mv_comp[i].hp; -+ } -+ -+ return ff_v4l2_request_reset_frame(avctx, f->tf.f); -+} -+ -+static int v4l2_request_vp9_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size) -+{ -+ const VP9Context *s = avctx->priv_data; -+ const VP9Frame *f = &s->s.frames[CUR_FRAME]; -+ -+ return ff_v4l2_request_append_output_buffer(avctx, f->tf.f, buffer, size); -+} -+ -+static int v4l2_request_vp9_end_frame(AVCodecContext *avctx) -+{ -+ const VP9Context *s = avctx->priv_data; -+ const VP9Frame *f = &s->s.frames[CUR_FRAME]; -+ V4L2RequestControlsVP9 *controls = f->hwaccel_picture_private; -+ int ret; -+ -+ struct v4l2_ext_control control[] = { -+ { -+ .id = V4L2_CID_MPEG_VIDEO_VP9_FRAME_DECODE_PARAMS, -+ .ptr = &controls->decode_params, -+ .size = sizeof(controls->decode_params), -+ }, -+ }; -+ -+ ret = ff_v4l2_request_decode_frame(avctx, f->tf.f, control, FF_ARRAY_ELEMS(control)); -+ if (ret) -+ return ret; -+ -+ if (!s->s.h.refreshctx) -+ return 0; -+ -+ return v4l2_request_vp9_get_frame_ctx(avctx, s->s.h.framectxid); -+} -+ -+static int v4l2_request_vp9_init(AVCodecContext *avctx) -+{ -+ // TODO: check V4L2_CID_MPEG_VIDEO_VP9_PROFILE -+ return ff_v4l2_request_init(avctx, V4L2_PIX_FMT_VP9_FRAME, 3 * 1024 * 1024, NULL, 0); -+} -+ -+const AVHWAccel ff_vp9_v4l2request_hwaccel = { -+ .name = "vp9_v4l2request", -+ .type = AVMEDIA_TYPE_VIDEO, -+ .id = AV_CODEC_ID_VP9, -+ .pix_fmt = AV_PIX_FMT_DRM_PRIME, -+ .start_frame = v4l2_request_vp9_start_frame, -+ .decode_slice = v4l2_request_vp9_decode_slice, -+ .end_frame = v4l2_request_vp9_end_frame, -+ .frame_priv_data_size = sizeof(V4L2RequestControlsVP9), -+ .init = v4l2_request_vp9_init, -+ .uninit = ff_v4l2_request_uninit, -+ .priv_data_size = sizeof(V4L2RequestContext), -+ .frame_params = ff_v4l2_request_frame_params, -+ .caps_internal = HWACCEL_CAP_ASYNC_SAFE, -+}; -diff --git a/libavcodec/vp8-ctrls.h b/libavcodec/vp8-ctrls.h -new file mode 100644 -index 0000000000..53cba826e4 ---- /dev/null -+++ b/libavcodec/vp8-ctrls.h -@@ -0,0 +1,112 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* -+ * These are the VP8 state controls for use with stateless VP8 -+ * codec drivers. -+ * -+ * It turns out that these structs are not stable yet and will undergo -+ * more changes. So keep them private until they are stable and ready to -+ * become part of the official public API. 
-+ */ -+ -+#ifndef _VP8_CTRLS_H_ -+#define _VP8_CTRLS_H_ -+ -+#include -+ -+#define V4L2_PIX_FMT_VP8_FRAME v4l2_fourcc('V', 'P', '8', 'F') -+ -+#define V4L2_CID_MPEG_VIDEO_VP8_FRAME_HEADER (V4L2_CID_MPEG_BASE + 2000) -+#define V4L2_CTRL_TYPE_VP8_FRAME_HEADER 0x301 -+ -+#define V4L2_VP8_SEGMENT_HEADER_FLAG_ENABLED 0x01 -+#define V4L2_VP8_SEGMENT_HEADER_FLAG_UPDATE_MAP 0x02 -+#define V4L2_VP8_SEGMENT_HEADER_FLAG_UPDATE_FEATURE_DATA 0x04 -+#define V4L2_VP8_SEGMENT_HEADER_FLAG_DELTA_VALUE_MODE 0x08 -+ -+struct v4l2_vp8_segment_header { -+ __s8 quant_update[4]; -+ __s8 lf_update[4]; -+ __u8 segment_probs[3]; -+ __u8 padding; -+ __u32 flags; -+}; -+ -+#define V4L2_VP8_LF_HEADER_ADJ_ENABLE 0x01 -+#define V4L2_VP8_LF_HEADER_DELTA_UPDATE 0x02 -+#define V4L2_VP8_LF_FILTER_TYPE_SIMPLE 0x04 -+struct v4l2_vp8_loopfilter_header { -+ __s8 ref_frm_delta[4]; -+ __s8 mb_mode_delta[4]; -+ __u8 sharpness_level; -+ __u8 level; -+ __u16 padding; -+ __u32 flags; -+}; -+ -+struct v4l2_vp8_quantization_header { -+ __u8 y_ac_qi; -+ __s8 y_dc_delta; -+ __s8 y2_dc_delta; -+ __s8 y2_ac_delta; -+ __s8 uv_dc_delta; -+ __s8 uv_ac_delta; -+ __u16 padding; -+}; -+ -+struct v4l2_vp8_entropy_header { -+ __u8 coeff_probs[4][8][3][11]; -+ __u8 y_mode_probs[4]; -+ __u8 uv_mode_probs[3]; -+ __u8 mv_probs[2][19]; -+ __u8 padding[3]; -+}; -+ -+struct v4l2_vp8_entropy_coder_state { -+ __u8 range; -+ __u8 value; -+ __u8 bit_count; -+ __u8 padding; -+}; -+ -+#define V4L2_VP8_FRAME_HEADER_FLAG_KEY_FRAME 0x01 -+#define V4L2_VP8_FRAME_HEADER_FLAG_EXPERIMENTAL 0x02 -+#define V4L2_VP8_FRAME_HEADER_FLAG_SHOW_FRAME 0x04 -+#define V4L2_VP8_FRAME_HEADER_FLAG_MB_NO_SKIP_COEFF 0x08 -+#define V4L2_VP8_FRAME_HEADER_FLAG_SIGN_BIAS_GOLDEN 0x10 -+#define V4L2_VP8_FRAME_HEADER_FLAG_SIGN_BIAS_ALT 0x20 -+ -+#define VP8_FRAME_IS_KEY_FRAME(hdr) \ -+ (!!((hdr)->flags & V4L2_VP8_FRAME_HEADER_FLAG_KEY_FRAME)) -+ -+struct v4l2_ctrl_vp8_frame_header { -+ struct v4l2_vp8_segment_header segment_header; -+ struct v4l2_vp8_loopfilter_header lf_header; -+ struct v4l2_vp8_quantization_header quant_header; -+ struct v4l2_vp8_entropy_header entropy_header; -+ struct v4l2_vp8_entropy_coder_state coder_state; -+ -+ __u16 width; -+ __u16 height; -+ -+ __u8 horizontal_scale; -+ __u8 vertical_scale; -+ -+ __u8 version; -+ __u8 prob_skip_false; -+ __u8 prob_intra; -+ __u8 prob_last; -+ __u8 prob_gf; -+ __u8 num_dct_parts; -+ -+ __u32 first_part_size; -+ __u32 first_part_header_bits; -+ __u32 dct_part_sizes[8]; -+ -+ __u64 last_frame_ts; -+ __u64 golden_frame_ts; -+ __u64 alt_frame_ts; -+ -+ __u64 flags; -+}; -+ -+#endif -diff --git a/libavcodec/vp8.c b/libavcodec/vp8.c -index bab4223aca..0e1edb46fb 100644 ---- a/libavcodec/vp8.c -+++ b/libavcodec/vp8.c -@@ -175,6 +175,9 @@ static enum AVPixelFormat get_pixel_format(VP8Context *s) - #endif - #if CONFIG_VP8_NVDEC_HWACCEL - AV_PIX_FMT_CUDA, -+#endif -+#if CONFIG_VP8_V4L2REQUEST_HWACCEL -+ AV_PIX_FMT_DRM_PRIME, - #endif - AV_PIX_FMT_YUV420P, - AV_PIX_FMT_NONE, -@@ -198,7 +201,7 @@ int update_dimensions(VP8Context *s, int width, int height, int is_vp7) - return ret; - } - -- if (!s->actually_webp && !is_vp7) { -+ if (!s->actually_webp && !is_vp7 && s->pix_fmt == AV_PIX_FMT_NONE) { - s->pix_fmt = get_pixel_format(s); - if (s->pix_fmt < 0) - return AVERROR(EINVAL); -@@ -2968,6 +2971,9 @@ AVCodec ff_vp8_decoder = { - #endif - #if CONFIG_VP8_NVDEC_HWACCEL - HWACCEL_NVDEC(vp8), -+#endif -+#if CONFIG_VP8_V4L2REQUEST_HWACCEL -+ HWACCEL_V4L2REQUEST(vp8), - #endif - NULL - }, -diff --git a/libavcodec/vp9-ctrls.h b/libavcodec/vp9-ctrls.h 
-new file mode 100644 -index 0000000000..0cdea8a18b ---- /dev/null -+++ b/libavcodec/vp9-ctrls.h -@@ -0,0 +1,485 @@ -+/* SPDX-License-Identifier: GPL-2.0 */ -+/* -+ * These are the VP9 state controls for use with stateless VP9 -+ * codec drivers. -+ * -+ * It turns out that these structs are not stable yet and will undergo -+ * more changes. So keep them private until they are stable and ready to -+ * become part of the official public API. -+ */ -+ -+#ifndef _VP9_CTRLS_H_ -+#define _VP9_CTRLS_H_ -+ -+#include -+ -+#define V4L2_PIX_FMT_VP9_FRAME v4l2_fourcc('V', 'P', '9', 'F') -+ -+#define V4L2_CID_MPEG_VIDEO_VP9_FRAME_CONTEXT(i) (V4L2_CID_MPEG_BASE + 4000 + (i)) -+#define V4L2_CID_MPEG_VIDEO_VP9_FRAME_DECODE_PARAMS (V4L2_CID_MPEG_BASE + 4004) -+#define V4L2_CTRL_TYPE_VP9_FRAME_CONTEXT 0x400 -+#define V4L2_CTRL_TYPE_VP9_FRAME_DECODE_PARAMS 0x404 -+ -+/** -+ * enum v4l2_vp9_loop_filter_flags - VP9 loop filter flags -+ * -+ * @V4L2_VP9_LOOP_FILTER_FLAG_DELTA_ENABLED: the filter level depends on -+ * the mode and reference frame used -+ * to predict a block -+ * @V4L2_VP9_LOOP_FILTER_FLAG_DELTA_UPDATE: the bitstream contains additional -+ * syntax elements that specify which -+ * mode and reference frame deltas -+ * are to be updated -+ * -+ * Those are the flags you should pass to &v4l2_vp9_loop_filter.flags. See -+ * section '7.2.8 Loop filter semantics' of the VP9 specification for more -+ * details. -+ */ -+enum v4l2_vp9_loop_filter_flags { -+ V4L2_VP9_LOOP_FILTER_FLAG_DELTA_ENABLED = 1 << 0, -+ V4L2_VP9_LOOP_FILTER_FLAG_DELTA_UPDATE = 1 << 1, -+}; -+ -+/** -+ * struct v4l2_vp9_loop_filter - VP9 loop filter parameters -+ * -+ * @flags: combination of V4L2_VP9_LOOP_FILTER_FLAG_* flags -+ * @level: indicates the loop filter strength -+ * @sharpness: indicates the sharpness level -+ * @ref_deltas: contains the adjustment needed for the filter level based on -+ * the chosen reference frame -+ * @mode_deltas: contains the adjustment needed for the filter level based on -+ * the chosen mode -+ * @level_lookup: level lookup table -+ * -+ * This structure contains all loop filter related parameters. See sections -+ * '7.2.8 Loop filter semantics' and '8.8.1 Loop filter frame init process' -+ * of the VP9 specification for more details. -+ */ -+struct v4l2_vp9_loop_filter { -+ __u8 flags; -+ __u8 level; -+ __u8 sharpness; -+ __s8 ref_deltas[4]; -+ __s8 mode_deltas[2]; -+ __u8 level_lookup[8][4][2]; -+}; -+ -+/** -+ * struct v4l2_vp9_quantization - VP9 quantization parameters -+ * -+ * @base_q_idx: indicates the base frame qindex -+ * @delta_q_y_dc: indicates the Y DC quantizer relative to base_q_idx -+ * @delta_q_uv_dc: indicates the UV DC quantizer relative to base_q_idx -+ * @delta_q_uv_ac indicates the UV AC quantizer relative to base_q_idx -+ * @padding: padding bytes to align things on 64 bits. Must be set to 0 -+ * -+ * Encodes the quantization parameters. See section '7.2.9 Quantization params -+ * syntax' of the VP9 specification for more details. 
-+ */ -+struct v4l2_vp9_quantization { -+ __u8 base_q_idx; -+ __s8 delta_q_y_dc; -+ __s8 delta_q_uv_dc; -+ __s8 delta_q_uv_ac; -+ __u8 padding[4]; -+}; -+ -+/** -+ * enum v4l2_vp9_segmentation_flags - VP9 segmentation flags -+ * -+ * @V4L2_VP9_SEGMENTATION_FLAG_ENABLED: indicates that this frame makes use of -+ * the segmentation tool -+ * @V4L2_VP9_SEGMENTATION_FLAG_UPDATE_MAP: indicates that the segmentation map -+ * should be updated during the -+ * decoding of this frame -+ * @V4L2_VP9_SEGMENTATION_FLAG_TEMPORAL_UPDATE: indicates that the updates to -+ * the segmentation map are coded -+ * relative to the existing -+ * segmentation map -+ * @V4L2_VP9_SEGMENTATION_FLAG_UPDATE_DATA: indicates that new parameters are -+ * about to be specified for each -+ * segment -+ * @V4L2_VP9_SEGMENTATION_FLAG_ABS_OR_DELTA_UPDATE: indicates that the -+ * segmentation parameters -+ * represent the actual values -+ * to be used -+ * -+ * Those are the flags you should pass to &v4l2_vp9_segmentation.flags. See -+ * section '7.2.10 Segmentation params syntax' of the VP9 specification for -+ * more details. -+ */ -+enum v4l2_vp9_segmentation_flags { -+ V4L2_VP9_SEGMENTATION_FLAG_ENABLED = 1 << 0, -+ V4L2_VP9_SEGMENTATION_FLAG_UPDATE_MAP = 1 << 1, -+ V4L2_VP9_SEGMENTATION_FLAG_TEMPORAL_UPDATE = 1 << 2, -+ V4L2_VP9_SEGMENTATION_FLAG_UPDATE_DATA = 1 << 3, -+ V4L2_VP9_SEGMENTATION_FLAG_ABS_OR_DELTA_UPDATE = 1 << 4, -+}; -+ -+#define V4L2_VP9_SEGMENT_FEATURE_ENABLED(id) (1 << (id)) -+#define V4L2_VP9_SEGMENT_FEATURE_ENABLED_MASK 0xf -+ -+/** -+ * enum v4l2_vp9_segment_feature - VP9 segment feature IDs -+ * -+ * @V4L2_VP9_SEGMENT_FEATURE_QP_DELTA: QP delta segment feature -+ * @V4L2_VP9_SEGMENT_FEATURE_LF: loop filter segment feature -+ * @V4L2_VP9_SEGMENT_FEATURE_REF_FRAME: reference frame segment feature -+ * @V4L2_VP9_SEGMENT_FEATURE_SKIP: skip segment feature -+ * @V4L2_VP9_SEGMENT_FEATURE_CNT: number of segment features -+ * -+ * Segment feature IDs. See section '7.2.10 Segmentation params syntax' of the -+ * VP9 specification for more details. -+ */ -+enum v4l2_vp9_segment_feature { -+ V4L2_VP9_SEGMENT_FEATURE_QP_DELTA, -+ V4L2_VP9_SEGMENT_FEATURE_LF, -+ V4L2_VP9_SEGMENT_FEATURE_REF_FRAME, -+ V4L2_VP9_SEGMENT_FEATURE_SKIP, -+ V4L2_VP9_SEGMENT_FEATURE_CNT, -+}; -+ -+/** -+ * struct v4l2_vp9_segmentation - VP9 segmentation parameters -+ * -+ * @flags: combination of V4L2_VP9_SEGMENTATION_FLAG_* flags -+ * @tree_probs: specifies the probability values to be used when -+ * decoding a Segment-ID. See '5.15. Segmentation map' -+ * section of the VP9 specification for more details. -+ * @pred_prob: specifies the probability values to be used when decoding a -+ * Predicted-Segment-ID. See '6.4.14. Get segment id syntax' -+ * section of :ref:`vp9` for more details.. -+ * @padding: padding used to make things aligned on 64 bits. Shall be zero -+ * filled -+ * @feature_enabled: bitmask defining which features are enabled in each -+ * segment -+ * @feature_data: data attached to each feature. Data entry is only valid if -+ * the feature is enabled -+ * -+ * Encodes the quantization parameters. See section '7.2.10 Segmentation -+ * params syntax' of the VP9 specification for more details. 
-+ */ -+struct v4l2_vp9_segmentation { -+ __u8 flags; -+ __u8 tree_probs[7]; -+ __u8 pred_probs[3]; -+ __u8 padding[5]; -+ __u8 feature_enabled[8]; -+ __s16 feature_data[8][4]; -+}; -+ -+/** -+ * enum v4l2_vp9_intra_prediction_mode - VP9 Intra prediction modes -+ * -+ * @V4L2_VP9_INTRA_PRED_DC: DC intra prediction -+ * @V4L2_VP9_INTRA_PRED_MODE_V: vertical intra prediction -+ * @V4L2_VP9_INTRA_PRED_MODE_H: horizontal intra prediction -+ * @V4L2_VP9_INTRA_PRED_MODE_D45: D45 intra prediction -+ * @V4L2_VP9_INTRA_PRED_MODE_D135: D135 intra prediction -+ * @V4L2_VP9_INTRA_PRED_MODE_D117: D117 intra prediction -+ * @V4L2_VP9_INTRA_PRED_MODE_D153: D153 intra prediction -+ * @V4L2_VP9_INTRA_PRED_MODE_D207: D207 intra prediction -+ * @V4L2_VP9_INTRA_PRED_MODE_D63: D63 intra prediction -+ * @V4L2_VP9_INTRA_PRED_MODE_TM: True Motion intra prediction -+ * -+ * See section '7.4.5 Intra frame mode info semantics' for more details. -+ */ -+enum v4l2_vp9_intra_prediction_mode { -+ V4L2_VP9_INTRA_PRED_MODE_DC, -+ V4L2_VP9_INTRA_PRED_MODE_V, -+ V4L2_VP9_INTRA_PRED_MODE_H, -+ V4L2_VP9_INTRA_PRED_MODE_D45, -+ V4L2_VP9_INTRA_PRED_MODE_D135, -+ V4L2_VP9_INTRA_PRED_MODE_D117, -+ V4L2_VP9_INTRA_PRED_MODE_D153, -+ V4L2_VP9_INTRA_PRED_MODE_D207, -+ V4L2_VP9_INTRA_PRED_MODE_D63, -+ V4L2_VP9_INTRA_PRED_MODE_TM, -+}; -+ -+/** -+ * struct v4l2_vp9_mv_probabilities - VP9 Motion vector probabilities -+ * @joint: motion vector joint probabilities -+ * @sign: motion vector sign probabilities -+ * @class: motion vector class probabilities -+ * @class0_bit: motion vector class0 bit probabilities -+ * @bits: motion vector bits probabilities -+ * @class0_fr: motion vector class0 fractional bit probabilities -+ * @fr: motion vector fractional bit probabilities -+ * @class0_hp: motion vector class0 high precision fractional bit probabilities -+ * @hp: motion vector high precision fractional bit probabilities -+ */ -+struct v4l2_vp9_mv_probabilities { -+ __u8 joint[3]; -+ __u8 sign[2]; -+ __u8 class[2][10]; -+ __u8 class0_bit[2]; -+ __u8 bits[2][10]; -+ __u8 class0_fr[2][2][3]; -+ __u8 fr[2][3]; -+ __u8 class0_hp[2]; -+ __u8 hp[2]; -+}; -+ -+/** -+ * struct v4l2_vp9_probabilities - VP9 Probabilities -+ * -+ * @tx8: TX 8x8 probabilities -+ * @tx16: TX 16x16 probabilities -+ * @tx32: TX 32x32 probabilities -+ * @coef: coefficient probabilities -+ * @skip: skip probabilities -+ * @inter_mode: inter mode probabilities -+ * @interp_filter: interpolation filter probabilities -+ * @is_inter: is inter-block probabilities -+ * @comp_mode: compound prediction mode probabilities -+ * @single_ref: single ref probabilities -+ * @comp_ref: compound ref probabilities -+ * @y_mode: Y prediction mode probabilities -+ * @uv_mode: UV prediction mode probabilities -+ * @partition: partition probabilities -+ * @mv: motion vector probabilities -+ * -+ * Structure containing most VP9 probabilities. See the VP9 specification -+ * for more details. 
-+ */ -+struct v4l2_vp9_probabilities { -+ __u8 tx8[2][1]; -+ __u8 tx16[2][2]; -+ __u8 tx32[2][3]; -+ __u8 coef[4][2][2][6][6][3]; -+ __u8 skip[3]; -+ __u8 inter_mode[7][3]; -+ __u8 interp_filter[4][2]; -+ __u8 is_inter[4]; -+ __u8 comp_mode[5]; -+ __u8 single_ref[5][2]; -+ __u8 comp_ref[5]; -+ __u8 y_mode[4][9]; -+ __u8 uv_mode[10][9]; -+ __u8 partition[16][3]; -+ -+ struct v4l2_vp9_mv_probabilities mv; -+}; -+ -+/** -+ * enum v4l2_vp9_reset_frame_context - Valid values for -+ * &v4l2_ctrl_vp9_frame_decode_params->reset_frame_context -+ * -+ * @V4L2_VP9_RESET_FRAME_CTX_NONE: don't reset any frame context -+ * @V4L2_VP9_RESET_FRAME_CTX_SPEC: reset the frame context pointed by -+ * &v4l2_ctrl_vp9_frame_decode_params.frame_context_idx -+ * @V4L2_VP9_RESET_FRAME_CTX_ALL: reset all frame contexts -+ * -+ * See section '7.2 Uncompressed header semantics' of the VP9 specification -+ * for more details. -+ */ -+enum v4l2_vp9_reset_frame_context { -+ V4L2_VP9_RESET_FRAME_CTX_NONE, -+ V4L2_VP9_RESET_FRAME_CTX_SPEC, -+ V4L2_VP9_RESET_FRAME_CTX_ALL, -+}; -+ -+/** -+ * enum v4l2_vp9_interpolation_filter - VP9 interpolation filter types -+ * -+ * @V4L2_VP9_INTERP_FILTER_8TAP: height tap filter -+ * @V4L2_VP9_INTERP_FILTER_8TAP_SMOOTH: height tap smooth filter -+ * @V4L2_VP9_INTERP_FILTER_8TAP_SHARP: height tap sharp filter -+ * @V4L2_VP9_INTERP_FILTER_BILINEAR: bilinear filter -+ * @V4L2_VP9_INTERP_FILTER_SWITCHABLE: filter selection is signaled at the -+ * block level -+ * -+ * See section '7.2.7 Interpolation filter semantics' of the VP9 specification -+ * for more details. -+ */ -+enum v4l2_vp9_interpolation_filter { -+ V4L2_VP9_INTERP_FILTER_8TAP, -+ V4L2_VP9_INTERP_FILTER_8TAP_SMOOTH, -+ V4L2_VP9_INTERP_FILTER_8TAP_SHARP, -+ V4L2_VP9_INTERP_FILTER_BILINEAR, -+ V4L2_VP9_INTERP_FILTER_SWITCHABLE, -+}; -+ -+/** -+ * enum v4l2_vp9_reference_mode - VP9 reference modes -+ * -+ * @V4L2_VP9_REF_MODE_SINGLE: indicates that all the inter blocks use only a -+ * single reference frame to generate motion -+ * compensated prediction -+ * @V4L2_VP9_REF_MODE_COMPOUND: requires all the inter blocks to use compound -+ * mode. Single reference frame prediction is not -+ * allowed -+ * @V4L2_VP9_REF_MODE_SELECT: allows each individual inter block to select -+ * between single and compound prediction modes -+ * -+ * See section '7.3.6 Frame reference mode semantics' of the VP9 specification -+ * for more details. -+ */ -+enum v4l2_vp9_reference_mode { -+ V4L2_VP9_REF_MODE_SINGLE, -+ V4L2_VP9_REF_MODE_COMPOUND, -+ V4L2_VP9_REF_MODE_SELECT, -+}; -+ -+/** -+ * enum v4l2_vp9_tx_mode - VP9 TX modes -+ * -+ * @V4L2_VP9_TX_MODE_ONLY_4X4: transform size is 4x4 -+ * @V4L2_VP9_TX_MODE_ALLOW_8X8: transform size can be up to 8x8 -+ * @V4L2_VP9_TX_MODE_ALLOW_16X16: transform size can be up to 16x16 -+ * @V4L2_VP9_TX_MODE_ALLOW_32X32: transform size can be up to 32x32 -+ * @V4L2_VP9_TX_MODE_SELECT: bitstream contains transform size for each block -+ * -+ * See section '7.3.1 Tx mode semantics' of the VP9 specification for more -+ * details. 
-+ */ -+enum v4l2_vp9_tx_mode { -+ V4L2_VP9_TX_MODE_ONLY_4X4, -+ V4L2_VP9_TX_MODE_ALLOW_8X8, -+ V4L2_VP9_TX_MODE_ALLOW_16X16, -+ V4L2_VP9_TX_MODE_ALLOW_32X32, -+ V4L2_VP9_TX_MODE_SELECT, -+}; -+ -+/** -+ * enum v4l2_vp9_ref_id - VP9 Reference frame IDs -+ * -+ * @V4L2_REF_ID_LAST: last reference frame -+ * @V4L2_REF_ID_GOLDEN: golden reference frame -+ * @V4L2_REF_ID_ALTREF: alternative reference frame -+ * @V4L2_REF_ID_CNT: number of reference frames -+ * -+ * See section '7.4.12 Ref frames semantics' of the VP9 specification for more -+ * details. -+ */ -+enum v4l2_vp9_ref_id { -+ V4L2_REF_ID_LAST, -+ V4L2_REF_ID_GOLDEN, -+ V4L2_REF_ID_ALTREF, -+ V4L2_REF_ID_CNT, -+}; -+ -+/** -+ * enum v4l2_vp9_frame_flags - VP9 frame flags -+ * @V4L2_VP9_FRAME_FLAG_KEY_FRAME: the frame is a key frame -+ * @V4L2_VP9_FRAME_FLAG_SHOW_FRAME: the frame should be displayed -+ * @V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT: the decoding should be error resilient -+ * @V4L2_VP9_FRAME_FLAG_INTRA_ONLY: the frame does not reference other frames -+ * @V4L2_VP9_FRAME_FLAG_ALLOW_HIGH_PREC_MV: the frame might can high precision -+ * motion vectors -+ * @V4L2_VP9_FRAME_FLAG_REFRESH_FRAME_CTX: frame context should be updated -+ * after decoding -+ * @V4L2_VP9_FRAME_FLAG_PARALLEL_DEC_MODE: parallel decoding is used -+ * @V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING: vertical subsampling is enabled -+ * @V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING: horizontal subsampling is enabled -+ * @V4L2_VP9_FRAME_FLAG_COLOR_RANGE_FULL_SWING: full UV range is used -+ * -+ * Check the VP9 specification for more details. -+ */ -+enum v4l2_vp9_frame_flags { -+ V4L2_VP9_FRAME_FLAG_KEY_FRAME = 1 << 0, -+ V4L2_VP9_FRAME_FLAG_SHOW_FRAME = 1 << 1, -+ V4L2_VP9_FRAME_FLAG_ERROR_RESILIENT = 1 << 2, -+ V4L2_VP9_FRAME_FLAG_INTRA_ONLY = 1 << 3, -+ V4L2_VP9_FRAME_FLAG_ALLOW_HIGH_PREC_MV = 1 << 4, -+ V4L2_VP9_FRAME_FLAG_REFRESH_FRAME_CTX = 1 << 5, -+ V4L2_VP9_FRAME_FLAG_PARALLEL_DEC_MODE = 1 << 6, -+ V4L2_VP9_FRAME_FLAG_X_SUBSAMPLING = 1 << 7, -+ V4L2_VP9_FRAME_FLAG_Y_SUBSAMPLING = 1 << 8, -+ V4L2_VP9_FRAME_FLAG_COLOR_RANGE_FULL_SWING = 1 << 9, -+}; -+ -+#define V4L2_VP9_PROFILE_MAX 3 -+ -+/** -+ * struct v4l2_ctrl_vp9_frame_decode_params - VP9 frame decoding control -+ * -+ * @flags: combination of V4L2_VP9_FRAME_FLAG_* flags -+ * @compressed_header_size: compressed header size in bytes -+ * @uncompressed_header_size: uncompressed header size in bytes -+ * @profile: VP9 profile. Can be 0, 1, 2 or 3 -+ * @reset_frame_context: specifies whether the frame context should be reset -+ * to default values. See &v4l2_vp9_reset_frame_context -+ * for more details -+ * @frame_context_idx: frame context that should be used/updated -+ * @bit_depth: bits per components. Can be 8, 10 or 12. Note that not all -+ * profiles support 10 and/or 12 bits depths -+ * @interpolation_filter: specifies the filter selection used for performing -+ * inter prediction. See &v4l2_vp9_interpolation_filter -+ * for more details -+ * @tile_cols_log2: specifies the base 2 logarithm of the width of each tile -+ * (where the width is measured in units of 8x8 blocks). -+ * Shall be less than or equal to 6 -+ * @tile_rows_log2: specifies the base 2 logarithm of the height of each tile -+ * (where the height is measured in units of 8x8 blocks) -+ * @tx_mode: specifies the TX mode. See &v4l2_vp9_tx_mode for more details -+ * @reference_mode: specifies the type of inter prediction to be used. See -+ * &v4l2_vp9_reference_mode for more details -+ * @padding: needed to make this struct 64 bit aligned. 
Shall be filled with -+ * zeros -+ * @frame_width_minus_1: add 1 to it and you'll get the frame width expressed -+ * in pixels -+ * @frame_height_minus_1: add 1 to it and you'll get the frame height expressed -+ * in pixels -+ * @frame_width_minus_1: add 1 to it and you'll get the expected render width -+ * expressed in pixels. This is not used during the -+ * decoding process but might be used by HW scalers to -+ * prepare a frame that's ready for scanout -+ * @frame_height_minus_1: add 1 to it and you'll get the expected render height -+ * expressed in pixels. This is not used during the -+ * decoding process but might be used by HW scalers to -+ * prepare a frame that's ready for scanout -+ * @refs: array of reference frames. See &v4l2_vp9_ref_id for more details -+ * @lf: loop filter parameters. See &v4l2_vp9_loop_filter for more details -+ * @quant: quantization parameters. See &v4l2_vp9_quantization for more details -+ * @seg: segmentation parameters. See &v4l2_vp9_segmentation for more details -+ * @probs: probabilities. See &v4l2_vp9_probabilities for more details -+ */ -+struct v4l2_ctrl_vp9_frame_decode_params { -+ __u32 flags; -+ __u16 compressed_header_size; -+ __u16 uncompressed_header_size; -+ __u8 profile; -+ __u8 reset_frame_context; -+ __u8 frame_context_idx; -+ __u8 bit_depth; -+ __u8 interpolation_filter; -+ __u8 tile_cols_log2; -+ __u8 tile_rows_log2; -+ __u8 tx_mode; -+ __u8 reference_mode; -+ __u8 padding[6]; -+ __u16 frame_width_minus_1; -+ __u16 frame_height_minus_1; -+ __u16 render_width_minus_1; -+ __u16 render_height_minus_1; -+ __u64 refs[V4L2_REF_ID_CNT]; -+ struct v4l2_vp9_loop_filter lf; -+ struct v4l2_vp9_quantization quant; -+ struct v4l2_vp9_segmentation seg; -+ struct v4l2_vp9_probabilities probs; -+}; -+ -+#define V4L2_VP9_NUM_FRAME_CTX 4 -+ -+/** -+ * struct v4l2_ctrl_vp9_frame_ctx - VP9 frame context control -+ * -+ * @probs: VP9 probabilities -+ * -+ * This control is accessed in both direction. The user should initialize the -+ * 4 contexts with default values just after starting the stream. Then before -+ * decoding a frame it should query the current frame context (the one passed -+ * through &v4l2_ctrl_vp9_frame_decode_params.frame_context_idx) to initialize -+ * &v4l2_ctrl_vp9_frame_decode_params.probs. The probs are then adjusted based -+ * on the bitstream info and passed to the kernel. The codec should update -+ * the frame context after the frame has been decoded, so that next time -+ * userspace query this context it contains the updated probabilities. 
-+ */ -+struct v4l2_ctrl_vp9_frame_ctx { -+ struct v4l2_vp9_probabilities probs; -+}; -+ -+#endif /* _VP9_CTRLS_H_ */ -diff --git a/libavcodec/vp9.c b/libavcodec/vp9.c -index fd0bab14a2..434f905c62 100644 ---- a/libavcodec/vp9.c -+++ b/libavcodec/vp9.c -@@ -191,6 +191,7 @@ static int update_size(AVCodecContext *avctx, int w, int h) - #define HWACCEL_MAX (CONFIG_VP9_DXVA2_HWACCEL + \ - CONFIG_VP9_D3D11VA_HWACCEL * 2 + \ - CONFIG_VP9_NVDEC_HWACCEL + \ -+ CONFIG_VP9_V4L2REQUEST_HWACCEL + \ - CONFIG_VP9_VAAPI_HWACCEL + \ - CONFIG_VP9_VDPAU_HWACCEL) - enum AVPixelFormat pix_fmts[HWACCEL_MAX + 2], *fmtp = pix_fmts; -@@ -223,6 +224,9 @@ static int update_size(AVCodecContext *avctx, int w, int h) - #endif - #if CONFIG_VP9_VAAPI_HWACCEL - *fmtp++ = AV_PIX_FMT_VAAPI; -+#endif -+#if CONFIG_VP9_V4L2REQUEST_HWACCEL -+ *fmtp++ = AV_PIX_FMT_DRM_PRIME; - #endif - break; - case AV_PIX_FMT_YUV420P12: -@@ -231,6 +235,9 @@ static int update_size(AVCodecContext *avctx, int w, int h) - #endif - #if CONFIG_VP9_VAAPI_HWACCEL - *fmtp++ = AV_PIX_FMT_VAAPI; -+#endif -+#if CONFIG_VP9_V4L2REQUEST_HWACCEL -+ *fmtp++ = AV_PIX_FMT_DRM_PRIME; - #endif - break; - } -@@ -700,7 +707,8 @@ static int decode_frame_header(AVCodecContext *avctx, - get_bits(&s->gb, 8) : 255; - } - -- if (get_bits1(&s->gb)) { -+ s->s.h.segmentation.update_data = get_bits1(&s->gb); -+ if (s->s.h.segmentation.update_data) { - s->s.h.segmentation.absolute_vals = get_bits1(&s->gb); - for (i = 0; i < 8; i++) { - if ((s->s.h.segmentation.feat[i].q_enabled = get_bits1(&s->gb))) -@@ -1909,6 +1917,9 @@ AVCodec ff_vp9_decoder = { - #endif - #if CONFIG_VP9_VDPAU_HWACCEL - HWACCEL_VDPAU(vp9), -+#endif -+#if CONFIG_VP9_V4L2REQUEST_HWACCEL -+ HWACCEL_V4L2REQUEST(vp9), - #endif - NULL - }, -diff --git a/libavcodec/vp9shared.h b/libavcodec/vp9shared.h -index 54726df742..fee3568736 100644 ---- a/libavcodec/vp9shared.h -+++ b/libavcodec/vp9shared.h -@@ -131,6 +131,7 @@ typedef struct VP9BitstreamHeader { - uint8_t temporal; - uint8_t absolute_vals; - uint8_t update_map; -+ uint8_t update_data; - uint8_t prob[7]; - uint8_t pred_prob[3]; - struct { diff --git a/libavdevice/Makefile b/libavdevice/Makefile -index 6ea62b914e..c8c9eeb22b 100644 +index 6ea62b914e..a336cbc596 100644 --- a/libavdevice/Makefile +++ b/libavdevice/Makefile -@@ -45,6 +45,9 @@ OBJS-$(CONFIG_SNDIO_INDEV) += sndio_dec.o sndio.o - OBJS-$(CONFIG_SNDIO_OUTDEV) += sndio_enc.o sndio.o +@@ -46,6 +46,9 @@ OBJS-$(CONFIG_SNDIO_OUTDEV) += sndio_enc.o sndio.o OBJS-$(CONFIG_V4L2_INDEV) += v4l2.o v4l2-common.o timefilter.o OBJS-$(CONFIG_V4L2_OUTDEV) += v4l2enc.o v4l2-common.o + OBJS-$(CONFIG_VFWCAP_INDEV) += vfwcap.o +OBJS-$(CONFIG_VOUT_DRM_OUTDEV) += drm_vout.o +OBJS-$(CONFIG_VOUT_EGL_OUTDEV) += egl_vout.o +OBJS-$(CONFIG_VOUT_RPI_OUTDEV) += rpi_vout.o - OBJS-$(CONFIG_VFWCAP_INDEV) += vfwcap.o OBJS-$(CONFIG_XCBGRAB_INDEV) += xcbgrab.o OBJS-$(CONFIG_XV_OUTDEV) += xv.o + diff --git a/libavdevice/alldevices.c b/libavdevice/alldevices.c index 8633433254..bc15112a00 100644 --- a/libavdevice/alldevices.c @@ -52229,10 +52801,10 @@ index 8633433254..bc15112a00 100644 diff --git a/libavdevice/drm_vout.c b/libavdevice/drm_vout.c new file mode 100644 -index 0000000000..c427b60d30 +index 0000000000..5362ac9e28 --- /dev/null +++ b/libavdevice/drm_vout.c -@@ -0,0 +1,613 @@ +@@ -0,0 +1,632 @@ +/* + * Copyright (c) 2020 John Cox for Raspberry Pi Trading + * @@ -52256,34 +52828,21 @@ index 0000000000..c427b60d30 + +// *** This module is a work in progress and its utility is strictly +// limited to testing. 
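(A usage sketch, not taken from the patch itself: this output device appears to be selected with "-f vout_drm", matching the --enable-vout-drm configure switch used later in pi-util/conf_native.sh, and is assumed to be driven the same way TESTMESA.txt drives vout_egl; the decoder, input file and "-show_all 1" option below are placeholders/illustrative only.)

# Illustrative only - present decoded DRM_PRIME frames directly on a DRM plane;
# "-show_all 1" (the AVOption defined at the bottom of this file) makes the
# device block on every frame instead of dropping when the display falls behind
./ffmpeg -no_cvt_hw -hwaccel drm -vcodec hevc -i jellyfish-3-mbps-hd-hevc.mkv -show_all 1 -f vout_drm -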
-+// Amongst other issues it doesn't wait for the pic to be displayed before -+// returning the buffer so flikering does occur. + +#include "libavutil/opt.h" -+#include "libavutil/avassert.h" +#include "libavutil/pixdesc.h" -+#include "libavutil/imgutils.h" +#include "libavutil/hwcontext_drm.h" +#include "libavformat/internal.h" +#include "avdevice.h" + +#include "pthread.h" +#include -+#include + -+#include "drm_fourcc.h" -+#include -+#include +#include +#include + -+#include "libavutil/rpi_sand_fns.h" -+ +#define TRACE_ALL 0 + -+#define NUM_BUFFERS 4 -+#define RPI_DISPLAY_ALL 0 -+ +#define DRM_MODULE "vc4" + +#define ERRSTR strerror(errno) @@ -52300,11 +52859,13 @@ index 0000000000..c427b60d30 +}; + +typedef struct drm_aux_s { -+ int fd; -+ uint32_t bo_handles[4]; + unsigned int fb_handle; ++ AVFrame * frame; +} drm_aux_t; + ++// Aux size should only need to be 2, but on a few streams (Hobbit) under FKMS ++// we get initial flicker probably due to dodgy drm timing ++#define AUX_SIZE 3 +typedef struct drm_display_env_s +{ + AVClass *class; @@ -52313,14 +52874,15 @@ index 0000000000..c427b60d30 + uint32_t con_id; + struct drm_setup setup; + enum AVPixelFormat avfmt; ++ int show_all; + -+ drm_aux_t aux[32]; ++ unsigned int ano; ++ drm_aux_t aux[AUX_SIZE]; + + pthread_t q_thread; -+ pthread_mutex_t q_lock; -+ sem_t q_sem; ++ sem_t q_sem_in; ++ sem_t q_sem_out; + int q_terminate; -+ AVFrame * q_this; + AVFrame * q_next; + +} drm_display_env_t; @@ -52329,7 +52891,7 @@ index 0000000000..c427b60d30 +static int drm_vout_write_trailer(AVFormatContext *s) +{ +#if TRACE_ALL -+ av_log(s, AV_LOG_INFO, "%s\n", __func__); ++ av_log(s, AV_LOG_DEBUG, "%s\n", __func__); +#endif + + return 0; @@ -52340,7 +52902,7 @@ index 0000000000..c427b60d30 + const AVCodecParameters * const par = s->streams[0]->codecpar; + +#if TRACE_ALL -+ av_log(s, AV_LOG_INFO, "%s\n", __func__); ++ av_log(s, AV_LOG_DEBUG, "%s\n", __func__); +#endif + if ( s->nb_streams > 1 + || par->codec_type != AVMEDIA_TYPE_VIDEO @@ -52352,41 +52914,119 @@ index 0000000000..c427b60d30 + return 0; +} + -+ -+static int do_display(AVFormatContext * const s, drm_display_env_t * const de, AVFrame * const frame) ++static int find_plane(struct AVFormatContext * const avctx, ++ const int drmfd, const int crtcidx, const uint32_t format, ++ uint32_t * const pplane_id) +{ ++ drmModePlaneResPtr planes; ++ drmModePlanePtr plane; ++ unsigned int i; ++ unsigned int j; ++ int ret = 0; ++ ++ planes = drmModeGetPlaneResources(drmfd); ++ if (!planes) ++ { ++ av_log(avctx, AV_LOG_WARNING, "drmModeGetPlaneResources failed: %s\n", ERRSTR); ++ return -1; ++ } ++ ++ for (i = 0; i < planes->count_planes; ++i) { ++ plane = drmModeGetPlane(drmfd, planes->planes[i]); ++ if (!planes) ++ { ++ av_log(avctx, AV_LOG_WARNING, "drmModeGetPlane failed: %s\n", ERRSTR); ++ break; ++ } ++ ++ if (!(plane->possible_crtcs & (1 << crtcidx))) { ++ drmModeFreePlane(plane); ++ continue; ++ } ++ ++ for (j = 0; j < plane->count_formats; ++j) { ++ if (plane->formats[j] == format) ++ break; ++ } ++ ++ if (j == plane->count_formats) { ++ drmModeFreePlane(plane); ++ continue; ++ } ++ ++ *pplane_id = plane->plane_id; ++ drmModeFreePlane(plane); ++ break; ++ } ++ ++ if (i == planes->count_planes) ++ ret = -1; ++ ++ drmModeFreePlaneResources(planes); ++ return ret; ++} ++ ++static void da_uninit(drm_display_env_t * const de, drm_aux_t * da) ++{ ++ if (da->fb_handle != 0) { ++ drmModeRmFB(de->drm_fd, da->fb_handle); ++ da->fb_handle = 0; ++ } ++ ++ av_frame_free(&da->frame); ++} ++ ++static int 
do_display(AVFormatContext * const s, drm_display_env_t * const de, AVFrame * frame) ++{ ++ const AVDRMFrameDescriptor *desc = (AVDRMFrameDescriptor*)frame->data[0]; ++ drm_aux_t * da = de->aux + de->ano; ++ const uint32_t format = desc->layers[0].format; + int ret = 0; + -+ const AVDRMFrameDescriptor *desc = (AVDRMFrameDescriptor*)frame->data[0]; -+ drm_aux_t * da = NULL; -+ unsigned int i; -+ +#if TRACE_ALL -+ av_log(s, AV_LOG_INFO, "<<< %s\n", __func__); ++ av_log(s, AV_LOG_DEBUG, "<<< %s: fd=%d\n", __func__, desc->objects[0].fd); +#endif + -+ for (i = 0; i != 32; ++i) { -+ if (de->aux[i].fd == -1 || de->aux[i].fd == desc->objects[0].fd) { -+ da = de->aux + i; -+ break; ++ if (de->setup.out_fourcc != format) { ++ if (find_plane(s, de->drm_fd, de->setup.crtcIdx, format, &de->setup.planeId)) { ++ av_frame_free(&frame); ++ av_log(s, AV_LOG_WARNING, "No plane for format: %#x\n", format); ++ return -1; ++ } ++ de->setup.out_fourcc = format; ++ } ++ ++ { ++ drmVBlank vbl = { ++ .request = { ++ .type = DRM_VBLANK_RELATIVE, ++ .sequence = 0 ++ } ++ }; ++ ++ while (drmWaitVBlank(de->drm_fd, &vbl)) { ++ if (errno != EINTR) { ++ av_log(s, AV_LOG_WARNING, "drmWaitVBlank failed: %s\n", ERRSTR); ++ break; ++ } + } + } + -+ if (da == NULL) { -+ av_log(s, AV_LOG_INFO, "%s: Out of handles\n", __func__); -+ return AVERROR(EINVAL); -+ } ++ da_uninit(de, da); + -+ if (da->fd == -1) { ++ { + uint32_t pitches[4] = {0}; + uint32_t offsets[4] = {0}; + uint64_t modifiers[4] = {0}; -+ uint32_t bo_plane_handles[4] = {0}; ++ uint32_t bo_object_handles[4] = {0}; ++ uint32_t bo_handles[4] = {0}; + int i, j, n; + ++ da->frame = frame; ++ + for (i = 0; i < desc->nb_objects; ++i) { -+ if (drmPrimeFDToHandle(de->drm_fd, desc->objects[i].fd, da->bo_handles + i) != 0) { -+ av_log(s, AV_LOG_WARNING, "drmPrimeFDToHandle failed: %s\n", ERRSTR); ++ if (drmPrimeFDToHandle(de->drm_fd, desc->objects[i].fd, bo_object_handles + i) != 0) { ++ av_log(s, AV_LOG_WARNING, "drmPrimeFDToHandle[%d](%d) failed: %s\n", i, desc->objects[i].fd, ERRSTR); + return -1; + } + } @@ -52399,21 +53039,21 @@ index 0000000000..c427b60d30 + pitches[n] = p->pitch; + offsets[n] = p->offset; + modifiers[n] = obj->format_modifier; -+ bo_plane_handles[n] = da->bo_handles[p->object_index]; ++ bo_handles[n] = bo_object_handles[p->object_index]; + ++n; + } + } + -+#if 0 -+ av_log(s, AV_LOG_INFO, "%dx%d, fmt: %x, boh=%d,%d,%d,%d, pitch=%d,%d,%d,%d," ++#if 1 && TRACE_ALL ++ av_log(s, AV_LOG_DEBUG, "%dx%d, fmt: %x, boh=%d,%d,%d,%d, pitch=%d,%d,%d,%d," + " offset=%d,%d,%d,%d, mod=%llx,%llx,%llx,%llx\n", + av_frame_cropped_width(frame), + av_frame_cropped_height(frame), + desc->layers[0].format, -+ bo_plane_handles[0], -+ bo_plane_handles[1], -+ bo_plane_handles[2], -+ bo_plane_handles[3], ++ bo_handles[0], ++ bo_handles[1], ++ bo_handles[2], ++ bo_handles[3], + pitches[0], + pitches[1], + pitches[2], @@ -52432,14 +53072,12 @@ index 0000000000..c427b60d30 + if (drmModeAddFB2WithModifiers(de->drm_fd, + av_frame_cropped_width(frame), + av_frame_cropped_height(frame), -+ desc->layers[0].format, bo_plane_handles, ++ desc->layers[0].format, bo_handles, + pitches, offsets, modifiers, + &da->fb_handle, DRM_MODE_FB_MODIFIERS /** 0 if no mods */) != 0) { + av_log(s, AV_LOG_WARNING, "drmModeAddFB2WithModifiers failed: %s\n", ERRSTR); + return -1; + } -+ -+ da->fd = desc->objects[0].fd; + } + + ret = drmModeSetPlane(de->drm_fd, de->setup.planeId, de->setup.crtcId, @@ -52455,43 +53093,56 @@ index 0000000000..c427b60d30 + av_log(s, AV_LOG_WARNING, "drmModeSetPlane failed: %s\n", 
ERRSTR); + } + ++ de->ano = de->ano + 1 >= AUX_SIZE ? 0 : de->ano + 1; ++ + return ret; +} + ++static int do_sem_wait(sem_t * const sem, const int nowait) ++{ ++ while (nowait ? sem_trywait(sem) : sem_wait(sem)) { ++ if (errno != EINTR) ++ return -errno; ++ } ++ return 0; ++} ++ +static void * display_thread(void * v) +{ + AVFormatContext * const s = v; + drm_display_env_t * const de = s->priv_data; ++ int i; + +#if TRACE_ALL -+ av_log(s, AV_LOG_INFO, "<<< %s\n", __func__); ++ av_log(s, AV_LOG_DEBUG, "<<< %s\n", __func__); +#endif + ++ sem_post(&de->q_sem_out); ++ + for (;;) { + AVFrame * frame; + -+ while (sem_wait(&de->q_sem) != 0) { -+ av_assert0(errno == EINTR); -+ } ++ do_sem_wait(&de->q_sem_in, 0); + + if (de->q_terminate) + break; + -+ pthread_mutex_lock(&de->q_lock); + frame = de->q_next; + de->q_next = NULL; -+ pthread_mutex_unlock(&de->q_lock); ++ sem_post(&de->q_sem_out); + + do_display(s, de, frame); -+ -+ av_frame_free(&de->q_this); -+ de->q_this = frame; + } + +#if TRACE_ALL -+ av_log(s, AV_LOG_INFO, ">>> %s\n", __func__); ++ av_log(s, AV_LOG_DEBUG, ">>> %s\n", __func__); +#endif + ++ for (i = 0; i != AUX_SIZE; ++i) ++ da_uninit(de, de->aux + i); ++ ++ av_frame_free(&de->q_next); ++ + return NULL; +} + @@ -52500,13 +53151,14 @@ index 0000000000..c427b60d30 + const AVFrame * const src_frame = (AVFrame *)pkt->data; + AVFrame * frame; + drm_display_env_t * const de = s->priv_data; ++ int ret; + +#if TRACE_ALL -+ av_log(s, AV_LOG_INFO, "%s\n", __func__); ++ av_log(s, AV_LOG_DEBUG, "%s\n", __func__); +#endif + + if ((src_frame->flags & AV_FRAME_FLAG_CORRUPT) != 0) { -+ av_log(s, AV_LOG_WARNING, "Discard corrupt frame: ts=%" PRId64 "\n", src_frame->format, src_frame->pts); ++ av_log(s, AV_LOG_WARNING, "Discard corrupt frame: fmt=%d, ts=%" PRId64 "\n", src_frame->format, src_frame->pts); + return 0; + } + @@ -52529,19 +53181,14 @@ index 0000000000..c427b60d30 + return AVERROR(EINVAL); + } + -+ -+ pthread_mutex_lock(&de->q_lock); -+ { -+ AVFrame * const t = de->q_next; -+ de->q_next = frame; -+ frame = t; -+ } -+ pthread_mutex_unlock(&de->q_lock); -+ -+ if (frame == NULL) -+ sem_post(&de->q_sem); -+ else ++ ret = do_sem_wait(&de->q_sem_out, !de->show_all); ++ if (ret) { + av_frame_free(&frame); ++ } ++ else { ++ de->q_next = frame; ++ sem_post(&de->q_sem_in); ++ } + + return 0; +} @@ -52550,7 +53197,7 @@ index 0000000000..c427b60d30 + unsigned flags) +{ +#if TRACE_ALL -+ av_log(s, AV_LOG_INFO, "%s: idx=%d, flags=%#x\n", __func__, stream_index, flags); ++ av_log(s, AV_LOG_DEBUG, "%s: idx=%d, flags=%#x\n", __func__, stream_index, flags); +#endif + + /* drm_vout_write_header() should have accepted only supported formats */ @@ -52563,7 +53210,7 @@ index 0000000000..c427b60d30 +static int drm_vout_control_message(AVFormatContext *s, int type, void *data, size_t data_size) +{ +#if TRACE_ALL -+ av_log(s, AV_LOG_INFO, "%s: %d\n", __func__, type); ++ av_log(s, AV_LOG_DEBUG, "%s: %d\n", __func__, type); +#endif + switch(type) { + case AV_APP_TO_DEV_WINDOW_REPAINT: @@ -52615,7 +53262,7 @@ index 0000000000..c427b60d30 + s->crtcId = crtc->crtc_id; + } + -+ av_log(avctx, AV_LOG_INFO, "Connector %d (crtc %d): type %d, %dx%d%s\n", ++ av_log(avctx, AV_LOG_DEBUG, "Connector %d (crtc %d): type %d, %dx%d%s\n", + con->connector_id, + crtc ? 
crtc->crtc_id : 0, + con->connector_type, @@ -52688,138 +53335,81 @@ index 0000000000..c427b60d30 + return ret; +} + -+static int find_plane(struct AVFormatContext * const avctx, int drmfd, struct drm_setup *s) -+{ -+ drmModePlaneResPtr planes; -+ drmModePlanePtr plane; -+ unsigned int i; -+ unsigned int j; -+ int ret = 0; -+ -+ planes = drmModeGetPlaneResources(drmfd); -+ if (!planes) -+ { -+ av_log(avctx, AV_LOG_WARNING, "drmModeGetPlaneResources failed: %s\n", ERRSTR); -+ return -1; -+ } -+ -+ for (i = 0; i < planes->count_planes; ++i) { -+ plane = drmModeGetPlane(drmfd, planes->planes[i]); -+ if (!planes) -+ { -+ av_log(avctx, AV_LOG_WARNING, "drmModeGetPlane failed: %s\n", ERRSTR); -+ break; -+ } -+ -+ if (!(plane->possible_crtcs & (1 << s->crtcIdx))) { -+ drmModeFreePlane(plane); -+ continue; -+ } -+ -+ for (j = 0; j < plane->count_formats; ++j) { -+ if (plane->formats[j] == s->out_fourcc) -+ break; -+ } -+ -+ if (j == plane->count_formats) { -+ drmModeFreePlane(plane); -+ continue; -+ } -+ -+ s->planeId = plane->plane_id; -+ drmModeFreePlane(plane); -+ break; -+ } -+ -+ if (i == planes->count_planes) -+ ret = -1; -+ -+ drmModeFreePlaneResources(planes); -+ return ret; -+} -+ +// deinit is called if init fails so no need to clean up explicity here +static int drm_vout_init(struct AVFormatContext * s) +{ + drm_display_env_t * const de = s->priv_data; -+ unsigned int i; ++ int rv; ++ const char * drm_module = DRM_MODULE; + -+ av_log(s, AV_LOG_INFO, "<<< %s\n", __func__); ++ av_log(s, AV_LOG_DEBUG, "<<< %s\n", __func__); + + de->drm_fd = -1; + de->con_id = 0; + de->setup = (struct drm_setup){0}; ++ de->q_terminate = 0; + -+ de->setup.out_fourcc = DRM_FORMAT_NV12; // **** Need some sort of select -+ -+ for (i = 0; i != 32; ++i) { -+ de->aux[i].fd = -1; -+ } -+ -+ if ((de->drm_fd = drmOpen(DRM_MODULE, NULL)) < 0) ++ if ((de->drm_fd = drmOpen(drm_module, NULL)) < 0) + { -+ av_log(s, AV_LOG_ERROR, "Failed to drmOpen %s\n", DRM_MODULE); -+ return -1; ++ rv = AVERROR(errno); ++ av_log(s, AV_LOG_ERROR, "Failed to drmOpen %s: %s\n", drm_module, av_err2str(rv)); ++ return rv; + } + + if (find_crtc(s, de->drm_fd, &de->setup, &de->con_id) != 0) + { + av_log(s, AV_LOG_ERROR, "failed to find valid mode\n"); -+ return -1; ++ rv = AVERROR(EINVAL); ++ goto fail_close; + } + -+ if (find_plane(s, de->drm_fd, &de->setup) != 0) -+ { -+ av_log(s, AV_LOG_ERROR, "failed to find compatible plane\n"); -+ return -1; ++ sem_init(&de->q_sem_in, 0, 0); ++ sem_init(&de->q_sem_out, 0, 0); ++ if (pthread_create(&de->q_thread, NULL, display_thread, s)) { ++ rv = AVERROR(errno); ++ av_log(s, AV_LOG_ERROR, "Failed to creatye display thread: %s\n", av_err2str(rv)); ++ goto fail_close; + } + -+ de->q_terminate = 0; -+ pthread_mutex_init(&de->q_lock, NULL); -+ sem_init(&de->q_sem, 0, 0); -+ av_assert0(pthread_create(&de->q_thread, NULL, display_thread, s) == 0); -+ -+ av_log(s, AV_LOG_INFO, ">>> %s\n", __func__); ++ av_log(s, AV_LOG_DEBUG, ">>> %s\n", __func__); + + return 0; ++ ++fail_close: ++ close(de->drm_fd); ++ de->drm_fd = -1; ++ av_log(s, AV_LOG_DEBUG, ">>> %s: FAIL\n", __func__); ++ ++ return rv; +} + +static void drm_vout_deinit(struct AVFormatContext * s) +{ + drm_display_env_t * const de = s->priv_data; + -+ av_log(s, AV_LOG_INFO, "<<< %s\n", __func__); ++ av_log(s, AV_LOG_DEBUG, "<<< %s\n", __func__); + + de->q_terminate = 1; -+ sem_post(&de->q_sem); ++ sem_post(&de->q_sem_in); + pthread_join(de->q_thread, NULL); -+ sem_destroy(&de->q_sem); -+ pthread_mutex_destroy(&de->q_lock); ++ sem_destroy(&de->q_sem_in); 
++ sem_destroy(&de->q_sem_out); + + av_frame_free(&de->q_next); -+ av_frame_free(&de->q_this); + + if (de->drm_fd >= 0) { + close(de->drm_fd); + de->drm_fd = -1; + } + -+ av_log(s, AV_LOG_INFO, ">>> %s\n", __func__); ++ av_log(s, AV_LOG_DEBUG, ">>> %s\n", __func__); +} + + +#define OFFSET(x) offsetof(drm_display_env_t, x) +static const AVOption options[] = { -+#if 0 -+ { "display_name", "set display name", OFFSET(display_name), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM }, -+ { "window_id", "set existing window id", OFFSET(window_id), AV_OPT_TYPE_INT64, {.i64 = 0 }, 0, INT64_MAX, AV_OPT_FLAG_ENCODING_PARAM }, -+ { "window_size", "set window forced size", OFFSET(window_width), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, AV_OPT_FLAG_ENCODING_PARAM }, -+ { "window_title", "set window title", OFFSET(window_title), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM }, -+ { "window_x", "set window x offset", OFFSET(window_x), AV_OPT_TYPE_INT, {.i64 = 0 }, -INT_MAX, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM }, -+ { "window_y", "set window y offset", OFFSET(window_y), AV_OPT_TYPE_INT, {.i64 = 0 }, -INT_MAX, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM }, -+#endif ++ { "show_all", "show all frames", OFFSET(show_all), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, AV_OPT_FLAG_ENCODING_PARAM }, + { NULL } -+ +}; + +static const AVClass drm_vout_class = { @@ -52846,12 +53436,13 @@ index 0000000000..c427b60d30 + .init = drm_vout_init, + .deinit = drm_vout_deinit, +}; ++ diff --git a/libavdevice/egl_vout.c b/libavdevice/egl_vout.c new file mode 100644 -index 0000000000..d5b2e161d5 +index 0000000000..5d97fe9310 --- /dev/null +++ b/libavdevice/egl_vout.c -@@ -0,0 +1,805 @@ +@@ -0,0 +1,824 @@ +/* + * Copyright (c) 2020 John Cox for Raspberry Pi Trading + * @@ -52938,6 +53529,11 @@ index 0000000000..d5b2e161d5 + struct egl_setup setup; + enum AVPixelFormat avfmt; + ++ int show_all; ++ int window_width, window_height; ++ int window_x, window_y; ++ int fullscreen; ++ + egl_aux_t aux[32]; + + pthread_t q_thread; @@ -53003,8 +53599,8 @@ index 0000000000..d5b2e161d5 + */ +static int +make_window(struct AVFormatContext * const s, ++ egl_display_env_t * const de, + Display *dpy, EGLDisplay egl_dpy, const char *name, -+ int x, int y, int width, int height, + Window *winRet, EGLContext *ctxRet, EGLSurface *surfRet) +{ + int scrnum = DefaultScreen( dpy ); @@ -53013,8 +53609,13 @@ index 0000000000..d5b2e161d5 + Window root = RootWindow( dpy, scrnum ); + Window win; + EGLContext ctx; -+ bool fullscreen = false; /* Hook this up to a command line arg */ ++ const int fullscreen = de->fullscreen; + EGLConfig config; ++ int x = de->window_x; ++ int y = de->window_y; ++ int width = de->window_width ? de->window_width : 1280; ++ int height = de->window_height ? 
de->window_height : 720; ++ + + if (fullscreen) { + int scrnum = DefaultScreen(dpy); @@ -53403,6 +54004,10 @@ index 0000000000..d5b2e161d5 + glDrawArrays(GL_TRIANGLE_FAN, 0, 4); + eglSwapBuffers(de->setup.egl_dpy, de->setup.surf); + ++ glDeleteTextures(1, &da->texture); ++ da->texture = 0; ++ da->fd = -1; ++ + return 0; +} + @@ -53442,8 +54047,12 @@ index 0000000000..d5b2e161d5 + } + } + -+ if (make_window(s, de->setup.dpy, de->setup.egl_dpy, "ffmpeg-vout", -+ 0, 0, 1280, 720, &de->setup.win, &de->setup.ctx, &de->setup.surf)) { ++ if (!de->window_width || !de->window_height) { ++ de->window_width = 1280; ++ de->window_height = 720; ++ } ++ if (make_window(s, de, de->setup.dpy, de->setup.egl_dpy, "ffmpeg-vout", ++ &de->setup.win, &de->setup.ctx, &de->setup.surf)) { + av_log(s, AV_LOG_ERROR, "%s: make_window failed\n", __func__); + goto fail; + } @@ -53524,6 +54133,10 @@ index 0000000000..d5b2e161d5 + return AVERROR(EINVAL); + } + ++ // Really hacky sync ++ while (de->show_all && de->q_next) { ++ usleep(3000); ++ } + + pthread_mutex_lock(&de->q_lock); + { @@ -53620,14 +54233,11 @@ index 0000000000..d5b2e161d5 + +#define OFFSET(x) offsetof(egl_display_env_t, x) +static const AVOption options[] = { -+#if 0 -+ { "display_name", "set display name", OFFSET(display_name), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM }, -+ { "window_id", "set existing window id", OFFSET(window_id), AV_OPT_TYPE_INT64, {.i64 = 0 }, 0, INT64_MAX, AV_OPT_FLAG_ENCODING_PARAM }, -+ { "window_size", "set window forced size", OFFSET(window_width), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, AV_OPT_FLAG_ENCODING_PARAM }, -+ { "window_title", "set window title", OFFSET(window_title), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM }, -+ { "window_x", "set window x offset", OFFSET(window_x), AV_OPT_TYPE_INT, {.i64 = 0 }, -INT_MAX, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM }, -+ { "window_y", "set window y offset", OFFSET(window_y), AV_OPT_TYPE_INT, {.i64 = 0 }, -INT_MAX, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM }, -+#endif ++ { "show_all", "show all frames", OFFSET(show_all), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, AV_OPT_FLAG_ENCODING_PARAM }, ++ { "window_size", "set window forced size", OFFSET(window_width), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, AV_OPT_FLAG_ENCODING_PARAM }, ++ { "window_x", "set window x offset", OFFSET(window_x), AV_OPT_TYPE_INT, {.i64 = 0 }, -INT_MAX, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM }, ++ { "window_y", "set window y offset", OFFSET(window_y), AV_OPT_TYPE_INT, {.i64 = 0 }, -INT_MAX, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM }, ++ { "fullscreen", "set fullscreen display", OFFSET(fullscreen), AV_OPT_TYPE_BOOL, {.i64 = 0 }, 0, 1, AV_OPT_FLAG_ENCODING_PARAM }, + { NULL } + +}; @@ -53659,10 +54269,10 @@ index 0000000000..d5b2e161d5 + diff --git a/libavdevice/rpi_vout.c b/libavdevice/rpi_vout.c new file mode 100644 -index 0000000000..60fe8a7075 +index 0000000000..b1617d48a8 --- /dev/null +++ b/libavdevice/rpi_vout.c -@@ -0,0 +1,534 @@ +@@ -0,0 +1,533 @@ +/* + * Copyright (c) 2013 Jeff Moguillansky + * @@ -53718,7 +54328,6 @@ index 0000000000..60fe8a7075 + +#define TRACE_ALL 0 + -+#define RPI_DISPLAY_ALL 0 +#define DISPLAY_PORT_DEPTH 4 + +typedef struct rpi_display_env_s @@ -53741,6 +54350,7 @@ index 0000000000..60fe8a7075 + int window_width, window_height; + int window_x, window_y; + int layer, fullscreen; ++ int show_all; +} rpi_display_env_t; + + @@ -53887,11 +54497,9 @@ index 0000000000..60fe8a7075 + buf->length = av_rpi_zc_length(fr_buf); + buf->alloc_size = 
av_rpi_zc_numbytes(fr_buf); + -+#if RPI_DISPLAY_ALL -+ while (atomic_load(&de->rpi_display_count) >= DISPLAY_PORT_DEPTH - 1) { ++ while (de->show_all && atomic_load(&de->rpi_display_count) >= DISPLAY_PORT_DEPTH - 1) { + usleep(5000); + } -+#endif + + { + MMAL_ES_SPECIFIC_FORMAT_T new_ess = {.video = {0}}; @@ -54164,6 +54772,7 @@ index 0000000000..60fe8a7075 + +#define OFFSET(x) offsetof(rpi_display_env_t, x) +static const AVOption options[] = { ++ { "show_all", "show all frames", OFFSET(show_all), AV_OPT_TYPE_BOOL, {.i64 = 0 }, 0, 1, AV_OPT_FLAG_ENCODING_PARAM }, + { "window_size", "set window forced size", OFFSET(window_width), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, AV_OPT_FLAG_ENCODING_PARAM }, + { "window_x", "set window x offset", OFFSET(window_x), AV_OPT_TYPE_INT, {.i64 = 0 }, -INT_MAX, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM }, + { "window_y", "set window y offset", OFFSET(window_y), AV_OPT_TYPE_INT, {.i64 = 0 }, -INT_MAX, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM }, @@ -54613,10 +55222,10 @@ index 0000000000..fbea56dd09 +}; + diff --git a/libavformat/utils.c b/libavformat/utils.c -index ba8aaebfb7..4c7bd7f5e1 100644 +index 7185fbfd71..c7b0553903 100644 --- a/libavformat/utils.c +++ b/libavformat/utils.c -@@ -3044,6 +3044,40 @@ static int has_codec_parameters(AVStream *st, const char **errmsg_ptr) +@@ -3048,6 +3048,40 @@ static int has_codec_parameters(AVStream *st, const char **errmsg_ptr) return 1; } @@ -54657,7 +55266,7 @@ index ba8aaebfb7..4c7bd7f5e1 100644 /* returns 1 or 0 if or if not decoded data was returned, or a negative error */ static int try_decode_frame(AVFormatContext *s, AVStream *st, const AVPacket *avpkt, AVDictionary **options) -@@ -3078,7 +3112,11 @@ static int try_decode_frame(AVFormatContext *s, AVStream *st, +@@ -3082,7 +3116,11 @@ static int try_decode_frame(AVFormatContext *s, AVStream *st, av_dict_set(options ? options : &thread_opt, "threads", "1", 0); if (s->codec_whitelist) av_dict_set(options ? options : &thread_opt, "codec_whitelist", s->codec_whitelist, 0); @@ -54670,7 +55279,7 @@ index ba8aaebfb7..4c7bd7f5e1 100644 if (!options) av_dict_free(&thread_opt); if (ret < 0) { -@@ -3109,6 +3147,14 @@ static int try_decode_frame(AVFormatContext *s, AVStream *st, +@@ -3113,6 +3151,14 @@ static int try_decode_frame(AVFormatContext *s, AVStream *st, if (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO) { ret = avcodec_send_packet(avctx, &pkt); @@ -54685,7 +55294,7 @@ index ba8aaebfb7..4c7bd7f5e1 100644 if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) break; if (ret >= 0) -@@ -3719,9 +3765,20 @@ FF_ENABLE_DEPRECATION_WARNINGS +@@ -3723,9 +3769,20 @@ FF_ENABLE_DEPRECATION_WARNINGS // Try to just open decoders, in case this is enough to get parameters. 
if (!has_codec_parameters(st, NULL) && st->request_probe <= 0) { if (codec && !avctx->codec) @@ -54710,7 +55319,7 @@ index ba8aaebfb7..4c7bd7f5e1 100644 if (!options) av_dict_free(&thread_opt); diff --git a/libavutil/Makefile b/libavutil/Makefile -index 9b08372eb2..b0b5be0fa6 100644 +index 9b08372eb2..c965c2dc85 100644 --- a/libavutil/Makefile +++ b/libavutil/Makefile @@ -68,6 +68,7 @@ HEADERS = adler32.h \ @@ -54729,7 +55338,7 @@ index 9b08372eb2..b0b5be0fa6 100644 ARCH_HEADERS = bswap.h \ intmath.h \ -@@ -180,10 +182,12 @@ OBJS-$(CONFIG_LZO) += lzo.o +@@ -180,6 +182,7 @@ OBJS-$(CONFIG_LZO) += lzo.o OBJS-$(CONFIG_MEDIACODEC) += hwcontext_mediacodec.o OBJS-$(CONFIG_OPENCL) += hwcontext_opencl.o OBJS-$(CONFIG_QSV) += hwcontext_qsv.o @@ -54737,11 +55346,6 @@ index 9b08372eb2..b0b5be0fa6 100644 OBJS-$(CONFIG_VAAPI) += hwcontext_vaapi.o OBJS-$(CONFIG_VIDEOTOOLBOX) += hwcontext_videotoolbox.o OBJS-$(CONFIG_VDPAU) += hwcontext_vdpau.o - OBJS-$(CONFIG_VULKAN) += hwcontext_vulkan.o -+OBJS-$(CONFIG_RPI) += rpi_sand_fns.o - - OBJS += $(COMPAT_OBJS:%=../compat/%) - diff --git a/libavutil/aarch64/Makefile b/libavutil/aarch64/Makefile index 5613813ba8..ab8bcfcf34 100644 --- a/libavutil/aarch64/Makefile @@ -54756,10 +55360,10 @@ index 5613813ba8..ab8bcfcf34 100644 + diff --git a/libavutil/aarch64/rpi_sand_neon.S b/libavutil/aarch64/rpi_sand_neon.S new file mode 100644 -index 0000000000..5922d6eaf5 +index 0000000000..2169d4f0ed --- /dev/null +++ b/libavutil/aarch64/rpi_sand_neon.S -@@ -0,0 +1,681 @@ +@@ -0,0 +1,676 @@ +/* +Copyright (c) 2021 Michael Eiler + @@ -54839,6 +55443,10 @@ index 0000000000..5922d6eaf5 + + // w14 = 0, processed block count + eor w14, w14, w14 ++ ++ cmp w8, #0 ++ beq no_main_y8 ++ +block_loop: + // copy 128 bytes (a full block) into the vector registers v0-v7 and increase the src address by 128 + // fortunately these aren't callee saved ones, meaning we don't need to backup them @@ -54860,6 +55468,7 @@ index 0000000000..5922d6eaf5 + + // handle the last block at the end of each row + // at most 127 byte values copied from src to dst ++no_main_y8: + eor w5, w5, w5 // i = 0 +incomplete_block_loop_y8: + cmp w5, w9 @@ -54907,6 +55516,7 @@ index 0000000000..5922d6eaf5 + ldr w7, [sp, #8] + + // w15 contains the number of rows we need to process ++ // counts down + ldr w15, [sp, #16] + + // number of full blocks, w8 = _w / (stride1 >> 1) == _w / 64 == _w >> 6 @@ -54918,6 +55528,9 @@ index 0000000000..5922d6eaf5 + lsl w9, w8, #6 + sub w9, w7, w9 + ++ // Skip at the end of the line to account for stride ++ sub w12, w1, w7 ++ + // address delta to the beginning of the next block + // w10 = (stride2 * stride1 - stride1) = stride2 * 128 - 128 + lsl w10, w6, #7 @@ -54926,8 +55539,6 @@ index 0000000000..5922d6eaf5 + // w11 = row address start offset = 0 + eor w11, w11, w11 + -+ // w12 = 0, row counter -+ eor w12, w12, w12 +row_loop_c8: + // start of the first block within the current row + // x13 = row offset + src @@ -54936,6 +55547,10 @@ index 0000000000..5922d6eaf5 + + // w14 = 0, processed block count + eor w14, w14, w14 ++ ++ cmp w8, #0 ++ beq no_main_c8 ++ +block_loop_c8: + // load the full block -> 128 bytes, the block contains 64 interleaved U and V values + ld2 { v0.16b, v1.16b }, [x13], #32 @@ -54965,6 +55580,7 @@ index 0000000000..5922d6eaf5 + cmp w8, w14 + bgt block_loop_c8 + ++no_main_c8: + // handle incomplete block at the end of every row + eor w5, w5, w5 // point counter, this might be +incomplete_block_loop_c8: @@ -54986,13 +55602,13 @@ index 0000000000..5922d6eaf5 + b 
incomplete_block_loop_c8 +incomplete_block_loop_end_c8: + -+ + // increase row_offset by stride1 + add w11, w11, #128 -+ add w12, w12, #1 ++ add x0, x0, w12, sxtw ++ add x2, x2, w12, sxtw + + // jump to row_Loop_c8 iff the row count is small than the height -+ cmp w15, w12 ++ subs w15, w15, #1 + bgt row_loop_c8 + + ret @@ -55010,14 +55626,12 @@ index 0000000000..5922d6eaf5 +// unsigned int h); // [sp, #0] + +function ff_rpi_sand30_lines_to_planar_y16, export=1 -+ str x19, [sp, #-8] -+ str x20, [sp, #-16] -+ str x21, [sp, #-24] -+ str x22, [sp, #-32] -+ str x23, [sp, #-40] -+ ++ stp x19, x20, [sp, #-48]! ++ stp x21, x22, [sp, #16] ++ stp x23, x24, [sp, #32] ++ + // w6 = argument h -+ ldr w6, [sp, #0] ++ ldr w6, [sp, #48] + + // slice_inc = ((stride2 - 1) * stride1) + mov w5, w4 @@ -55055,18 +55669,10 @@ index 0000000000..5922d6eaf5 + + // store the number of bytes in w20 which we copy too much for every row + // when the width of the frame is not a multiple of 96 (128bytes storing 96 10bit values) -+ mov w20, #3 -+ mul w21, w10, w20 -+ mov w20, #96 -+ sub w20, w20, w21 // w20 = 96 - #integers*3 -+ sub w20, w20, w11 // w20 = 96 - #integers*3 - rem. points -+ cmp w20, #96 -+ cset w21, eq -+ mov w23, #96 -+ mul w23, w23, w21 // 0 or 1 * 96 -+ sub w20, w20, w23 // = w20 mod 96 -+ lsl w20, w20, #1 // convert to bytes (*2 since we store 16bits per value) -+ ++ mov w20, #96*2 ++ mul w20, w20, w9 ++ sub w20, w1, w20 ++ + mov w23, #0 // flag to check whether the last line had already been processed + + // bitmask to clear the uppper 6bits of the result values @@ -55172,7 +55778,7 @@ index 0000000000..5922d6eaf5 + + + add x2, x2, #128 // src += stride1 (start of the next row) -+ sub x0, x0, x20 // subtract the bytes we copied too much from dst ++ add x0, x0, w20, sxtw // subtract the bytes we copied too much from dst + add w12, w12, #1 + b row_loop_y16 +row_loop_y16_fin: @@ -55190,7 +55796,7 @@ index 0000000000..5922d6eaf5 + b row_loop_y16 +row_loop_y16_fin2: + -+ add x0, x0, x20 // with the last row we didn't actually move the dst ptr to far ahead, therefore readd the diference ++ sub x0, x0, x20, sxtw // with the last row we didn't actually move the dst ptr to far ahead, therefore readd the diference + + // now we've got to handle the last block in the last row + eor w12, w12, w12 // w12 = 0 = counter @@ -55224,12 +55830,9 @@ index 0000000000..5922d6eaf5 + strh w15, [x0], #2 +final_values_y16_fin: + -+ ldr x23, [sp, #-40] -+ ldr x22, [sp, #-32] -+ ldr x21, [sp, #-24] -+ ldr x20, [sp, #-16] -+ ldr x19, [sp, #-8] -+ ++ ldp x23, x24, [sp, #32] ++ ldp x21, x22, [sp, #16] ++ ldp x19, x20, [sp], #48 + ret +endfunc + @@ -55282,14 +55885,12 @@ index 0000000000..5922d6eaf5 +.endm + +function ff_rpi_sand30_lines_to_planar_c16, export=1 -+ str x19, [sp, #-8] -+ str x20, [sp, #-16] -+ str x21, [sp, #-24] -+ str x22, [sp, #-32] -+ str x23, [sp, #-40] ++ stp x19, x20, [sp, #-48]! 
++ stp x21, x22, [sp, #16] ++ stp x23, x24, [sp, #32] + -+ ldr w3, [sp, #8] // w3 = width -+ ldr w7, [sp, #16] // w7 = height ++ ldr w3, [sp, #48+8] // w3 = width ++ ldr w7, [sp, #48+16] // w7 = height + + // reserve space on the stack for intermediate results + sub sp, sp, #256 @@ -55316,10 +55917,9 @@ index 0000000000..5922d6eaf5 + add w8, w8, w19 + + // bytes we have to move dst back by at the end of every row -+ mov w21, #48 -+ mul w21, w21, w19 -+ sub w21, w21, w9 -+ lsl w21, w21, #1 // w21 = (#48 * w19 - rem_pix) * 2 ++ mov w21, #48*2 ++ mul w21, w21, w8 ++ sub w21, w1, w21 + + mov w20, #0 // w20 = flag, last row processed + @@ -55376,8 +55976,8 @@ index 0000000000..5922d6eaf5 + + add w10, w10, #128 + add w12, w12, #1 -+ sub x0, x0, x21 // move dst pointers back by x21 -+ sub x2, x2, x21 ++ add x0, x0, w21, sxtw // move dst pointers back by x21 ++ add x2, x2, w21, sxtw + b row_loop_c16 +row_loop_c16_fin: + @@ -55389,8 +55989,8 @@ index 0000000000..5922d6eaf5 + b row_loop_c16 + +row_loop_c16_fin2: -+ add x0, x0, x21 // readd x21 in case of the last row -+ add x2, x2, x21 // so that we can write out the few remaining pixels ++ sub x0, x0, w21, sxtw // readd x21 in case of the last row ++ sub x2, x2, w21, sxtw // so that we can write out the few remaining pixels + + // last incomplete block to be finished + // read operations are fine, stride2 is more than large enough even if rem_pix is 0 @@ -55420,11 +56020,10 @@ index 0000000000..5922d6eaf5 +rem_pix_c16_fin: + + add sp, sp, #256 -+ ldr x23, [sp, #-40] -+ ldr x22, [sp, #-32] -+ ldr x21, [sp, #-24] -+ ldr x20, [sp, #-16] -+ ldr x19, [sp, #-8] ++ ++ ldp x23, x24, [sp, #32] ++ ldp x21, x22, [sp, #16] ++ ldp x19, x20, [sp], #48 + ret +endfunc + @@ -56390,46 +56989,6 @@ index 0000000000..447f367bea + +#endif // AVUTIL_ARM_SAND_NEON_H + -diff --git a/libavutil/buffer.c b/libavutil/buffer.c -index 38a554208a..b0fedabc3e 100644 ---- a/libavutil/buffer.c -+++ b/libavutil/buffer.c -@@ -273,6 +273,19 @@ static void buffer_pool_free(AVBufferPool *pool) - av_freep(&pool); - } - -+void av_buffer_pool_flush(AVBufferPool *pool) -+{ -+ ff_mutex_lock(&pool->mutex); -+ while (pool->pool) { -+ BufferPoolEntry *buf = pool->pool; -+ pool->pool = buf->next; -+ -+ buf->free(buf->opaque, buf->data); -+ av_freep(&buf); -+ } -+ ff_mutex_unlock(&pool->mutex); -+} -+ - void av_buffer_pool_uninit(AVBufferPool **ppool) - { - AVBufferPool *pool; -diff --git a/libavutil/buffer.h b/libavutil/buffer.h -index c0f3f6cc9a..998beec9ac 100644 ---- a/libavutil/buffer.h -+++ b/libavutil/buffer.h -@@ -267,6 +267,11 @@ AVBufferPool *av_buffer_pool_init2(int size, void *opaque, - AVBufferRef* (*alloc)(void *opaque, int size), - void (*pool_free)(void *opaque)); - -+/** -+ * Free all available buffers in a buffer pool. -+ */ -+ void av_buffer_pool_flush(AVBufferPool *pool); -+ - /** - * Mark the pool as being available for freeing. It will actually be freed only - * once all the allocated buffers associated with the pool are released. 
Thus it diff --git a/libavutil/frame.c b/libavutil/frame.c index 2e952edd29..96e8bf5b3e 100644 --- a/libavutil/frame.c @@ -56488,26 +57047,30 @@ index fc67db0f6c..b1a7eb4858 100644 * @} */ diff --git a/libavutil/hwcontext_drm.c b/libavutil/hwcontext_drm.c -index 32cbde82eb..9ba8b7b2dd 100644 +index 32cbde82eb..c897dfade7 100644 --- a/libavutil/hwcontext_drm.c +++ b/libavutil/hwcontext_drm.c @@ -21,6 +21,7 @@ #include #include -+#include ++#include #include #include "avassert.h" -@@ -28,6 +29,7 @@ +@@ -28,6 +29,11 @@ #include "hwcontext_drm.h" #include "hwcontext_internal.h" #include "imgutils.h" +#include "libavutil/rpi_sand_fns.h" ++ ++#include ++#include ++#include static void drm_device_free(AVHWDeviceContext *hwdev) -@@ -43,6 +45,11 @@ static int drm_device_create(AVHWDeviceContext *hwdev, const char *device, +@@ -43,6 +49,11 @@ static int drm_device_create(AVHWDeviceContext *hwdev, const char *device, AVDRMDeviceContext *hwctx = hwdev->hwctx; drmVersionPtr version; @@ -56519,17 +57082,78 @@ index 32cbde82eb..9ba8b7b2dd 100644 hwctx->fd = open(device, O_RDWR); if (hwctx->fd < 0) return AVERROR(errno); -@@ -120,6 +127,9 @@ static int drm_map_frame(AVHWFramesContext *hwfc, - if (flags & AV_HWFRAME_MAP_WRITE) - mmap_prot |= PROT_WRITE; +@@ -85,18 +96,37 @@ static int drm_get_buffer(AVHWFramesContext *hwfc, AVFrame *frame) + typedef struct DRMMapping { + // Address and length of each mmap()ed region. + int nb_regions; ++ unsigned int dmaflags; + void *address[AV_DRM_MAX_PLANES]; + size_t length[AV_DRM_MAX_PLANES]; ++ int fds[AV_DRM_MAX_PLANES]; + } DRMMapping; ++static int dmasync(const int fd, const unsigned int flags) ++{ ++ struct dma_buf_sync sync = { ++ .flags = flags ++ }; ++ while (ioctl(fd, DMA_BUF_IOCTL_SYNC, &sync) == -1) { ++ const int err = errno; ++ if (errno == EINTR) ++ continue; ++ av_log(NULL, AV_LOG_WARNING, "%s: ioctl failed: flags=%#x\n", __func__, flags); ++ return -err; ++ } ++ return 0; ++} ++ + static void drm_unmap_frame(AVHWFramesContext *hwfc, + HWMapDescriptor *hwmap) + { + DRMMapping *map = hwmap->priv; + int i; + +- for (i = 0; i < map->nb_regions; i++) ++ for (i = 0; i < map->nb_regions; i++) { + munmap(map->address[i], map->length[i]); ++ dmasync(map->fds[i], DMA_BUF_SYNC_END | map->dmaflags); ++ } + + av_free(map); + } +@@ -114,15 +144,28 @@ static int drm_map_frame(AVHWFramesContext *hwfc, + if (!map) + return AVERROR(ENOMEM); + ++ for (i = 0; i < AV_DRM_MAX_PLANES; i++) ++ map->fds[i] = -1; ++ + mmap_prot = 0; +- if (flags & AV_HWFRAME_MAP_READ) ++ if (flags & AV_HWFRAME_MAP_READ) { ++ map->dmaflags |= DMA_BUF_SYNC_READ; + mmap_prot |= PROT_READ; +- if (flags & AV_HWFRAME_MAP_WRITE) ++ } ++ if (flags & AV_HWFRAME_MAP_WRITE) { ++ map->dmaflags |= DMA_BUF_SYNC_WRITE; + mmap_prot |= PROT_WRITE; ++ } ++ + if (dst->format == AV_PIX_FMT_NONE) + dst->format = hwfc->sw_format; -+ + av_assert0(desc->nb_objects <= AV_DRM_MAX_PLANES); for (i = 0; i < desc->nb_objects; i++) { - addr = mmap(NULL, desc->objects[i].size, mmap_prot, MAP_SHARED, -@@ -151,6 +161,23 @@ static int drm_map_frame(AVHWFramesContext *hwfc, +- addr = mmap(NULL, desc->objects[i].size, mmap_prot, MAP_SHARED, ++ dmasync(desc->objects[i].fd, DMA_BUF_SYNC_START | map->dmaflags); ++ map->fds[i] = desc->objects[i].fd; ++ ++ addr = mmap(NULL, desc->objects[i].size, mmap_prot, MAP_SHARED | MAP_POPULATE, + desc->objects[i].fd, 0); + if (addr == MAP_FAILED) { + err = AVERROR(errno); +@@ -151,6 +194,23 @@ static int drm_map_frame(AVHWFramesContext *hwfc, dst->width = src->width; dst->height = src->height; @@ 
-56553,7 +57177,18 @@ index 32cbde82eb..9ba8b7b2dd 100644 err = ff_hwframe_map_create(src->hw_frames_ctx, dst, src, &drm_unmap_frame, map); -@@ -178,7 +205,15 @@ static int drm_transfer_get_formats(AVHWFramesContext *ctx, +@@ -160,7 +220,9 @@ static int drm_map_frame(AVHWFramesContext *hwfc, + return 0; + + fail: +- for (i = 0; i < desc->nb_objects; i++) { ++ for (i = 0; i < AV_DRM_MAX_PLANES; i++) { ++ if (map->fds[i] != -1) ++ dmasync(map->fds[i], DMA_BUF_SYNC_END | map->dmaflags); + if (map->address[i]) + munmap(map->address[i], map->length[i]); + } +@@ -178,7 +240,15 @@ static int drm_transfer_get_formats(AVHWFramesContext *ctx, if (!pix_fmts) return AVERROR(ENOMEM); @@ -56570,7 +57205,7 @@ index 32cbde82eb..9ba8b7b2dd 100644 pix_fmts[1] = AV_PIX_FMT_NONE; *formats = pix_fmts; -@@ -197,18 +232,82 @@ static int drm_transfer_data_from(AVHWFramesContext *hwfc, +@@ -197,18 +267,82 @@ static int drm_transfer_data_from(AVHWFramesContext *hwfc, map = av_frame_alloc(); if (!map) return AVERROR(ENOMEM); @@ -56586,7 +57221,7 @@ index 32cbde82eb..9ba8b7b2dd 100644 - map->height = dst->height; +#if 0 + av_log(hwfc, AV_LOG_INFO, "%s: src fmt=%d (%d), dst fmt=%d (%d) s=%dx%d l=%d/%d/%d/%d, d=%dx%d l=%d/%d/%d\n", __func__, -+ hwfc->sw_format, AV_PIX_FMT_RPI4_8, dst->format, AV_PIX_FMT_YUV420P10LE, ++ map->hwfc_format, AV_PIX_FMT_RPI4_8, dst->format, AV_PIX_FMT_YUV420P10LE, + map->width, map->height, + map->linesize[0], + map->linesize[1], @@ -56657,7 +57292,7 @@ index 32cbde82eb..9ba8b7b2dd 100644 err = 0; fail: -@@ -223,7 +322,10 @@ static int drm_transfer_data_to(AVHWFramesContext *hwfc, +@@ -223,7 +357,10 @@ static int drm_transfer_data_to(AVHWFramesContext *hwfc, int err; if (src->width > hwfc->width || src->height > hwfc->height) @@ -56669,29 +57304,10 @@ index 32cbde82eb..9ba8b7b2dd 100644 map = av_frame_alloc(); if (!map) diff --git a/libavutil/pixdesc.c b/libavutil/pixdesc.c -index 9d61c52567..4e36a110c1 100644 +index 9d61c52567..1cda09f53c 100644 --- a/libavutil/pixdesc.c +++ b/libavutil/pixdesc.c -@@ -2073,6 +2073,18 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = { - .name = "cuda", - .flags = AV_PIX_FMT_FLAG_HWACCEL, - }, -+ [AV_PIX_FMT_RPI] = { -+ .name = "rpi", -+ .flags = AV_PIX_FMT_FLAG_HWACCEL, -+ }, -+ [AV_PIX_FMT_RPI4_10] = { -+ .name = "rpi", -+ .flags = AV_PIX_FMT_FLAG_HWACCEL, -+ }, -+ [AV_PIX_FMT_RPI4_8] = { -+ .name = "rpi", -+ .flags = AV_PIX_FMT_FLAG_HWACCEL, -+ }, - [AV_PIX_FMT_AYUV64LE] = { - .name = "ayuv64le", - .nb_components = 4, -@@ -2371,6 +2383,30 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = { +@@ -2371,6 +2371,38 @@ static const AVPixFmtDescriptor av_pix_fmt_descriptors[AV_PIX_FMT_NB] = { .name = "vulkan", .flags = AV_PIX_FMT_FLAG_HWACCEL, }, @@ -56718,27 +57334,23 @@ index 9d61c52567..4e36a110c1 100644 + { 1, 4, 1, 0, 10, 1, 9, 2 }, /* V */ + }, + .flags = 0, ++ }, ++ [AV_PIX_FMT_RPI4_8] = { ++ .name = "rpi", ++ .flags = AV_PIX_FMT_FLAG_HWACCEL, ++ }, ++ [AV_PIX_FMT_RPI4_10] = { ++ .name = "rpi", ++ .flags = AV_PIX_FMT_FLAG_HWACCEL, + }, }; #if FF_API_PLUS1_MINUS1 FF_ENABLE_DEPRECATION_WARNINGS diff --git a/libavutil/pixfmt.h b/libavutil/pixfmt.h -index 1c625cfc8a..3400390a77 100644 +index 1c625cfc8a..e7f543b797 100644 --- a/libavutil/pixfmt.h +++ b/libavutil/pixfmt.h -@@ -234,6 +234,11 @@ enum AVPixelFormat { - */ - AV_PIX_FMT_CUDA, - -+ /** -+ * HW acceleration through RPI. -+ */ -+ AV_PIX_FMT_RPI, -+ - AV_PIX_FMT_0RGB, ///< packed RGB 8:8:8, 32bpp, XRGBXRGB... 
X=unused/undefined - AV_PIX_FMT_RGB0, ///< packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined - AV_PIX_FMT_0BGR, ///< packed BGR 8:8:8, 32bpp, XBGRXBGR... X=unused/undefined -@@ -357,6 +362,12 @@ enum AVPixelFormat { +@@ -357,6 +357,12 @@ enum AVPixelFormat { AV_PIX_FMT_Y210BE, ///< packed YUV 4:2:2 like YUYV422, 20bpp, data in the high bits, big-endian AV_PIX_FMT_Y210LE, ///< packed YUV 4:2:2 like YUYV422, 20bpp, data in the high bits, little-endian @@ -56753,7 +57365,7 @@ index 1c625cfc8a..3400390a77 100644 }; diff --git a/libavutil/rpi_sand_fn_pw.h b/libavutil/rpi_sand_fn_pw.h new file mode 100644 -index 0000000000..0324f6826d +index 0000000000..0d5d203dc3 --- /dev/null +++ b/libavutil/rpi_sand_fn_pw.h @@ -0,0 +1,227 @@ @@ -56813,7 +57425,7 @@ index 0000000000..0324f6826d + const unsigned int w = _w; + const unsigned int mask = stride1 - 1; + -+#if PW == 1 && (HAVE_SAND_ASM || HAVE_SAND_ASM64) ++#if PW == 1 && HAVE_SAND_ASM + if (_x == 0) { + ff_rpi_sand8_lines_to_planar_y8(dst, dst_stride, + src, stride1, stride2, _x, y, _w, h); @@ -56865,7 +57477,7 @@ index 0000000000..0324f6826d + const unsigned int w = _w * 2; + const unsigned int mask = stride1 - 1; + -+#if PW == 1 && (HAVE_SAND_ASM || HAVE_SAND_ASM64) ++#if PW == 1 && HAVE_SAND_ASM + if (_x == 0) { + ff_rpi_sand8_lines_to_planar_c8(dst_u, dst_stride_u, dst_v, dst_stride_v, + src, stride1, stride2, _x, y, _w, h); @@ -56986,10 +57598,10 @@ index 0000000000..0324f6826d + diff --git a/libavutil/rpi_sand_fns.c b/libavutil/rpi_sand_fns.c new file mode 100644 -index 0000000000..4256adf9c8 +index 0000000000..1f543e9357 --- /dev/null +++ b/libavutil/rpi_sand_fns.c -@@ -0,0 +1,357 @@ +@@ -0,0 +1,356 @@ +/* +Copyright (c) 2018 Raspberry Pi (Trading) Ltd. +All rights reserved. @@ -57031,8 +57643,7 @@ index 0000000000..4256adf9c8 +#define HAVE_SAND_ASM 1 +#elif ARCH_AARCH64 && HAVE_NEON +#include "aarch64/rpi_sand_neon.h" -+#define HAVE_SAND_ASM 0 -+#define HAVE_SAND_ASM64 1 ++#define HAVE_SAND_ASM 1 +#else +#define HAVE_SAND_ASM 0 +#endif @@ -57089,7 +57700,7 @@ index 0000000000..4256adf9c8 + const uint8_t * p0 = src + (x0 & mask) + y * stride1 + (x0 & ~mask) * stride2; + const unsigned int slice_inc = ((stride2 - 1) * stride1) >> 2; // RHS of a stripe to LHS of next in words + -+#if HAVE_SAND_ASM || HAVE_SAND_ASM64 ++#if HAVE_SAND_ASM + if (_x == 0) { + ff_rpi_sand30_lines_to_planar_y16(dst, dst_stride, src, stride1, stride2, _x, y, _w, h); + return; @@ -57155,7 +57766,7 @@ index 0000000000..4256adf9c8 + const uint8_t * p0 = src + (x0 & mask) + y * stride1 + (x0 & ~mask) * stride2; + const unsigned int slice_inc = ((stride2 - 1) * stride1) >> 2; // RHS of a stripe to LHS of next in words + -+#if HAVE_SAND_ASM || HAVE_SAND_ASM64 ++#if HAVE_SAND_ASM + if (_x == 0) { + ff_rpi_sand30_lines_to_planar_c16(dst_u, dst_stride_u, dst_v, dst_stride_v, + src, stride1, stride2, _x, y, _w, h); @@ -57646,6 +58257,116 @@ index 0000000000..fcce72226a +Display should be a proper device rather than a kludge in ffmpeg.c + + +diff --git a/pi-util/TESTMESA.txt b/pi-util/TESTMESA.txt +new file mode 100644 +index 0000000000..92bc13a3df +--- /dev/null ++++ b/pi-util/TESTMESA.txt +@@ -0,0 +1,82 @@ ++# Setup & Build instructions for testing Argon30 mesa support (on Pi4) ++ ++# These assume that the drm_mmal test for Sand8 has been built on this Pi ++# as build relies on many of the same files ++ ++# 1st get everything required to build ffmpeg ++# If sources aren't already enabled on your Pi then enable them ++sudo su ++sed "s/#deb-src/deb-src/" 
/etc/apt/sources.list > /tmp/sources.list ++sed "s/#deb-src/deb-src/" /etc/apt/sources.list.d/raspi.list > /tmp/raspi.list ++mv /tmp/sources.list /etc/apt/ ++mv /tmp/raspi.list /etc/apt/sources.list.d/ ++apt update ++ ++# Get dependancies ++sudo apt build-dep ffmpeg ++ ++sudo apt install meson libepoxy-dev libxcb-dri3-dev libxcb1-dev libx11-dev libx11-xcb-dev libdrm-dev ++ ++# Enable H265 V4L2 request decoder ++sudo su ++echo dtoverlay=rpivid-v4l2 >> /boot/config.txt ++# You may also want to add more CMA if you are going to try 4k videos ++# Change the dtoverlay=vc4-fkms-v3d line in config.txt to read ++# dtoverlay=vc4-fkms-v3d,cma-512 ++reboot ++# Check it has turned up ++ls -la /dev/video* ++# This should include video19 ++# crw-rw----+ 1 root video 81, 7 Aug 4 17:25 /dev/video19 ++ ++# Currently on the Pi the linux headers from the debian distro don't match ++# the kernel that we ship and we need to update them - hopefully this step ++# will be unneeded in the future ++sudo apt install git bc bison flex libssl-dev make ++git clone --depth=1 https://github.com/raspberrypi/linux --branch rpi-5.10.y ++cd linux ++KERNEL=kernel7l ++make bcm2711_defconfig ++make headers_install ++sudo cp -r usr/include/linux /usr/include ++cd .. ++ ++# Config - this builds a staticly linked ffmpeg which is easier for testing ++pi-util/conf_native.sh --noshared ++ ++# Build (this is a bit dull) ++# If you want to poke the source the libavdevice/egl_vout.c contains the ++# output code - ++cd out/armv7-static-rel ++ ++# Check that you have actually configured V4L2 request ++grep HEVC_V4L2REQUEST config.h ++# You are hoping for ++# #define CONFIG_HEVC_V4L2REQUEST_HWACCEL 1 ++# if you get 0 then the config has failed ++ ++make -j6 ++ ++# Grab test streams ++wget http://www.jell.yfish.us/media/jellyfish-3-mbps-hd-h264.mkv ++wget http://www.jell.yfish.us/media/jellyfish-3-mbps-hd-hevc.mkv ++wget http://www.jell.yfish.us/media/jellyfish-3-mbps-hd-hevc-10bit.mkv ++ ++# Test i420 output (works currently) ++./ffmpeg -no_cvt_hw -vcodec h264_v4l2m2m -i jellyfish-3-mbps-hd-h264.mkv -f vout_egl - ++ ++# Test Sand8 output - doesn't currently work but should once you have ++# Sand8 working in drm_mmal. I can't guarantee that this will work as ++# I can't test this path with a known working format, but the debug looks ++# good. 
If this doesn't work & drm_mmal does with sand8 then come back to me ++# The "show_all 1" forces vout to display every frame otherwise it drops any ++# frame that would cause it to block ++./ffmpeg -no_cvt_hw -hwaccel drm -vcodec hevc -i jellyfish-3-mbps-hd-hevc.mkv -show_all 1 -f vout_egl - ++ ++# Test Sand30 - doesn't currently work ++# (Beware that when FFmpeg errors out it often leaves your teminal window ++# in a state where you need to reset it) ++./ffmpeg -no_cvt_hw -hwaccel drm -vcodec hevc -i jellyfish-3-mbps-hd-hevc-10bit.mkv -f vout_egl - ++ ++ ++ +diff --git a/pi-util/clean_usr_libs.sh b/pi-util/clean_usr_libs.sh +new file mode 100755 +index 0000000000..67888c8785 +--- /dev/null ++++ b/pi-util/clean_usr_libs.sh +@@ -0,0 +1,16 @@ ++set -e ++U=/usr/lib/arm-linux-gnueabihf ++rm -f $U/libavcodec.* ++rm -f $U/libavdevice.* ++rm -f $U/libavfilter.* ++rm -f $U/libavformat.* ++rm -f $U/libavresample.* ++rm -f $U/libavutil.* ++U=/usr/lib/arm-linux-gnueabihf/neon/vfp ++rm -f $U/libavcodec.* ++rm -f $U/libavdevice.* ++rm -f $U/libavfilter.* ++rm -f $U/libavformat.* ++rm -f $U/libavresample.* ++rm -f $U/libavutil.* ++ diff --git a/pi-util/conf_arm64_native.sh b/pi-util/conf_arm64_native.sh new file mode 100644 index 0000000000..9e3bbfa190 @@ -58203,34 +58924,55 @@ index 0000000000..fc14f2a3c2 +1,WPP_F_ericsson_MAIN_2,WPP_F_ericsson_MAIN_2.bit,WPP_F_ericsson_MAIN_2_yuv.md5 diff --git a/pi-util/conf_native.sh b/pi-util/conf_native.sh new file mode 100755 -index 0000000000..063edbf8af +index 0000000000..38c16b1f11 --- /dev/null +++ b/pi-util/conf_native.sh -@@ -0,0 +1,56 @@ +@@ -0,0 +1,76 @@ +echo "Configure for native build" + +FFSRC=`pwd` ++MC=`uname -m` + -+RPI_OPT_VC=/opt/vc -+RPI_INCLUDES="-I$RPI_OPT_VC/include -I$RPI_OPT_VC/include/interface/vcos/pthreads -I$RPI_OPT_VC/include/interface/vmcs_host/linux" -+RPI_LIBDIRS="-L$RPI_OPT_VC/lib" -+RPI_DEFINES="-D__VCCOREVER__=0x4000000 -mfpu=neon-vfpv4" +#RPI_KEEPS="-save-temps=obj" +RPI_KEEPS="" + ++if [ "$MC" == "aarch64" ]; then ++ echo "M/C aarch64" ++ A=aarch64-linux-gnu ++ B=arm64 ++ MCOPTS= ++ RPI_INCLUDES= ++ RPI_LIBDIRS= ++ RPI_DEFINES= ++ RPI_EXTRALIBS= ++ RPIOPTS="--disable-mmal --enable-sand" ++else ++ echo "M/C armv7" ++ A=arm-linux-gnueabihf ++ B=armv7 ++ MCOPTS="--arch=armv6t2 --cpu=cortex-a7" ++ RPI_OPT_VC=/opt/vc ++ RPI_INCLUDES="-I$RPI_OPT_VC/include -I$RPI_OPT_VC/include/interface/vcos/pthreads -I$RPI_OPT_VC/include/interface/vmcs_host/linux" ++ RPI_LIBDIRS="-L$RPI_OPT_VC/lib" ++ RPI_DEFINES="-D__VCCOREVER__=0x4000000 -mfpu=neon-vfpv4" ++ RPI_EXTRALIBS="-Wl,--start-group -lbcm_host -lmmal -lmmal_util -lmmal_core -lvcos -lvcsm -lvchostif -lvchiq_arm" ++ RPIOPTS="--enable-mmal --enable-rpi" ++fi ++C=`lsb_release -sc` ++ +SHARED_LIBS="--enable-shared" +if [ "$1" == "--noshared" ]; then + SHARED_LIBS="--disable-shared" -+ OUT=out/armv7-static-rel ++ OUT=out/$B-$C-static-rel + echo Static libs +else + echo Shared libs -+ OUT=out/armv7-shared-rel ++ OUT=out/$B-$C-shared-rel +fi + +USR_PREFIX=$FFSRC/$OUT/install -+LIB_PREFIX=$USR_PREFIX/lib/arm-linux-gnueabihf -+INC_PREFIX=$USR_PREFIX/include/arm-linux-gnueabihf ++LIB_PREFIX=$USR_PREFIX/lib/$A ++INC_PREFIX=$USR_PREFIX/include/$A + +mkdir -p $FFSRC/$OUT +cd $FFSRC/$OUT @@ -58239,12 +58981,9 @@ index 0000000000..063edbf8af + --prefix=$USR_PREFIX\ + --libdir=$LIB_PREFIX\ + --incdir=$INC_PREFIX\ -+ --arch=armv6t2\ -+ --cpu=cortex-a7\ ++ $MCOPTS\ + --disable-stripping\ + --disable-thumb\ -+ --enable-mmal\ -+ --enable-rpi\ + --enable-v4l2-request\ + --enable-libdrm\ + --enable-epoxy\ @@ 
-58252,10 +58991,12 @@ index 0000000000..063edbf8af + --enable-vout-drm\ + --enable-vout-egl\ + $SHARED_LIBS\ ++ $RPIOPTS\ + --extra-cflags="-ggdb $RPI_KEEPS $RPI_DEFINES $RPI_INCLUDES"\ + --extra-cxxflags="$RPI_DEFINES $RPI_INCLUDES"\ + --extra-ldflags="$RPI_LIBDIRS"\ -+ --extra-libs="-Wl,--start-group -lbcm_host -lmmal -lmmal_util -lmmal_core -lvcos -lvcsm -lvchostif -lvchiq_arm"\ ++ --extra-libs="$RPI_EXTRALIBS"\ ++ --extra-version="rpi" + +# --enable-decoder=hevc_rpi\ +# --enable-extra-warnings\ @@ -58373,10 +59114,10 @@ index 0000000000..92cd9e7cfd +# -Wa,-ahls diff --git a/pi-util/ffconf.py b/pi-util/ffconf.py new file mode 100755 -index 0000000000..2e59e6ceb5 +index 0000000000..c76a3734ac --- /dev/null +++ b/pi-util/ffconf.py -@@ -0,0 +1,216 @@ +@@ -0,0 +1,215 @@ +#!/usr/bin/env python + +import string @@ -58388,14 +59129,12 @@ index 0000000000..2e59e6ceb5 +import csv +from stat import * + -+ffmpeg_exec = "./ffmpeg" -+ +CODEC_HEVC_RPI = 1 +HWACCEL_RPI = 2 +HWACCEL_DRM = 3 +HWACCEL_VAAPI = 4 + -+def testone(fileroot, srcname, es_file, md5_file, pix, dectype, vcodec): ++def testone(fileroot, srcname, es_file, md5_file, pix, dectype, vcodec, ffmpeg_exec): + hwaccel = "" + if dectype == HWACCEL_RPI: + hwaccel = "rpi" @@ -58502,7 +59241,7 @@ index 0000000000..2e59e6ceb5 + return True + return False + -+def doconf(csva, tests, test_root, vcodec, dectype): ++def doconf(csva, tests, test_root, vcodec, dectype, ffmpeg_exec): + unx_failures = [] + unx_success = [] + failures = 0 @@ -58514,7 +59253,7 @@ index 0000000000..2e59e6ceb5 + print "==== ", name, + sys.stdout.flush() + -+ rv = testone(os.path.join(test_root, name), name, a[2], a[3], a[4], dectype=dectype, vcodec=vcodec) ++ rv = testone(os.path.join(test_root, name), name, a[2], a[3], a[4], dectype=dectype, vcodec=vcodec, ffmpeg_exec=ffmpeg_exec) + if (rv == 0): + successes += 1 + else: @@ -58569,6 +59308,7 @@ index 0000000000..2e59e6ceb5 + argp.add_argument("--csvgen", action='store_true', help="Generate CSV file for dir") + argp.add_argument("--csv", default="pi-util/conf_h265.2016.csv", help="CSV filename") + argp.add_argument("--vcodec", default="hevc_rpi", help="vcodec name to use") ++ argp.add_argument("--ffmpeg", default="./ffmpeg", help="ffmpeg exec name") + args = argp.parse_args() + + if args.csvgen: @@ -58591,7 +59331,7 @@ index 0000000000..2e59e6ceb5 + elif args.vaapi: + dectype = HWACCEL_VAAPI + -+ doconf(csva, args.tests, args.test_root, args.vcodec, dectype) ++ doconf(csva, args.tests, args.test_root, args.vcodec, dectype, args.ffmpeg) + diff --git a/pi-util/ffperf.py b/pi-util/ffperf.py new file mode 100755 diff --git a/packages/tools/bcm2835-bootloader/package.mk b/packages/tools/bcm2835-bootloader/package.mk index 8af64bfbb6..4db5fedb9c 100644 --- a/packages/tools/bcm2835-bootloader/package.mk +++ b/packages/tools/bcm2835-bootloader/package.mk @@ -3,8 +3,8 @@ # Copyright (C) 2017-present Team LibreELEC (https://libreelec.tv) PKG_NAME="bcm2835-bootloader" -PKG_VERSION="1df55790fb191704c0ce630d4d0713a8beb43a7d" -PKG_SHA256="b6163311508800ac82bae28260ea774ee35d92f86987a09cf0fc7880c61e9040" +PKG_VERSION="5ceac9414fd634dbc0762d80677744465634af2f" +PKG_SHA256="6ae8d2ea912cf6cc52f4fc9bc01a823b00a9f8c2ecb61d261bb0d12bee379c9e" PKG_ARCH="arm aarch64" PKG_LICENSE="nonfree" PKG_SITE="http://www.broadcom.com" diff --git a/packages/tools/rpi-eeprom/package.mk b/packages/tools/rpi-eeprom/package.mk index 4e2d7694e5..ea2df956c0 100644 --- a/packages/tools/rpi-eeprom/package.mk +++ b/packages/tools/rpi-eeprom/package.mk @@ -2,8 +2,8 
@@ # Copyright (C) 2019-present Team LibreELEC (https://libreelec.tv) PKG_NAME="rpi-eeprom" -PKG_VERSION="e6107348e051c868493523f57e0c8c08d20921a5" -PKG_SHA256="022a209bf1f24da9873423e1ea52a4e0722c88aaa4969e1555fe3c40c8172f79" +PKG_VERSION="5d70742580859c5f54eb4cb8bf9b730067ffc396" +PKG_SHA256="38d0733257db508e3a539a726bee3b92d59acaeac57cf3103b102b6d23ed4760" PKG_ARCH="arm" PKG_LICENSE="BSD-3/custom" PKG_SITE="https://github.com/raspberrypi/rpi-eeprom" diff --git a/projects/RPi/devices/RPi/linux/linux.arm.conf b/projects/RPi/devices/RPi/linux/linux.arm.conf index b401501e90..38e5ee4cab 100644 --- a/projects/RPi/devices/RPi/linux/linux.arm.conf +++ b/projects/RPi/devices/RPi/linux/linux.arm.conf @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. -# Linux/arm 5.10.32 Kernel Configuration +# Linux/arm 5.10.44 Kernel Configuration # CONFIG_CC_VERSION_TEXT="gcc (Debian 8.3.0-6) 8.3.0" CONFIG_CC_IS_GCC=y @@ -1266,6 +1266,7 @@ CONFIG_BLK_DEV_RAM_SIZE=4096 # NVME Support # # CONFIG_NVME_FC is not set +# CONFIG_NVME_TCP is not set # CONFIG_NVME_TARGET is not set # end of NVME Support @@ -2249,6 +2250,7 @@ CONFIG_POWER_RESET_RESTART=y CONFIG_POWER_SUPPLY=y # CONFIG_POWER_SUPPLY_DEBUG is not set CONFIG_POWER_SUPPLY_HWMON=y +CONFIG_RPI_POE_POWER=m # CONFIG_PDA_POWER is not set # CONFIG_TEST_POWER is not set # CONFIG_CHARGER_ADP5061 is not set @@ -3570,6 +3572,7 @@ CONFIG_SND=y CONFIG_SND_TIMER=y CONFIG_SND_PCM=y CONFIG_SND_PCM_ELD=y +CONFIG_SND_PCM_IEC958=y CONFIG_SND_DMAENGINE_PCM=y CONFIG_SND_HWDEP=m CONFIG_SND_RAWMIDI=m @@ -3746,6 +3749,7 @@ CONFIG_SND_SOC_CS42XX8_I2C=m # CONFIG_SND_SOC_CX2072X is not set CONFIG_SND_SOC_DA7213=m CONFIG_SND_SOC_DMIC=m +CONFIG_SND_SOC_HDMI_CODEC=y # CONFIG_SND_SOC_ES7134 is not set # CONFIG_SND_SOC_ES7241 is not set # CONFIG_SND_SOC_ES8316 is not set diff --git a/projects/RPi/devices/RPi2/linux/linux.arm.conf b/projects/RPi/devices/RPi2/linux/linux.arm.conf index 170bb192d4..4bd261131e 100644 --- a/projects/RPi/devices/RPi2/linux/linux.arm.conf +++ b/projects/RPi/devices/RPi2/linux/linux.arm.conf @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. 
-# Linux/arm 5.10.32 Kernel Configuration
+# Linux/arm 5.10.44 Kernel Configuration
 #
 CONFIG_CC_VERSION_TEXT="gcc (Debian 8.3.0-6) 8.3.0"
 CONFIG_CC_IS_GCC=y
@@ -1392,6 +1392,7 @@ CONFIG_BLK_DEV_RAM_SIZE=4096
 # NVME Support
 #
 # CONFIG_NVME_FC is not set
+# CONFIG_NVME_TCP is not set
 # CONFIG_NVME_TARGET is not set
 # end of NVME Support

@@ -2383,6 +2384,7 @@ CONFIG_POWER_RESET_RESTART=y
 CONFIG_POWER_SUPPLY=y
 # CONFIG_POWER_SUPPLY_DEBUG is not set
 CONFIG_POWER_SUPPLY_HWMON=y
+CONFIG_RPI_POE_POWER=m
 # CONFIG_PDA_POWER is not set
 # CONFIG_TEST_POWER is not set
 # CONFIG_CHARGER_ADP5061 is not set
@@ -3707,6 +3709,7 @@ CONFIG_SND=y
 CONFIG_SND_TIMER=y
 CONFIG_SND_PCM=y
 CONFIG_SND_PCM_ELD=y
+CONFIG_SND_PCM_IEC958=y
 CONFIG_SND_DMAENGINE_PCM=y
 CONFIG_SND_HWDEP=m
 CONFIG_SND_RAWMIDI=m
@@ -3883,6 +3886,7 @@ CONFIG_SND_SOC_CS42XX8_I2C=m
 # CONFIG_SND_SOC_CX2072X is not set
 CONFIG_SND_SOC_DA7213=m
 CONFIG_SND_SOC_DMIC=m
+CONFIG_SND_SOC_HDMI_CODEC=y
 # CONFIG_SND_SOC_ES7134 is not set
 # CONFIG_SND_SOC_ES7241 is not set
 # CONFIG_SND_SOC_ES8316 is not set
diff --git a/projects/RPi/devices/RPi4/config/distroconfig.txt b/projects/RPi/devices/RPi4/config/distroconfig.txt
index b7c599f427..6d2dd077f1 100644
--- a/projects/RPi/devices/RPi4/config/distroconfig.txt
+++ b/projects/RPi/devices/RPi4/config/distroconfig.txt
@@ -6,3 +6,6 @@ dtoverlay=vc4-kms-v3d,cma-512
 dtoverlay=rpivid-v4l2
 disable_overscan=1
 disable_fw_kms_setup=1
+# temporarily limit framebuffer size to avoid gpu memory issues at 4kp60
+max_framebuffer_width=1920
+max_framebuffer_height=1080
diff --git a/projects/RPi/devices/RPi4/linux/linux.arm.conf b/projects/RPi/devices/RPi4/linux/linux.arm.conf
index 6bf4285482..b75a233e83 100644
--- a/projects/RPi/devices/RPi4/linux/linux.arm.conf
+++ b/projects/RPi/devices/RPi4/linux/linux.arm.conf
@@ -1,6 +1,6 @@
 #
 # Automatically generated file; DO NOT EDIT.
-# Linux/arm 5.10.32 Kernel Configuration +# Linux/arm 5.10.44 Kernel Configuration # CONFIG_CC_VERSION_TEXT="gcc (Debian 8.3.0-6) 8.3.0" CONFIG_CC_IS_GCC=y @@ -1479,6 +1479,7 @@ CONFIG_BLK_DEV_RAM_SIZE=4096 # # CONFIG_BLK_DEV_NVME is not set # CONFIG_NVME_FC is not set +# CONFIG_NVME_TCP is not set # CONFIG_NVME_TARGET is not set # end of NVME Support @@ -2812,6 +2813,7 @@ CONFIG_POWER_RESET_RESTART=y CONFIG_POWER_SUPPLY=y # CONFIG_POWER_SUPPLY_DEBUG is not set CONFIG_POWER_SUPPLY_HWMON=y +CONFIG_RPI_POE_POWER=m # CONFIG_PDA_POWER is not set # CONFIG_TEST_POWER is not set # CONFIG_CHARGER_ADP5061 is not set @@ -4211,6 +4213,7 @@ CONFIG_SND=y CONFIG_SND_TIMER=y CONFIG_SND_PCM=y CONFIG_SND_PCM_ELD=y +CONFIG_SND_PCM_IEC958=y CONFIG_SND_DMAENGINE_PCM=y CONFIG_SND_HWDEP=m CONFIG_SND_RAWMIDI=m @@ -4452,6 +4455,7 @@ CONFIG_SND_SOC_CS42XX8_I2C=m # CONFIG_SND_SOC_CX2072X is not set CONFIG_SND_SOC_DA7213=m CONFIG_SND_SOC_DMIC=m +CONFIG_SND_SOC_HDMI_CODEC=y # CONFIG_SND_SOC_ES7134 is not set # CONFIG_SND_SOC_ES7241 is not set # CONFIG_SND_SOC_ES8316 is not set diff --git a/projects/RPi/devices/RPi4/patches/kodi/0001-hack-try-to-hide-pixel-wrap-issues.patch b/projects/RPi/devices/RPi4/patches/kodi/0001-hack-try-to-hide-pixel-wrap-issues.patch deleted file mode 100644 index bd7cbeca4c..0000000000 --- a/projects/RPi/devices/RPi4/patches/kodi/0001-hack-try-to-hide-pixel-wrap-issues.patch +++ /dev/null @@ -1,90 +0,0 @@ -From 7220f2de9c3f29f4bd005ef442a656047a69561b Mon Sep 17 00:00:00 2001 -From: popcornmix -Date: Wed, 8 Apr 2020 11:19:22 +0100 -Subject: [PATCH] pi4: hack: Try to hide pixel wrap issue - ---- - .../HwDecRender/VideoLayerBridgeDRMPRIME.cpp | 22 ++++++++++++++----- - xbmc/windowing/gbm/drm/DRMAtomic.cpp | 10 +++++++-- - 2 files changed, 25 insertions(+), 7 deletions(-) - -diff --git a/xbmc/cores/VideoPlayer/VideoRenderers/HwDecRender/VideoLayerBridgeDRMPRIME.cpp b/xbmc/cores/VideoPlayer/VideoRenderers/HwDecRender/VideoLayerBridgeDRMPRIME.cpp -index c78636b680..ed737820f1 100644 ---- a/xbmc/cores/VideoPlayer/VideoRenderers/HwDecRender/VideoLayerBridgeDRMPRIME.cpp -+++ b/xbmc/cores/VideoPlayer/VideoRenderers/HwDecRender/VideoLayerBridgeDRMPRIME.cpp -@@ -11,6 +11,7 @@ - #include "cores/VideoPlayer/Buffers/VideoBufferDRMPRIME.h" - #include "utils/log.h" - #include "windowing/gbm/drm/DRMAtomic.h" -+#include "settings/DisplaySettings.h" - - #include - -@@ -264,14 +265,25 @@ void CVideoLayerBridgeDRMPRIME::SetVideoPlane(CVideoBufferDRMPRIME* buffer, cons - auto plane = m_DRM->GetVideoPlane(); - m_DRM->AddProperty(plane, "FB_ID", buffer->m_fb_id); - m_DRM->AddProperty(plane, "CRTC_ID", m_DRM->GetCrtc()->GetCrtcId()); -+ -+ uint32_t srcw = buffer->GetWidth(); -+ uint32_t dstw = static_cast(destRect.Width()); -+ int32_t dstx = static_cast(destRect.x1); -+ double scalex = (double)srcw / (double)dstw; -+ RESOLUTION_INFO &res = CDisplaySettings::GetInstance().GetCurrentResolutionInfo(); -+ if (dstw > 1 && dstx + dstw > (uint32_t)res.iScreenWidth - 1) -+ { -+ dstw -= 1; -+ srcw = (uint32_t)(srcw - 1.0 * scalex + 0.5); -+ } - m_DRM->AddProperty(plane, "SRC_X", 0); - m_DRM->AddProperty(plane, "SRC_Y", 0); -- m_DRM->AddProperty(plane, "SRC_W", buffer->GetWidth() << 16); -+ m_DRM->AddProperty(plane, "SRC_W", srcw << 16); - m_DRM->AddProperty(plane, "SRC_H", buffer->GetHeight() << 16); -- m_DRM->AddProperty(plane, "CRTC_X", static_cast(destRect.x1) & ~1); -- m_DRM->AddProperty(plane, "CRTC_Y", static_cast(destRect.y1) & ~1); -- m_DRM->AddProperty(plane, "CRTC_W", (static_cast(destRect.Width()) + 1) & ~1); -- 
m_DRM->AddProperty(plane, "CRTC_H", (static_cast(destRect.Height()) + 1) & ~1); -+ m_DRM->AddProperty(plane, "CRTC_X", dstx); -+ m_DRM->AddProperty(plane, "CRTC_Y", static_cast(destRect.y1)); -+ m_DRM->AddProperty(plane, "CRTC_W", dstw); -+ m_DRM->AddProperty(plane, "CRTC_H", static_cast(destRect.Height())); - } - - void CVideoLayerBridgeDRMPRIME::UpdateVideoPlane() -diff --git a/xbmc/windowing/gbm/drm/DRMAtomic.cpp b/xbmc/windowing/gbm/drm/DRMAtomic.cpp -index 5d61a699d4..de02d84758 100644 ---- a/xbmc/windowing/gbm/drm/DRMAtomic.cpp -+++ b/xbmc/windowing/gbm/drm/DRMAtomic.cpp -@@ -92,13 +92,19 @@ void CDRMAtomic::DrmAtomicCommit(int fb_id, int flags, bool rendered, bool video - return; - } - -+ uint32_t srcw = m_width; -+ uint32_t dstw = m_mode->hdisplay; -+ double scalex = (double)srcw / (double)dstw; -+ dstw -= 1; -+ srcw = (uint32_t)(srcw - 1.0 * scalex + 0.5); -+ - if (rendered) - { - AddProperty(m_gui_plane, "FB_ID", fb_id); - AddProperty(m_gui_plane, "CRTC_ID", m_crtc->GetCrtcId()); - AddProperty(m_gui_plane, "SRC_X", 0); - AddProperty(m_gui_plane, "SRC_Y", 0); -- AddProperty(m_gui_plane, "SRC_W", m_width << 16); -+ AddProperty(m_gui_plane, "SRC_W", srcw << 16); - AddProperty(m_gui_plane, "SRC_H", m_height << 16); - AddProperty(m_gui_plane, "CRTC_X", 0); - AddProperty(m_gui_plane, "CRTC_Y", 0); -@@ -109,7 +115,7 @@ void CDRMAtomic::DrmAtomicCommit(int fb_id, int flags, bool rendered, bool video - // } - // else - { -- AddProperty(m_gui_plane, "CRTC_W", m_mode->hdisplay); -+ AddProperty(m_gui_plane, "CRTC_W", dstw); - AddProperty(m_gui_plane, "CRTC_H", m_mode->vdisplay); - } - --- -2.20.1 - diff --git a/projects/RPi/filesystem/usr/share/alsa/cards/vc4-hdmi.conf b/projects/RPi/filesystem/usr/share/alsa/cards/vc4-hdmi.conf index dc1e477a35..9b1c1d688a 100644 --- a/projects/RPi/filesystem/usr/share/alsa/cards/vc4-hdmi.conf +++ b/projects/RPi/filesystem/usr/share/alsa/cards/vc4-hdmi.conf @@ -35,6 +35,7 @@ vc4-hdmi.pcm.hdmi.0 { hook_args [ { name "IEC958 Playback Default" + interface PCM optional true lock true preserve true diff --git a/tools/ffmpeg/gen-patches.sh b/tools/ffmpeg/gen-patches.sh index ae80b6c2fc..038ac4dfe2 100755 --- a/tools/ffmpeg/gen-patches.sh +++ b/tools/ffmpeg/gen-patches.sh @@ -2,7 +2,7 @@ # base ffmpeg version KODI_FFMPEG_REPO="https://github.com/xbmc/FFmpeg" -KODI_FFMPEG_VERSION="4.3.1-Matrix-Alpha1-1" +KODI_FFMPEG_VERSION="4.3.2-Matrix-19.1" ALL_FEATURE_SETS="v4l2-drmprime v4l2-request libreelec rpi" @@ -37,7 +37,7 @@ create_patch() { ;; rpi) REPO="https://github.com/jc-kynesim/rpi-ffmpeg" - REFSPEC="dev/4.3.1/drm_prime_1" + REFSPEC="dev/4.3.2/clean_3" ;; *) echo "illegal feature set ${FEATURE_SET}"
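The hunk above moves the rpi feature set of gen-patches.sh from the 4.3.1 base onto Kodi's 4.3.2-Matrix-19.1 FFmpeg and the dev/4.3.2/clean_3 branch of jc-kynesim/rpi-ffmpeg. The body of create_patch() is not part of this diff, so the following is only a rough sketch, assuming the version string is a tag in the xbmc/FFmpeg repo and using an illustrative clone directory and output filename, of how an equivalent squashed patch can be produced with plain git:

#!/bin/sh
# Sketch only: diff the Kodi FFmpeg base against the rpi-ffmpeg refspec to get
# one squashed patch comparable to ffmpeg-001-rpi.patch. Paths and the output
# filename are illustrative, not necessarily what gen-patches.sh uses.
set -e
git clone --branch 4.3.2-Matrix-19.1 --depth 1 https://github.com/xbmc/FFmpeg ffmpeg-base
cd ffmpeg-base
git remote add rpi https://github.com/jc-kynesim/rpi-ffmpeg
git fetch rpi dev/4.3.2/clean_3
# FETCH_HEAD now points at the rpi branch tip; diff it against the base tag.
git diff HEAD FETCH_HEAD > ../ffmpeg-001-rpi.patch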