diff --git a/.forgejo/workflows/build-alpine.yaml b/.forgejo/workflows/build-alpine.yaml index 9fc5ff5ea..5f779d3a9 100644 --- a/.forgejo/workflows/build-alpine.yaml +++ b/.forgejo/workflows/build-alpine.yaml @@ -5,21 +5,6 @@ on: workflow_dispatch: jobs: - prepare: - name: Prepare - runs-on: Pmbootstrap - outputs: - time: ${{ steps.time.outputs.time }} - steps: - - name: Set start Time - id: time - shell: sh - run: echo time=$(date +"%Y%m%d%H%M%S") >> $GITHUB_OUTPUT - - name: Update pmbootstrap - uses: actions/pmbootstrap-update@master - - name: Remove libcamera aport - run: rm -rf ${{env.PMB_PMAPORTS}}/temp/libcamera - build: name: Build for ${{ matrix.info.arch }} runs-on: Pmbootstrap @@ -28,31 +13,25 @@ jobs: info: - arch: x86_64 - arch: aarch64 - needs: prepare steps: + - name: Remove libcamera aport + run: rm -rf ${{env.PMB_PMAPORTS}}/temp/libcamera - name: Check out repository code uses: actions/checkout@v4 - name: Build packages id: build uses: actions/pmbootstrap-build@main with: - name: libcamera + name: libcamera-neko-gpu aports: ${{github.workspace}}/package/alpine arch: ${{ matrix.info.arch }} src: ${{github.workspace}} - time: ${{ needs.prepare.outputs.time }} - name: "Upload packages" uses: actions/upload-alpine-package@main with: files: ${{steps.build.outputs.packages}} secret: ${{secrets.PACKAGE_TOKEN}} - - clean: - name: "Clean" - runs-on: Pmbootstrap - needs: build - if: always() - continue-on-error: true - steps: - - name: Update pmbootstrap - uses: actions/pmbootstrap-update@master + - name: Reset pmaports changes + if: always() + continue-on-error: true + run: git -C ${{env.PMB_PMAPORTS}} reset --hard diff --git a/.forgejo/workflows/sync-with-upstream.yaml b/.forgejo/workflows/sync-with-upstream.yaml deleted file mode 100644 index 808cbaee8..000000000 --- a/.forgejo/workflows/sync-with-upstream.yaml +++ /dev/null @@ -1,18 +0,0 @@ -name: Sync fork with upstream -run-name: Sync fork with upstream -on: - schedule: - - cron: "@daily" - 
workflow_dispatch: - -jobs: - sync: - name: Sync - runs-on: Misc - steps: - - name: Sync repository with upstream - uses: actions/sync-with-mirror@main - with: - secret: ${{ secrets.PUSH_TOKEN }} - name: libcamera - branch: master diff --git a/include/libcamera/framebuffer.h b/include/libcamera/framebuffer.h index e83825b46..ff8392430 100644 --- a/include/libcamera/framebuffer.h +++ b/include/libcamera/framebuffer.h @@ -26,7 +26,6 @@ struct FrameMetadata { FrameSuccess, FrameError, FrameCancelled, - FrameStartup, }; struct Plane { diff --git a/include/libcamera/internal/clock_recovery.h b/include/libcamera/internal/clock_recovery.h deleted file mode 100644 index 43e46b7dc..000000000 --- a/include/libcamera/internal/clock_recovery.h +++ /dev/null @@ -1,68 +0,0 @@ -/* SPDX-License-Identifier: LGPL-2.1-or-later */ -/* - * Copyright (C) 2024, Raspberry Pi Ltd - * - * Camera recovery algorithm - */ -#pragma once - -#include - -namespace libcamera { - -class ClockRecovery -{ -public: - ClockRecovery(); - - void configure(unsigned int numSamples = 100, unsigned int maxJitter = 2000, - unsigned int minSamples = 10, unsigned int errorThreshold = 50000); - void reset(); - - void addSample(); - void addSample(uint64_t input, uint64_t output); - - uint64_t getOutput(uint64_t input); - -private: - /* Approximate number of samples over which the model state persists. */ - unsigned int numSamples_; - /* Remove any output jitter larger than this immediately. */ - unsigned int maxJitter_; - /* Number of samples required before we start to use model estimates. */ - unsigned int minSamples_; - /* Threshold above which we assume the wallclock has been reset. */ - unsigned int errorThreshold_; - - /* How many samples seen (up to numSamples_). */ - unsigned int count_; - /* This gets subtracted from all input values, just to make the numbers easier. */ - uint64_t inputBase_; - /* As above, for the output. */ - uint64_t outputBase_; - /* The previous input sample. 
*/ - uint64_t lastInput_; - /* The previous output sample. */ - uint64_t lastOutput_; - - /* Average x value seen so far. */ - double xAve_; - /* Average y value seen so far */ - double yAve_; - /* Average x^2 value seen so far. */ - double x2Ave_; - /* Average x*y value seen so far. */ - double xyAve_; - - /* - * The latest estimate of linear parameters to derive the output clock - * from the input. - */ - double slope_; - double offset_; - - /* Use this cumulative error to monitor for spontaneous clock updates. */ - double error_; -}; - -} /* namespace libcamera */ diff --git a/include/libcamera/internal/delayed_controls.h b/include/libcamera/internal/delayed_controls.h index b64d8bba7..e8d3014d9 100644 --- a/include/libcamera/internal/delayed_controls.h +++ b/include/libcamera/internal/delayed_controls.h @@ -10,15 +10,13 @@ #include #include -#include - #include namespace libcamera { class V4L2Device; -class DelayedControls : public Object +class DelayedControls { public: struct ControlParams { diff --git a/include/libcamera/internal/egl.h b/include/libcamera/internal/egl.h new file mode 100644 index 000000000..04d637d88 --- /dev/null +++ b/include/libcamera/internal/egl.h @@ -0,0 +1,110 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Linaro Ltd. + * + * Authors: + * Bryan O'Donoghue + * + * egl_context.cpp - Helper class for managing eGL interactions. 
+ */ + +#pragma once + +#include + +#include + +#include "libcamera/internal/gbm.h" + +#define EGL_EGLEXT_PROTOTYPES +#include +#include +#define GL_GLEXT_PROTOTYPES +#include +#include + +namespace libcamera { + +LOG_DECLARE_CATEGORY(eGL) + +class eGLImage +{ +public: + eGLImage(uint32_t width, uint32_t height, uint32_t bpp, GLenum texture_unit, uint32_t texture_unit_uniform_id) + { + image_ = EGL_NO_IMAGE_KHR; + width_ = width; + height_ = height; + bpp_ = bpp; + stride_ = width_ * bpp_ / 4; + framesize_ = stride_ * height_; + texture_unit_ = texture_unit; + texture_unit_uniform_id_ = texture_unit_uniform_id; + + glGenTextures(1, &texture_); + } + + ~eGLImage() + { + glDeleteTextures(1, &texture_); + } + + uint32_t width_; + uint32_t height_; + uint32_t stride_; + uint32_t offset_; + uint32_t framesize_; + uint32_t bpp_; + uint32_t texture_unit_uniform_id_; + GLenum texture_unit_; + GLuint texture_; + EGLImageKHR image_; +}; + +class eGL +{ +public: + eGL(); + ~eGL(); + + int initEGLContext(GBM *gbmContext); + int createDMABufTexture2D(eGLImage *eglImage, int fd); + void destroyDMABufTexture(eGLImage *eglImage); + void createTexture2D(eGLImage *eglImage, GLint format, uint32_t width, uint32_t height, void *data); + void createTexture1D(eGLImage *eglImage, GLint format, uint32_t width, void *data); + + void pushEnv(std::vector &shaderEnv, const char *str); + void makeCurrent(); + void swapBuffers(); + + int compileVertexShader(GLuint &shaderId, unsigned char *shaderData, + unsigned int shaderDataLen, + std::vector shaderEnv); + int compileFragmentShader(GLuint &shaderId, unsigned char *shaderData, + unsigned int shaderDataLen, + std::vector shaderEnv); + int linkProgram(GLuint &programIdd, GLuint fragmentshaderId, GLuint vertexshaderId); + void dumpShaderSource(GLuint shaderId); + void useProgram(GLuint programId); + +private: + int fd_; + + EGLDisplay display_; + EGLContext context_; + EGLSurface surface_; + + int compileShader(int shaderType, GLuint &shaderId, 
unsigned char *shaderData, + unsigned int shaderDataLen, + std::vector shaderEnv); + + PFNEGLEXPORTDMABUFIMAGEMESAPROC eglExportDMABUFImageMESA; + PFNGLEGLIMAGETARGETTEXTURE2DOESPROC glEGLImageTargetTexture2DOES; + + PFNEGLCREATEIMAGEKHRPROC eglCreateImageKHR; + PFNEGLDESTROYIMAGEKHRPROC eglDestroyImageKHR; + + PFNEGLCLIENTWAITSYNCKHRPROC eglClientWaitSyncKHR; + PFNEGLCREATESYNCKHRPROC eglCreateSyncKHR; +}; +} //namespace libcamera diff --git a/include/libcamera/internal/gbm.h b/include/libcamera/internal/gbm.h new file mode 100644 index 000000000..a5486cc94 --- /dev/null +++ b/include/libcamera/internal/gbm.h @@ -0,0 +1,55 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Linaro Ltd. + * + * Authors: + * Bryan O'Donoghue + * + * gbm.h - Helper class for managing GBM interactions. + */ + +#pragma once + +#include + +#include + +#include + +namespace libcamera { + +LOG_DECLARE_CATEGORY(GBM) + +class GBM +{ +public: + GBM(); + ~GBM(); + + int initSurface(uint32_t width, uint32_t height); + int mapSurface(); + int getFrameBufferData(uint8_t *data_out, size_t data_len); + struct gbm_device *getDevice() { return gbm_device_; } + struct gbm_surface *getSurface() { return gbm_surface_; } + uint32_t getFrameSize() { return framesize_; } + uint32_t getStride() { return stride_; } + PixelFormat getPixelFormat() { return format_; } + +private: + int fd_; + struct gbm_device *gbm_device_; + struct gbm_surface *gbm_surface_; + + struct gbm_bo *gbm_bo_; + uint32_t width_; + uint32_t height_; + uint32_t stride_; + uint32_t offset_; + uint32_t framesize_; + void *map_; + int bo_fd_; + + PixelFormat format_; +}; + +} // namespace libcamera diff --git a/include/libcamera/internal/mapped_framebuffer.h b/include/libcamera/internal/mapped_framebuffer.h index 6aaabf508..9a5355c76 100644 --- a/include/libcamera/internal/mapped_framebuffer.h +++ b/include/libcamera/internal/mapped_framebuffer.h @@ -55,6 +55,10 @@ public: using MapFlags = Flags; 
MappedFrameBuffer(const FrameBuffer *buffer, MapFlags flags); + int getPlaneFD(int plane); + +private: + const FrameBuffer *buffer_; }; LIBCAMERA_FLAGS_ENABLE_OPERATORS(MappedFrameBuffer::MapFlag) diff --git a/include/libcamera/internal/meson.build b/include/libcamera/internal/meson.build index 5c80a28c4..4a2919f61 100644 --- a/include/libcamera/internal/meson.build +++ b/include/libcamera/internal/meson.build @@ -1,6 +1,7 @@ # SPDX-License-Identifier: CC0-1.0 subdir('tracepoints') +subdir('shaders') libcamera_internal_headers = files([ 'bayer_format.h', @@ -11,7 +12,6 @@ libcamera_internal_headers = files([ 'camera_manager.h', 'camera_sensor.h', 'camera_sensor_properties.h', - 'clock_recovery.h', 'control_serializer.h', 'control_validator.h', 'converter.h', @@ -23,6 +23,7 @@ libcamera_internal_headers = files([ 'dma_buf_allocator.h', 'formats.h', 'framebuffer.h', + 'gbm.h', 'ipa_data_serializer.h', 'ipa_manager.h', 'ipa_module.h', @@ -58,5 +59,14 @@ tracepoints_h = custom_target( libcamera_internal_headers += tracepoints_h +libcamera_shader_headers = custom_target( + 'gen-shader-headers', + input : [shader_files], + output : 'glsl_shaders.h', + command : [gen_shader_headers, meson.project_source_root(), meson.project_build_root(), '@OUTPUT@', '@INPUT@'], +) + +libcamera_internal_headers += libcamera_shader_headers + subdir('converter') subdir('software_isp') diff --git a/include/libcamera/internal/process.h b/include/libcamera/internal/process.h index 6c34aef2f..b1d07a5a5 100644 --- a/include/libcamera/internal/process.h +++ b/include/libcamera/internal/process.h @@ -11,7 +11,6 @@ #include #include -#include #include #include @@ -43,8 +42,6 @@ public: Signal finished; private: - LIBCAMERA_DISABLE_COPY_AND_MOVE(Process) - void closeAllFdsExcept(const std::vector &fds); int isolate(); void died(int wstatus); diff --git a/src/apps/qcam/assets/shader/RGB.frag b/include/libcamera/internal/shaders/RGB.frag similarity index 93% rename from 
src/apps/qcam/assets/shader/RGB.frag rename to include/libcamera/internal/shaders/RGB.frag index 4c374ac98..724395894 100644 --- a/src/apps/qcam/assets/shader/RGB.frag +++ b/include/libcamera/internal/shaders/RGB.frag @@ -6,7 +6,7 @@ */ #ifdef GL_ES -precision mediump float; +precision highp float; #endif varying vec2 textureOut; diff --git a/src/apps/qcam/assets/shader/YUV_2_planes.frag b/include/libcamera/internal/shaders/YUV_2_planes.frag similarity index 97% rename from src/apps/qcam/assets/shader/YUV_2_planes.frag rename to include/libcamera/internal/shaders/YUV_2_planes.frag index 1d5d12062..d286f1179 100644 --- a/src/apps/qcam/assets/shader/YUV_2_planes.frag +++ b/include/libcamera/internal/shaders/YUV_2_planes.frag @@ -6,7 +6,7 @@ */ #ifdef GL_ES -precision mediump float; +precision highp float; #endif varying vec2 textureOut; diff --git a/src/apps/qcam/assets/shader/YUV_3_planes.frag b/include/libcamera/internal/shaders/YUV_3_planes.frag similarity index 96% rename from src/apps/qcam/assets/shader/YUV_3_planes.frag rename to include/libcamera/internal/shaders/YUV_3_planes.frag index 8f788e90a..8e3e0b4a5 100644 --- a/src/apps/qcam/assets/shader/YUV_3_planes.frag +++ b/include/libcamera/internal/shaders/YUV_3_planes.frag @@ -6,7 +6,7 @@ */ #ifdef GL_ES -precision mediump float; +precision highp float; #endif varying vec2 textureOut; diff --git a/src/apps/qcam/assets/shader/YUV_packed.frag b/include/libcamera/internal/shaders/YUV_packed.frag similarity index 99% rename from src/apps/qcam/assets/shader/YUV_packed.frag rename to include/libcamera/internal/shaders/YUV_packed.frag index b9ef9d41b..3c9e3e397 100644 --- a/src/apps/qcam/assets/shader/YUV_packed.frag +++ b/include/libcamera/internal/shaders/YUV_packed.frag @@ -6,7 +6,7 @@ */ #ifdef GL_ES -precision mediump float; +precision highp float; #endif varying vec2 textureOut; diff --git a/src/apps/qcam/assets/shader/bayer_1x_packed.frag b/include/libcamera/internal/shaders/bayer_1x_packed.frag similarity 
index 76% rename from src/apps/qcam/assets/shader/bayer_1x_packed.frag rename to include/libcamera/internal/shaders/bayer_1x_packed.frag index f53f55758..c0632eb1f 100644 --- a/src/apps/qcam/assets/shader/bayer_1x_packed.frag +++ b/include/libcamera/internal/shaders/bayer_1x_packed.frag @@ -20,7 +20,7 @@ */ #ifdef GL_ES -precision mediump float; +precision highp float; #endif /* @@ -65,6 +65,10 @@ uniform vec2 tex_step; uniform vec2 tex_bayer_first_red; uniform sampler2D tex_y; +uniform sampler2D red_param; +uniform sampler2D green_param; +uniform sampler2D blue_param; +uniform mat3 ccm; void main(void) { @@ -212,5 +216,61 @@ void main(void) vec3(patterns.y, C, patterns.x) : vec3(patterns.wz, C)); +#if defined(APPLY_CCM_PARAMETERS) + /* + * CCM is a 3x3 in the format + * + * +--------------+----------------+---------------+ + * | RedRedGain | RedGreenGain | RedBlueGain | + * +--------------+----------------+---------------+ + * | GreenRedGain | GreenGreenGain | GreenBlueGain | + * +--------------+----------------+---------------+ + * | BlueRedGain | BlueGreenGain | BlueBlueGain | + * +--------------+----------------+---------------+ + * + * Rout = RedRedGain * Rin + RedGreenGain * Gin + RedBlueGain * Bin + * Gout = GreenRedGain * Rin + GreenGreenGain * Gin + GreenBlueGain * Bin + * Bout = BlueRedGain * Rin + BlueGreenGain * Gin + BlueBlueGain * Bin + * + * We upload to the GPU without transposition glUniformMatrix3f(.., .., GL_FALSE, ccm); + * + * CPU + * float ccm [] = { + * RedRedGain, RedGreenGain, RedBlueGain, + * GreenRedGain, GreenGreenGain, GreenBlueGain, + * BlueRedGain, BlueGreenGain, BlueBlueGain, + * }; + * + * GPU + * ccm = { + * RedRedGain, GreenRedGain, BlueRedGain, + * RedGreenGain, GreenGreenGain, BlueGreenGain, + * RedBlueGain, GreenBlueGain, BlueBlueGain, + * } + * + * However the indexing for the mat data-type is column major hence + * ccm[0][0] = RedRedGain, ccm[0][1] = RedGreenGain, ccm[0][2] = RedBlueGain + * + */ + float rin, gin, bin; + rin 
= rgb.r; + gin = rgb.g; + bin = rgb.b; + + rgb.r = (rin * ccm[0][0]) + (gin * ccm[0][1]) + (bin * ccm[0][2]); + rgb.g = (rin * ccm[1][0]) + (gin * ccm[1][1]) + (bin * ccm[1][2]); + rgb.b = (rin * ccm[2][0]) + (gin * ccm[2][1]) + (bin * ccm[2][2]); + +#elif defined(APPLY_RGB_PARAMETERS) + /* Apply bayer params */ + rgb.r = texture2D(red_param, vec2(rgb.r, 0.5)).r; + rgb.g = texture2D(green_param, vec2(rgb.g, 0.5)).g; + rgb.b = texture2D(blue_param, vec2(rgb.b, 0.5)).b; +#endif + +#if defined (SWAP_BLUE) + gl_FragColor = vec4(rgb.bgr, 1.0); +#else gl_FragColor = vec4(rgb, 1.0); +#endif } diff --git a/src/apps/qcam/assets/shader/bayer_8.frag b/include/libcamera/internal/shaders/bayer_unpacked.frag similarity index 56% rename from src/apps/qcam/assets/shader/bayer_8.frag rename to include/libcamera/internal/shaders/bayer_unpacked.frag index 7e35ca88e..78c2609c2 100644 --- a/src/apps/qcam/assets/shader/bayer_8.frag +++ b/include/libcamera/internal/shaders/bayer_unpacked.frag @@ -16,19 +16,33 @@ Copyright (C) 2021, Linaro //Pixel Shader #ifdef GL_ES -precision mediump float; +precision highp float; #endif /** Monochrome RGBA or GL_LUMINANCE Bayer encoded texture.*/ uniform sampler2D tex_y; +uniform sampler2D red_param; +uniform sampler2D green_param; +uniform sampler2D blue_param; varying vec4 center; varying vec4 yCoord; varying vec4 xCoord; +uniform mat3 ccm; void main(void) { - #define fetch(x, y) texture2D(tex_y, vec2(x, y)).r + vec3 rgb; - float C = texture2D(tex_y, center.xy).r; // ( 0, 0) + #if defined(RAW10P) + #define pixel(p) p.r / 4.0 + p.g * 64.0 + #define fetch(x, y) pixel(texture2D(tex_y, vec2(x, y))) + #elif defined(RAW12P) + #define pixel(p) p.r / 16.0 + p.g * 16.0 + #define fetch(x, y) pixel(texture2D(tex_y, vec2(x, y))) + #else + #define fetch(x, y) texture2D(tex_y, vec2(x, y)).r + #endif + + float C = fetch(center.x, center.y); // ( 0, 0) const vec4 kC = vec4( 4.0, 6.0, 5.0, 5.0) / 8.0; // Determine which of four types of pixels we are on. 
@@ -97,11 +111,69 @@ void main(void) { PATTERN.xw += kB.xw * B; PATTERN.xz += kF.xz * F; - gl_FragColor.rgb = (alternate.y == 0.0) ? + rgb = (alternate.y == 0.0) ? ((alternate.x == 0.0) ? vec3(C, PATTERN.xy) : vec3(PATTERN.z, C, PATTERN.w)) : ((alternate.x == 0.0) ? vec3(PATTERN.w, C, PATTERN.z) : vec3(PATTERN.yx, C)); + +#if defined(APPLY_CCM_PARAMETERS) + /* + * CCM is a 3x3 in the format + * + * +--------------+----------------+---------------+ + * | RedRedGain | RedGreenGain | RedBlueGain | + * +--------------+----------------+---------------+ + * | GreenRedGain | GreenGreenGain | GreenBlueGain | + * +--------------+----------------+---------------+ + * | BlueRedGain | BlueGreenGain | BlueBlueGain | + * +--------------+----------------+---------------+ + * + * Rout = RedRedGain * Rin + RedGreenGain * Gin + RedBlueGain * Bin + * Gout = GreenRedGain * Rin + GreenGreenGain * Gin + GreenBlueGain * Bin + * Bout = BlueRedGain * Rin + BlueGreenGain * Gin + BlueBlueGain * Bin + * + * We upload to the GPU without transposition glUniformMatrix3f(.., .., GL_FALSE, ccm); + * + * CPU + * float ccm [] = { + * RedRedGain, RedGreenGain, RedBlueGain, + * GreenRedGain, GreenGreenGain, GreenBlueGain, + * BlueRedGain, BlueGreenGain, BlueBlueGain, + * }; + * + * GPU + * ccm = { + * RedRedGain, GreenRedGain, BlueRedGain, + * RedGreenGain, GreenGreenGain, BlueGreenGain, + * RedBlueGain, GreenBlueGain, BlueBlueGain, + * } + * + * However the indexing for the mat data-type is column major hence + * ccm[0][0] = RedRedGain, ccm[0][1] = RedGreenGain, ccm[0][2] = RedBlueGain + * + */ + float rin, gin, bin; + rin = rgb.r; + gin = rgb.g; + bin = rgb.b; + + rgb.r = (rin * ccm[0][0]) + (gin * ccm[0][1]) + (bin * ccm[0][2]); + rgb.g = (rin * ccm[1][0]) + (gin * ccm[1][1]) + (bin * ccm[1][2]); + rgb.b = (rin * ccm[2][0]) + (gin * ccm[2][1]) + (bin * ccm[2][2]); + +#elif defined(APPLY_RGB_PARAMETERS) + /* Apply bayer params */ + rgb.r = texture2D(red_param, vec2(rgb.r, 0.5)).r; + rgb.g = 
texture2D(green_param, vec2(rgb.g, 0.5)).g; + rgb.b = texture2D(blue_param, vec2(rgb.b, 0.5)).b; +#endif + +#if defined (SWAP_BLUE) + gl_FragColor = vec4(rgb.bgr, 1.0); +#else + gl_FragColor = vec4(rgb, 1.0); +#endif } diff --git a/src/apps/qcam/assets/shader/bayer_8.vert b/include/libcamera/internal/shaders/bayer_unpacked.vert similarity index 85% rename from src/apps/qcam/assets/shader/bayer_8.vert rename to include/libcamera/internal/shaders/bayer_unpacked.vert index fb5109eee..fc1cf89f2 100644 --- a/src/apps/qcam/assets/shader/bayer_8.vert +++ b/include/libcamera/internal/shaders/bayer_unpacked.vert @@ -44,10 +44,10 @@ void main(void) { center.xy = textureIn; center.zw = textureIn * tex_size + tex_bayer_first_red; - xCoord = center.x + vec4(-2.0 * tex_step.x, - -tex_step.x, tex_step.x, 2.0 * tex_step.x); - yCoord = center.y + vec4(-2.0 * tex_step.y, - -tex_step.y, tex_step.y, 2.0 * tex_step.y); + xCoord = center.x + 0.1 * tex_step.x + + vec4(-2.0 * tex_step.x, -tex_step.x, tex_step.x, 2.0 * tex_step.x); + yCoord = center.y + 0.1 * tex_step.y + + vec4(-2.0 * tex_step.y, -tex_step.y, tex_step.y, 2.0 * tex_step.y); gl_Position = proj_matrix * vertexIn; } diff --git a/src/apps/qcam/assets/shader/identity.vert b/include/libcamera/internal/shaders/identity.vert similarity index 100% rename from src/apps/qcam/assets/shader/identity.vert rename to include/libcamera/internal/shaders/identity.vert diff --git a/include/libcamera/internal/shaders/meson.build b/include/libcamera/internal/shaders/meson.build new file mode 100644 index 000000000..dd441a577 --- /dev/null +++ b/include/libcamera/internal/shaders/meson.build @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: CC0-1.0 + +# List of shader files to convert to header hex +# for the purposes of inclusion in OpenGL debayering +shader_files = files([ + 'bayer_1x_packed.frag', + 'bayer_unpacked.frag', + 'bayer_unpacked.vert', + 'identity.vert', +]) diff --git a/include/libcamera/internal/software_isp/benchmark.h 
b/include/libcamera/internal/software_isp/benchmark.h new file mode 100644 index 000000000..8af250154 --- /dev/null +++ b/include/libcamera/internal/software_isp/benchmark.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Red Hat Inc. + * + * Authors: + * Hans de Goede + * + * Simple builtin benchmark to measure software ISP processing times + */ + +#pragma once + +#include +#include + +namespace libcamera { + +class Benchmark +{ +public: + Benchmark(); + ~Benchmark(); + + void startFrame(void); + void finishFrame(void); + +private: + unsigned int measuredFrames_; + int64_t frameProcessTime_; + timespec frameStartTime_; + /* Skip 30 frames for things to stabilize then measure 30 frames */ + static constexpr unsigned int kFramesToSkip = 30; + static constexpr unsigned int kLastFrameToMeasure = 60; +}; + +} /* namespace libcamera */ diff --git a/include/libcamera/internal/software_isp/debayer_params.h b/include/libcamera/internal/software_isp/debayer_params.h index 6c36defc9..56803195e 100644 --- a/include/libcamera/internal/software_isp/debayer_params.h +++ b/include/libcamera/internal/software_isp/debayer_params.h @@ -13,6 +13,8 @@ #include #include +#include "libcamera/internal/matrix.h" + namespace libcamera { struct DebayerParams { @@ -51,13 +53,9 @@ struct DebayerParams { LookupTable gammaLut; /* - * Statistic controls - * - * Statistic collecting are very slow. We can disable it for some actions like - * video capture or streaming. 
- * TODO: Add statistic window control + * Per frame CCM values as calculated by the IPA */ - bool collect_stats; + Matrix ccm; }; } /* namespace libcamera */ diff --git a/include/libcamera/internal/software_isp/meson.build b/include/libcamera/internal/software_isp/meson.build index 508ddddca..df7c3b97d 100644 --- a/include/libcamera/internal/software_isp/meson.build +++ b/include/libcamera/internal/software_isp/meson.build @@ -1,7 +1,9 @@ # SPDX-License-Identifier: CC0-1.0 libcamera_internal_headers += files([ + 'benchmark.h', 'debayer_params.h', 'software_isp.h', 'swisp_stats.h', + 'swstats_cpu.h', ]) diff --git a/include/libcamera/internal/software_isp/software_isp.h b/include/libcamera/internal/software_isp/software_isp.h index 8f3a6b1b1..9e5e05fc0 100644 --- a/include/libcamera/internal/software_isp/software_isp.h +++ b/include/libcamera/internal/software_isp/software_isp.h @@ -37,7 +37,7 @@ namespace libcamera { -class DebayerCpu; +class Debayer; class FrameBuffer; class PixelFormat; class Stream; @@ -94,8 +94,7 @@ private: void statsReady(uint32_t frame, uint32_t bufferId); void inputReady(FrameBuffer *input); void outputReady(FrameBuffer *output); - - std::unique_ptr debayer_; + std::unique_ptr debayer_; Thread ispWorkerThread_; SharedMemObject sharedParams_; DebayerParams debayerParams_; diff --git a/include/libcamera/internal/software_isp/swisp_stats.h b/include/libcamera/internal/software_isp/swisp_stats.h index 3377dd825..ae11f112e 100644 --- a/include/libcamera/internal/software_isp/swisp_stats.h +++ b/include/libcamera/internal/software_isp/swisp_stats.h @@ -44,10 +44,6 @@ struct SwIspStats { * \brief A histogram of luminance values */ Histogram yHistogram; - /** - * \brief Holds the sharpness of an image - */ - uint64_t sharpness; }; } /* namespace libcamera */ diff --git a/src/libcamera/software_isp/swstats_cpu.h b/include/libcamera/internal/software_isp/swstats_cpu.h similarity index 86% rename 
to include/libcamera/internal/software_isp/swstats_cpu.h index 26a2f462e..fa47cec91 100644 --- a/src/libcamera/software_isp/swstats_cpu.h +++ b/include/libcamera/internal/software_isp/swstats_cpu.h @@ -18,12 +18,16 @@ #include #include "libcamera/internal/bayer_format.h" +#include "libcamera/internal/framebuffer.h" #include "libcamera/internal/shared_mem_object.h" #include "libcamera/internal/software_isp/swisp_stats.h" +#include "benchmark.h" + namespace libcamera { class PixelFormat; +class MappedFrameBuffer; struct StreamConfiguration; class SwStatsCpu @@ -42,6 +46,7 @@ public: void setWindow(const Rectangle &window); void startFrame(); void finishFrame(uint32_t frame, uint32_t bufferId); + void processFrame(uint32_t frame, uint32_t bufferId, FrameBuffer *input); void processLine0(unsigned int y, const uint8_t *src[]) { @@ -65,6 +70,7 @@ public: private: using statsProcessFn = void (SwStatsCpu::*)(const uint8_t *src[]); + using processFrameFn = void (SwStatsCpu::*)(MappedFrameBuffer &in); int setupStandardBayerOrder(BayerFormat::Order order); /* Bayer 8 bpp unpacked */ @@ -77,6 +83,10 @@ private: void statsBGGR10PLine0(const uint8_t *src[]); void statsGBRG10PLine0(const uint8_t *src[]); + void processBayerFrame2(MappedFrameBuffer &in); + + processFrameFn processFrame_; + /* Variables set by configure(), used every line */ statsProcessFn stats0_; statsProcessFn stats2_; @@ -89,9 +99,11 @@ private: Size patternSize_; unsigned int xShift_; + unsigned int stride_; SharedMemObject sharedStats_; SwIspStats stats_; + Benchmark bench_; }; } /* namespace libcamera */ diff --git a/include/libcamera/ipa/raspberrypi.mojom b/include/libcamera/ipa/raspberrypi.mojom index 12b083e9d..e30c70bde 100644 --- a/include/libcamera/ipa/raspberrypi.mojom +++ b/include/libcamera/ipa/raspberrypi.mojom @@ -52,8 +52,7 @@ struct ConfigResult { struct StartResult { libcamera.ControlList controls; - int32 startupFrameCount; - int32 invalidFrameCount; + int32 dropFrameCount; }; struct 
PrepareParams { diff --git a/package/alpine/APKBUILD b/package/alpine/APKBUILD index ddfc72950..1f31843a2 100644 --- a/package/alpine/APKBUILD +++ b/package/alpine/APKBUILD @@ -1,4 +1,5 @@ -pkgname=libcamera +basepkgname=libcamera +pkgname=$basepkgname-neko-gpu pkgver=9999999 pkgrel=0 pkgdesc="Linux camera framework" @@ -35,21 +36,26 @@ subpackages=" $pkgname-dbg $pkgname-dev $pkgname-doc - qcam $pkgname-gstreamer - $pkgname-v4l2 $pkgname-tools " +provides=" + $basepkgname + $basepkgname-dbg + $basepkgname-dev + $basepkgname-doc + $basepkgname-gstreamer + $basepkgname-tools +" source="" - -builddir="$srcdir/$pkgname-v$_pkgver" +builddir="$srcdir/$pkgname" # gstreamer tests fail # manual strip because ipa .sign files depend on the file contents- have to re-sign after strip options="!strip !check" case "$CARCH" in arm*|aarch64) - subpackages="$subpackages $pkgname-raspberrypi" + subpackages="$subpackages" ;; esac @@ -58,8 +64,7 @@ ppc64le|s390x|riscv64|loongarch64) # doesn't install any ipa ;; *) - # WIP: HACK? Don't depend on this this shit - # depends="$pkgname-ipa=$pkgver-r$pkgrel" + depends="$pkgname-ipa" subpackages="$subpackages $pkgname-ipa" ;; esac @@ -67,12 +72,18 @@ esac build() { abuild-meson \ -Dtest=false \ - -Dv4l2=true \ + -Dv4l2=false \ -Dwerror=false \ + -Dpipelines=simple \ + -Dipas=simple \ . 
output meson compile -C output } +check() { + meson test -C output --print-errorlogs +} + package() { DESTDIR="$pkgdir" meson install --no-rebuild -C output @@ -101,28 +112,11 @@ ipa() { done } -qcam() { - depends="" - amove usr/bin/qcam -} - gstreamer() { depends="" amove usr/lib/gstreamer-1.0 } -v4l2() { - depends="" - amove usr/libexec/libcamera/v4l2-compat.so -} - -raspberrypi() { - depends="" - amove usr/share/libcamera/ipa/rpi - amove usr/libexec/libcamera/raspberrypi_ipa_proxy - amove usr/share/libcamera/pipeline/rpi/vc4 -} - tools() { depends="" amove usr/bin/cam diff --git a/src/android/camera_device.cpp b/src/android/camera_device.cpp index 80ff248c2..a038131ae 100644 --- a/src/android/camera_device.cpp +++ b/src/android/camera_device.cpp @@ -1079,7 +1079,7 @@ int CameraDevice::processCaptureRequest(camera3_capture_request_t *camera3Reques buffer.internalBuffer = frameBuffer; descriptor->request_->addBuffer(sourceStream->stream(), - frameBuffer); + frameBuffer, nullptr); requestedStreams.insert(sourceStream); } diff --git a/src/apps/common/image.cpp b/src/apps/common/image.cpp index 9a67238aa..a2a0f58f3 100644 --- a/src/apps/common/image.cpp +++ b/src/apps/common/image.cpp @@ -98,12 +98,12 @@ unsigned int Image::numPlanes() const Span Image::data(unsigned int plane) { - assert(plane < planes_.size()); + assert(plane < planes_.size()); return planes_[plane]; } Span Image::data(unsigned int plane) const { - assert(plane < planes_.size()); + assert(plane < planes_.size()); return planes_[plane]; } diff --git a/src/apps/qcam/assets/shader/shaders.qrc b/src/apps/qcam/assets/shader/shaders.qrc index 96c709f92..32dfa51bf 100644 --- a/src/apps/qcam/assets/shader/shaders.qrc +++ b/src/apps/qcam/assets/shader/shaders.qrc @@ -1,13 +1,13 @@ - RGB.frag - YUV_2_planes.frag - YUV_3_planes.frag - YUV_packed.frag - bayer_1x_packed.frag - bayer_8.frag - bayer_8.vert - identity.vert + ../../../../../include/libcamera/internal/shaders/RGB.frag + 
../../../../../include/libcamera/internal/shaders/YUV_2_planes.frag + ../../../../../include/libcamera/internal/shaders/YUV_3_planes.frag + ../../../../../include/libcamera/internal/shaders/YUV_packed.frag + ../../../../../include/libcamera/internal/shaders/bayer_1x_packed.frag + ../../../../../include/libcamera/internal/shaders/bayer_unpacked.frag + ../../../../../include/libcamera/internal/shaders/bayer_unpacked.vert + ../../../../../include/libcamera/internal/shaders/identity.vert diff --git a/src/apps/qcam/viewfinder_gl.cpp b/src/apps/qcam/viewfinder_gl.cpp index f31956ff0..95965ab71 100644 --- a/src/apps/qcam/viewfinder_gl.cpp +++ b/src/apps/qcam/viewfinder_gl.cpp @@ -141,7 +141,7 @@ bool ViewFinderGL::selectFormat(const libcamera::PixelFormat &format) textureMinMagFilters_ = GL_LINEAR; /* Use identity.vert as the default vertex shader. */ - vertexShaderFile_ = ":identity.vert"; + vertexShaderFile_ = ":include/libcamera/internal/shaders/identity.vert"; fragmentShaderDefines_.clear(); @@ -150,170 +150,170 @@ bool ViewFinderGL::selectFormat(const libcamera::PixelFormat &format) horzSubSample_ = 2; vertSubSample_ = 2; fragmentShaderDefines_.append("#define YUV_PATTERN_UV"); - fragmentShaderFile_ = ":YUV_2_planes.frag"; + fragmentShaderFile_ = ":include/libcamera/internal/shaders/YUV_2_planes.frag"; break; case libcamera::formats::NV21: horzSubSample_ = 2; vertSubSample_ = 2; fragmentShaderDefines_.append("#define YUV_PATTERN_VU"); - fragmentShaderFile_ = ":YUV_2_planes.frag"; + fragmentShaderFile_ = ":include/libcamera/internal/shaders/YUV_2_planes.frag"; break; case libcamera::formats::NV16: horzSubSample_ = 2; vertSubSample_ = 1; fragmentShaderDefines_.append("#define YUV_PATTERN_UV"); - fragmentShaderFile_ = ":YUV_2_planes.frag"; + fragmentShaderFile_ = ":include/libcamera/internal/shaders/YUV_2_planes.frag"; break; case libcamera::formats::NV61: horzSubSample_ = 2; vertSubSample_ = 1; fragmentShaderDefines_.append("#define YUV_PATTERN_VU"); - 
fragmentShaderFile_ = ":YUV_2_planes.frag"; + fragmentShaderFile_ = ":include/libcamera/internal/shaders/YUV_2_planes.frag"; break; case libcamera::formats::NV24: horzSubSample_ = 1; vertSubSample_ = 1; fragmentShaderDefines_.append("#define YUV_PATTERN_UV"); - fragmentShaderFile_ = ":YUV_2_planes.frag"; + fragmentShaderFile_ = ":include/libcamera/internal/shaders/YUV_2_planes.frag"; break; case libcamera::formats::NV42: horzSubSample_ = 1; vertSubSample_ = 1; fragmentShaderDefines_.append("#define YUV_PATTERN_VU"); - fragmentShaderFile_ = ":YUV_2_planes.frag"; + fragmentShaderFile_ = ":include/libcamera/internal/shaders/YUV_2_planes.frag"; break; case libcamera::formats::YUV420: horzSubSample_ = 2; vertSubSample_ = 2; - fragmentShaderFile_ = ":YUV_3_planes.frag"; + fragmentShaderFile_ = ":include/libcamera/internal/shaders/YUV_3_planes.frag"; break; case libcamera::formats::YVU420: horzSubSample_ = 2; vertSubSample_ = 2; - fragmentShaderFile_ = ":YUV_3_planes.frag"; + fragmentShaderFile_ = ":include/libcamera/internal/shaders/YUV_3_planes.frag"; break; case libcamera::formats::UYVY: fragmentShaderDefines_.append("#define YUV_PATTERN_UYVY"); - fragmentShaderFile_ = ":YUV_packed.frag"; + fragmentShaderFile_ = ":include/libcamera/internal/shaders/YUV_packed.frag"; break; case libcamera::formats::VYUY: fragmentShaderDefines_.append("#define YUV_PATTERN_VYUY"); - fragmentShaderFile_ = ":YUV_packed.frag"; + fragmentShaderFile_ = ":include/libcamera/internal/shaders/YUV_packed.frag"; break; case libcamera::formats::YUYV: fragmentShaderDefines_.append("#define YUV_PATTERN_YUYV"); - fragmentShaderFile_ = ":YUV_packed.frag"; + fragmentShaderFile_ = ":include/libcamera/internal/shaders/YUV_packed.frag"; break; case libcamera::formats::YVYU: fragmentShaderDefines_.append("#define YUV_PATTERN_YVYU"); - fragmentShaderFile_ = ":YUV_packed.frag"; + fragmentShaderFile_ = ":include/libcamera/internal/shaders/YUV_packed.frag"; break; case libcamera::formats::ABGR8888: 
fragmentShaderDefines_.append("#define RGB_PATTERN rgb"); - fragmentShaderFile_ = ":RGB.frag"; + fragmentShaderFile_ = ":include/libcamera/internal/shaders/RGB.frag"; break; case libcamera::formats::ARGB8888: fragmentShaderDefines_.append("#define RGB_PATTERN bgr"); - fragmentShaderFile_ = ":RGB.frag"; + fragmentShaderFile_ = ":include/libcamera/internal/shaders/RGB.frag"; break; case libcamera::formats::BGRA8888: fragmentShaderDefines_.append("#define RGB_PATTERN gba"); - fragmentShaderFile_ = ":RGB.frag"; + fragmentShaderFile_ = ":include/libcamera/internal/shaders/RGB.frag"; break; case libcamera::formats::RGBA8888: fragmentShaderDefines_.append("#define RGB_PATTERN abg"); - fragmentShaderFile_ = ":RGB.frag"; + fragmentShaderFile_ = ":include/libcamera/internal/shaders/RGB.frag"; break; case libcamera::formats::BGR888: fragmentShaderDefines_.append("#define RGB_PATTERN rgb"); - fragmentShaderFile_ = ":RGB.frag"; + fragmentShaderFile_ = ":include/libcamera/internal/shaders/RGB.frag"; break; case libcamera::formats::RGB888: fragmentShaderDefines_.append("#define RGB_PATTERN bgr"); - fragmentShaderFile_ = ":RGB.frag"; + fragmentShaderFile_ = ":include/libcamera/internal/shaders/RGB.frag"; break; case libcamera::formats::SBGGR8: firstRed_.setX(1.0); firstRed_.setY(1.0); - vertexShaderFile_ = ":bayer_8.vert"; - fragmentShaderFile_ = ":bayer_8.frag"; + vertexShaderFile_ = ":include/libcamera/internal/shaders/bayer_unpacked.vert"; + fragmentShaderFile_ = ":include/libcamera/internal/shaders/bayer_unpacked.frag"; textureMinMagFilters_ = GL_NEAREST; break; case libcamera::formats::SGBRG8: firstRed_.setX(0.0); firstRed_.setY(1.0); - vertexShaderFile_ = ":bayer_8.vert"; - fragmentShaderFile_ = ":bayer_8.frag"; + vertexShaderFile_ = ":include/libcamera/internal/shaders/bayer_unpacked.vert"; + fragmentShaderFile_ = ":include/libcamera/internal/shaders/bayer_unpacked.frag"; textureMinMagFilters_ = GL_NEAREST; break; case libcamera::formats::SGRBG8: firstRed_.setX(1.0); 
firstRed_.setY(0.0); - vertexShaderFile_ = ":bayer_8.vert"; - fragmentShaderFile_ = ":bayer_8.frag"; + vertexShaderFile_ = ":include/libcamera/internal/shaders/bayer_unpacked.vert"; + fragmentShaderFile_ = ":include/libcamera/internal/shaders/bayer_unpacked.frag"; textureMinMagFilters_ = GL_NEAREST; break; case libcamera::formats::SRGGB8: firstRed_.setX(0.0); firstRed_.setY(0.0); - vertexShaderFile_ = ":bayer_8.vert"; - fragmentShaderFile_ = ":bayer_8.frag"; + vertexShaderFile_ = ":include/libcamera/internal/shaders/bayer_unpacked.vert"; + fragmentShaderFile_ = ":include/libcamera/internal/shaders/bayer_unpacked.frag"; textureMinMagFilters_ = GL_NEAREST; break; case libcamera::formats::SBGGR10_CSI2P: firstRed_.setX(1.0); firstRed_.setY(1.0); fragmentShaderDefines_.append("#define RAW10P"); - fragmentShaderFile_ = ":bayer_1x_packed.frag"; + fragmentShaderFile_ = ":include/libcamera/internal/shaders/bayer_1x_packed.frag"; textureMinMagFilters_ = GL_NEAREST; break; case libcamera::formats::SGBRG10_CSI2P: firstRed_.setX(0.0); firstRed_.setY(1.0); fragmentShaderDefines_.append("#define RAW10P"); - fragmentShaderFile_ = ":bayer_1x_packed.frag"; + fragmentShaderFile_ = ":include/libcamera/internal/shaders/bayer_1x_packed.frag"; textureMinMagFilters_ = GL_NEAREST; break; case libcamera::formats::SGRBG10_CSI2P: firstRed_.setX(1.0); firstRed_.setY(0.0); fragmentShaderDefines_.append("#define RAW10P"); - fragmentShaderFile_ = ":bayer_1x_packed.frag"; + fragmentShaderFile_ = ":include/libcamera/internal/shaders/bayer_1x_packed.frag"; textureMinMagFilters_ = GL_NEAREST; break; case libcamera::formats::SRGGB10_CSI2P: firstRed_.setX(0.0); firstRed_.setY(0.0); fragmentShaderDefines_.append("#define RAW10P"); - fragmentShaderFile_ = ":bayer_1x_packed.frag"; + fragmentShaderFile_ = ":include/libcamera/internal/shaders/bayer_1x_packed.frag"; textureMinMagFilters_ = GL_NEAREST; break; case libcamera::formats::SBGGR12_CSI2P: firstRed_.setX(1.0); firstRed_.setY(1.0); 
fragmentShaderDefines_.append("#define RAW12P"); - fragmentShaderFile_ = ":bayer_1x_packed.frag"; + fragmentShaderFile_ = ":include/libcamera/internal/shaders/bayer_1x_packed.frag"; textureMinMagFilters_ = GL_NEAREST; break; case libcamera::formats::SGBRG12_CSI2P: firstRed_.setX(0.0); firstRed_.setY(1.0); fragmentShaderDefines_.append("#define RAW12P"); - fragmentShaderFile_ = ":bayer_1x_packed.frag"; + fragmentShaderFile_ = ":include/libcamera/internal/shaders/bayer_1x_packed.frag"; textureMinMagFilters_ = GL_NEAREST; break; case libcamera::formats::SGRBG12_CSI2P: firstRed_.setX(1.0); firstRed_.setY(0.0); fragmentShaderDefines_.append("#define RAW12P"); - fragmentShaderFile_ = ":bayer_1x_packed.frag"; + fragmentShaderFile_ = ":include/libcamera/internal/shaders/bayer_1x_packed.frag"; textureMinMagFilters_ = GL_NEAREST; break; case libcamera::formats::SRGGB12_CSI2P: firstRed_.setX(0.0); firstRed_.setY(0.0); fragmentShaderDefines_.append("#define RAW12P"); - fragmentShaderFile_ = ":bayer_1x_packed.frag"; + fragmentShaderFile_ = ":include/libcamera/internal/shaders/bayer_1x_packed.frag"; textureMinMagFilters_ = GL_NEAREST; break; default: diff --git a/src/gstreamer/gstlibcamera-controls.cpp.in b/src/gstreamer/gstlibcamera-controls.cpp.in index 2a16b39a9..89c530da0 100644 --- a/src/gstreamer/gstlibcamera-controls.cpp.in +++ b/src/gstreamer/gstlibcamera-controls.cpp.in @@ -68,7 +68,7 @@ static const GEnumValue {{ ctrl.name|snake_case }}_types[] = { "{{ enum.gst_name }}" }, {%- endfor %} - {0, nullptr, nullptr} + {0, NULL, NULL} }; #define TYPE_{{ ctrl.name|snake_case|upper }} \ diff --git a/src/gstreamer/gstlibcamerapad.cpp b/src/gstreamer/gstlibcamerapad.cpp index 22b967198..3bc2bc87e 100644 --- a/src/gstreamer/gstlibcamerapad.cpp +++ b/src/gstreamer/gstlibcamerapad.cpp @@ -72,10 +72,6 @@ gst_libcamera_pad_query(GstPad *pad, GstObject *parent, GstQuery *query) if (query->type != GST_QUERY_LATENCY) return gst_pad_query_default(pad, parent, query); - GLibLocker 
lock(GST_OBJECT(self)); - if (self->latency == GST_CLOCK_TIME_NONE) - return FALSE; - /* TRUE here means live, we assumes that max latency is the same as min * as we have no idea that duration of frames. */ gst_query_set_latency(query, TRUE, self->latency, self->latency); @@ -85,7 +81,6 @@ gst_libcamera_pad_query(GstPad *pad, GstObject *parent, GstQuery *query) static void gst_libcamera_pad_init(GstLibcameraPad *self) { - self->latency = GST_CLOCK_TIME_NONE; GST_PAD_QUERYFUNC(self) = gst_libcamera_pad_query; } @@ -107,7 +102,7 @@ gst_libcamera_stream_role_get_type() "libcamera::Viewfinder", "view-finder", }, - { 0, nullptr, nullptr } + { 0, NULL, NULL } }; if (!type) diff --git a/src/gstreamer/gstlibcameraprovider.cpp b/src/gstreamer/gstlibcameraprovider.cpp index 08862363c..5da96ea3f 100644 --- a/src/gstreamer/gstlibcameraprovider.cpp +++ b/src/gstreamer/gstlibcameraprovider.cpp @@ -32,7 +32,7 @@ GST_DEBUG_CATEGORY_STATIC(provider_debug); */ enum { - PROP_DEVICE_ = 1, + PROP_DEVICE_NAME = 1, }; #define GST_TYPE_LIBCAMERA_DEVICE gst_libcamera_device_get_type() @@ -76,11 +76,14 @@ gst_libcamera_device_reconfigure_element(GstDevice *device, static void gst_libcamera_device_set_property(GObject *object, guint prop_id, - [[maybe_unused]]const GValue *value, GParamSpec *pspec) + const GValue *value, GParamSpec *pspec) { - // GstLibcameraDevice *device = GST_LIBCAMERA_DEVICE(object); + GstLibcameraDevice *device = GST_LIBCAMERA_DEVICE(object); switch (prop_id) { + case PROP_DEVICE_NAME: + device->name = g_value_dup_string(value); + break; default: G_OBJECT_WARN_INVALID_PROPERTY_ID(object, prop_id, pspec); break; @@ -114,6 +117,12 @@ gst_libcamera_device_class_init(GstLibcameraDeviceClass *klass) object_class->set_property = gst_libcamera_device_set_property; object_class->finalize = gst_libcamera_device_finalize; + + GParamSpec *pspec = g_param_spec_string("name", "Name", + "The name of the camera device", "", + (GParamFlags)(G_PARAM_STATIC_STRINGS | G_PARAM_WRITABLE | + 
G_PARAM_CONSTRUCT_ONLY)); + g_object_class_install_property(object_class, PROP_DEVICE_NAME, pspec); } static GstDevice * diff --git a/src/gstreamer/gstlibcamerasrc.cpp b/src/gstreamer/gstlibcamerasrc.cpp index da8eb4e5c..b34f08977 100644 --- a/src/gstreamer/gstlibcamerasrc.cpp +++ b/src/gstreamer/gstlibcamerasrc.cpp @@ -29,8 +29,6 @@ #include #include -#include -#include #include #include @@ -236,8 +234,6 @@ GstLibcameraSrcState::requestCompleted(Request *request) GLibLocker locker(&lock_); controls_.readMetadata(request); - if(queuedRequests_.empty()) - return; wrap = std::move(queuedRequests_.front()); queuedRequests_.pop(); @@ -289,19 +285,10 @@ gst_libcamera_extrapolate_info(GstVideoInfo *info, guint32 stride) } static GstFlowReturn -gst_libcamera_video_frame_copy(GstBuffer *src, GstBuffer *dest, - const GstVideoInfo *dest_info, guint32 stride) +gst_libcamera_video_frame_copy(GstBuffer *src, GstBuffer *dest, const GstVideoInfo *dest_info, guint32 stride) { - /* - * When dropping support for versions earlier than v1.22.0, use - * - * g_auto (GstVideoFrame) src_frame = GST_VIDEO_FRAME_INIT; - * g_auto (GstVideoFrame) dest_frame = GST_VIDEO_FRAME_INIT; - * - * and drop the gst_video_frame_unmap() calls. - */ - GstVideoFrame src_frame, dest_frame; GstVideoInfo src_info = *dest_info; + GstVideoFrame src_frame, dest_frame; gst_libcamera_extrapolate_info(&src_info, stride); src_info.size = gst_buffer_get_size(src); @@ -311,12 +298,7 @@ gst_libcamera_video_frame_copy(GstBuffer *src, GstBuffer *dest, return GST_FLOW_ERROR; } - /* - * When dropping support for versions earlier than 1.20.0, drop the - * const_cast<>(). 
- */ - if (!gst_video_frame_map(&dest_frame, const_cast(dest_info), - dest, GST_MAP_WRITE)) { + if (!gst_video_frame_map(&dest_frame, const_cast(dest_info), dest, GST_MAP_WRITE)) { GST_ERROR("Could not map dest buffer"); gst_video_frame_unmap(&src_frame); return GST_FLOW_ERROR; @@ -370,10 +352,10 @@ int GstLibcameraSrcState::processRequest() if (video_pool) { /* Only set video pool when a copy is needed. */ - GstBuffer *copy = nullptr; + GstBuffer *copy = NULL; const GstVideoInfo info = gst_libcamera_pad_get_video_info(srcpad); - ret = gst_buffer_pool_acquire_buffer(video_pool, ©, nullptr); + ret = gst_buffer_pool_acquire_buffer(video_pool, ©, NULL); if (ret != GST_FLOW_OK) { gst_buffer_unref(buffer); GST_ELEMENT_ERROR(src_, RESOURCE, SETTINGS, @@ -525,73 +507,6 @@ gst_libcamera_src_open(GstLibcameraSrc *self) return true; } -/** - * \brief Create a video pool for a pad - * \param[in] self The libcamerasrc instance - * \param[in] srcpad The pad - * \param[in] caps The pad caps - * \param[in] info The video info for the pad - * - * This function creates and returns a video buffer pool for the given pad if - * needed to accommodate stride mismatch. If the peer element supports stride - * negotiation through the meta API, no pool is needed and the function will - * return a null pool. - * - * \return A tuple containing the video buffers pool pointer and an error code - */ -static std::tuple -gst_libcamera_create_video_pool(GstLibcameraSrc *self, GstPad *srcpad, - GstCaps *caps, const GstVideoInfo *info) -{ - g_autoptr(GstQuery) query = nullptr; - g_autoptr(GstBufferPool) pool = nullptr; - const gboolean need_pool = true; - - /* - * Get the peer allocation hints to check if it supports the meta API. - * If so, the stride will be negotiated, and there's no need to create a - * video pool. 
- */ - query = gst_query_new_allocation(caps, need_pool); - - if (!gst_pad_peer_query(srcpad, query)) - GST_DEBUG_OBJECT(self, "Didn't get downstream ALLOCATION hints"); - else if (gst_query_find_allocation_meta(query, GST_VIDEO_META_API_TYPE, nullptr)) - return { nullptr, 0 }; - - GST_WARNING_OBJECT(self, "Downstream doesn't support video meta, need to copy frame."); - - /* - * If the allocation query has pools, use the first one. Otherwise, - * create a new pool. - */ - if (gst_query_get_n_allocation_pools(query) > 0) - gst_query_parse_nth_allocation_pool(query, 0, &pool, nullptr, - nullptr, nullptr); - - if (!pool) { - GstStructure *config; - guint min_buffers = 3; - - pool = gst_video_buffer_pool_new(); - config = gst_buffer_pool_get_config(pool); - gst_buffer_pool_config_set_params(config, caps, info->size, min_buffers, 0); - - GST_DEBUG_OBJECT(self, "Own pool config is %" GST_PTR_FORMAT, config); - - gst_buffer_pool_set_config(GST_BUFFER_POOL_CAST(pool), config); - } - - if (!gst_buffer_pool_set_active(pool, true)) { - GST_ELEMENT_ERROR(self, RESOURCE, SETTINGS, - ("Failed to active buffer pool"), - ("gst_libcamera_src_negotiate() failed.")); - return { nullptr, -EINVAL }; - } - - return { std::exchange(pool, nullptr), 0 }; -} - /* Must be called with stream_lock held. */ static bool gst_libcamera_src_negotiate(GstLibcameraSrc *self) @@ -663,7 +578,7 @@ gst_libcamera_src_negotiate(GstLibcameraSrc *self) for (gsize i = 0; i < state->srcpads_.size(); i++) { GstPad *srcpad = state->srcpads_[i]; const StreamConfiguration &stream_cfg = state->config_->at(i); - GstBufferPool *video_pool = nullptr; + GstBufferPool *video_pool = NULL; GstVideoInfo info; g_autoptr(GstCaps) caps = gst_libcamera_stream_configuration_to_caps(stream_cfg, transfer[i]); @@ -674,13 +589,50 @@ gst_libcamera_src_negotiate(GstLibcameraSrc *self) /* Stride mismatch between camera stride and that calculated by video-info. 
*/ if (static_cast(info.stride[0]) != stream_cfg.stride && GST_VIDEO_INFO_FORMAT(&info) != GST_VIDEO_FORMAT_ENCODED) { + GstQuery *query = NULL; + const gboolean need_pool = true; + gboolean has_video_meta = false; + gst_libcamera_extrapolate_info(&info, stream_cfg.stride); - std::tie(video_pool, ret) = - gst_libcamera_create_video_pool(self, srcpad, - caps, &info); - if (ret) - return false; + query = gst_query_new_allocation(caps, need_pool); + if (!gst_pad_peer_query(srcpad, query)) + GST_DEBUG_OBJECT(self, "Didn't get downstream ALLOCATION hints"); + else + has_video_meta = gst_query_find_allocation_meta(query, GST_VIDEO_META_API_TYPE, NULL); + + if (!has_video_meta) { + GstBufferPool *pool = NULL; + + if (gst_query_get_n_allocation_pools(query) > 0) + gst_query_parse_nth_allocation_pool(query, 0, &pool, NULL, NULL, NULL); + + if (pool) + video_pool = pool; + else { + GstStructure *config; + guint min_buffers = 3; + video_pool = gst_video_buffer_pool_new(); + + config = gst_buffer_pool_get_config(video_pool); + gst_buffer_pool_config_set_params(config, caps, info.size, min_buffers, 0); + + GST_DEBUG_OBJECT(self, "Own pool config is %" GST_PTR_FORMAT, config); + + gst_buffer_pool_set_config(GST_BUFFER_POOL_CAST(video_pool), config); + } + + GST_WARNING_OBJECT(self, "Downstream doesn't support video meta, need to copy frame."); + + if (!gst_buffer_pool_set_active(video_pool, true)) { + gst_caps_unref(caps); + GST_ELEMENT_ERROR(self, RESOURCE, SETTINGS, + ("Failed to active buffer pool"), + ("gst_libcamera_src_negotiate() failed.")); + return false; + } + } + gst_query_unref(query); } GstLibcameraPool *pool = gst_libcamera_pool_new(self->allocator, @@ -883,10 +835,8 @@ gst_libcamera_src_task_leave([[maybe_unused]] GstTask *task, { GLibRecLocker locker(&self->stream_lock); - for (GstPad *srcpad : state->srcpads_) { - gst_libcamera_pad_set_latency(srcpad, GST_CLOCK_TIME_NONE); + for (GstPad *srcpad : state->srcpads_) gst_libcamera_pad_set_pool(srcpad, nullptr); - } 
} g_clear_object(&self->allocator); @@ -1070,7 +1020,7 @@ gst_libcamera_src_request_new_pad(GstElement *element, GstPadTemplate *templ, const gchar *name, [[maybe_unused]] const GstCaps *caps) { GstLibcameraSrc *self = GST_LIBCAMERA_SRC(element); - g_autoptr(GstPad) pad = nullptr; + g_autoptr(GstPad) pad = NULL; GST_DEBUG_OBJECT(self, "new request pad created"); @@ -1084,12 +1034,12 @@ gst_libcamera_src_request_new_pad(GstElement *element, GstPadTemplate *templ, GST_ELEMENT_ERROR(element, STREAM, FAILED, ("Internal data stream error."), ("Could not add pad to element")); - return nullptr; + return NULL; } gst_child_proxy_child_added(GST_CHILD_PROXY(self), G_OBJECT(pad), GST_OBJECT_NAME(pad)); - return std::exchange(pad, nullptr); + return reinterpret_cast(g_steal_pointer(&pad)); } static void diff --git a/src/ipa/rkisp1/algorithms/filter.cpp b/src/ipa/rkisp1/algorithms/filter.cpp index 8ad798017..7598ef8a9 100644 --- a/src/ipa/rkisp1/algorithms/filter.cpp +++ b/src/ipa/rkisp1/algorithms/filter.cpp @@ -39,17 +39,6 @@ LOG_DEFINE_CATEGORY(RkISP1Filter) static constexpr uint32_t kFiltLumWeightDefault = 0x00022040; static constexpr uint32_t kFiltModeDefault = 0x000004f2; -/** - * \copydoc libcamera::ipa::Algorithm::init - */ -int Filter::init(IPAContext &context, - [[maybe_unused]] const YamlObject &tuningData) -{ - auto &cmap = context.ctrlMap; - cmap[&controls::Sharpness] = ControlInfo(0.0f, 10.0f, 1.0f); - - return 0; -} /** * \copydoc libcamera::ipa::Algorithm::queueRequest */ diff --git a/src/ipa/rkisp1/algorithms/filter.h b/src/ipa/rkisp1/algorithms/filter.h index 37d8938d3..8f858e574 100644 --- a/src/ipa/rkisp1/algorithms/filter.h +++ b/src/ipa/rkisp1/algorithms/filter.h @@ -21,7 +21,6 @@ public: Filter() = default; ~Filter() = default; - int init(IPAContext &context, const YamlObject &tuningData) override; void queueRequest(IPAContext &context, const uint32_t frame, IPAFrameContext &frameContext, const ControlList &controls) override; diff --git 
a/src/ipa/rkisp1/rkisp1.cpp b/src/ipa/rkisp1/rkisp1.cpp index cf66d5553..1ed7d7d92 100644 --- a/src/ipa/rkisp1/rkisp1.cpp +++ b/src/ipa/rkisp1/rkisp1.cpp @@ -116,6 +116,7 @@ const IPAHwSettings ipaHwSettingsV12{ /* List of controls handled by the RkISP1 IPA */ const ControlInfoMap::Map rkisp1Controls{ { &controls::DebugMetadataEnable, ControlInfo(false, true, false) }, + { &controls::Sharpness, ControlInfo(0.0f, 10.0f, 1.0f) }, { &controls::draft::NoiseReductionMode, ControlInfo(controls::draft::NoiseReductionModeValues) }, }; diff --git a/src/ipa/rpi/common/ipa_base.cpp b/src/ipa/rpi/common/ipa_base.cpp index a5bdcbb58..e0f8b7e78 100644 --- a/src/ipa/rpi/common/ipa_base.cpp +++ b/src/ipa/rpi/common/ipa_base.cpp @@ -58,24 +58,23 @@ const ControlInfoMap::Map ipaControls{ /* \todo Move this to the Camera class */ { &controls::AeEnable, ControlInfo(false, true, true) }, { &controls::ExposureTimeMode, - ControlInfo({ { ControlValue(controls::ExposureTimeModeAuto), - ControlValue(controls::ExposureTimeModeManual) } }, - ControlValue(controls::ExposureTimeModeAuto)) }, + ControlInfo(static_cast(controls::ExposureTimeModeAuto), + static_cast(controls::ExposureTimeModeManual), + static_cast(controls::ExposureTimeModeAuto)) }, { &controls::ExposureTime, ControlInfo(1, 66666, static_cast(defaultExposureTime.get())) }, { &controls::AnalogueGainMode, - ControlInfo({ { ControlValue(controls::AnalogueGainModeAuto), - ControlValue(controls::AnalogueGainModeManual) } }, - ControlValue(controls::AnalogueGainModeAuto)) }, + ControlInfo(static_cast(controls::AnalogueGainModeAuto), + static_cast(controls::AnalogueGainModeManual), + static_cast(controls::AnalogueGainModeAuto)) }, { &controls::AnalogueGain, ControlInfo(1.0f, 16.0f, 1.0f) }, { &controls::AeMeteringMode, ControlInfo(controls::AeMeteringModeValues) }, { &controls::AeConstraintMode, ControlInfo(controls::AeConstraintModeValues) }, { &controls::AeExposureMode, ControlInfo(controls::AeExposureModeValues) }, { 
&controls::ExposureValue, ControlInfo(-8.0f, 8.0f, 0.0f) }, - { &controls::AeFlickerMode, - ControlInfo({ { ControlValue(controls::FlickerOff), - ControlValue(controls::FlickerManual) } }, - ControlValue(controls::FlickerOff)) }, + { &controls::AeFlickerMode, ControlInfo(static_cast(controls::FlickerOff), + static_cast(controls::FlickerManual), + static_cast(controls::FlickerOff)) }, { &controls::AeFlickerPeriod, ControlInfo(100, 1000000) }, { &controls::Brightness, ControlInfo(-1.0f, 1.0f, 0.0f) }, { &controls::Contrast, ControlInfo(0.0f, 32.0f, 1.0f) }, @@ -233,6 +232,25 @@ int32_t IpaBase::configure(const IPACameraSensorInfo &sensorInfo, const ConfigPa agcStatus.analogueGain = defaultAnalogueGain; applyAGC(&agcStatus, ctrls); + /* + * Set the lens to the default (typically hyperfocal) position + * on first start. + */ + if (lensPresent_) { + RPiController::AfAlgorithm *af = + dynamic_cast(controller_.getAlgorithm("af")); + + if (af) { + float defaultPos = + ipaAfControls.at(&controls::LensPosition).def().get(); + ControlList lensCtrl(lensCtrls_); + int32_t hwpos; + + af->setLensPosition(defaultPos, &hwpos); + lensCtrl.set(V4L2_CID_FOCUS_ABSOLUTE, hwpos); + result->lensControls = std::move(lensCtrl); + } + } } result->sensorControls = std::move(ctrls); @@ -262,20 +280,8 @@ int32_t IpaBase::configure(const IPACameraSensorInfo &sensorInfo, const ConfigPa ctrlMap.merge(ControlInfoMap::Map(ipaColourControls)); /* Declare Autofocus controls, only if we have a controllable lens */ - if (lensPresent_) { + if (lensPresent_) ctrlMap.merge(ControlInfoMap::Map(ipaAfControls)); - RPiController::AfAlgorithm *af = - dynamic_cast(controller_.getAlgorithm("af")); - if (af) { - double min, max, dflt; - af->getLensLimits(min, max); - dflt = af->getDefaultLensPosition(); - ctrlMap[&controls::LensPosition] = - ControlInfo(static_cast(min), - static_cast(max), - static_cast(dflt)); - } - } result->controlInfo = ControlInfoMap(std::move(ctrlMap), controls::controls); @@ -313,35 
+319,14 @@ void IpaBase::start(const ControlList &controls, StartResult *result) /* Make a note of this as it tells us the HDR status of the first few frames. */ hdrStatus_ = agcStatus.hdr; - /* - * AF: If no lens position was specified, drive lens to a default position. - * This had to be deferred (not initialised by a constructor) until here - * to ensure that exactly ONE starting position is sent to the lens driver. - * It should be the static API default, not dependent on AF range or mode. - */ - if (firstStart_ && lensPresent_) { - RPiController::AfAlgorithm *af = dynamic_cast( - controller_.getAlgorithm("af")); - if (af && !af->getLensPosition()) { - int32_t hwpos; - double pos = af->getDefaultLensPosition(); - if (af->setLensPosition(pos, &hwpos, true)) { - ControlList lensCtrls(lensCtrls_); - lensCtrls.set(V4L2_CID_FOCUS_ABSOLUTE, hwpos); - setLensControls.emit(lensCtrls); - } - } - } - /* * Initialise frame counts, and decide how many frames must be hidden or * "mistrusted", which depends on whether this is a startup from cold, * or merely a mode switch in a running system. */ - unsigned int agcConvergenceFrames = 0, awbConvergenceFrames = 0; frameCount_ = 0; if (firstStart_) { - invalidCount_ = helper_->hideFramesStartup(); + dropFrameCount_ = helper_->hideFramesStartup(); mistrustCount_ = helper_->mistrustFramesStartup(); /* @@ -351,6 +336,7 @@ void IpaBase::start(const ControlList &controls, StartResult *result) * (mistrustCount_) that they won't see. But if zero (i.e. * no convergence necessary), no frames need to be dropped. 
*/ + unsigned int agcConvergenceFrames = 0; RPiController::AgcAlgorithm *agc = dynamic_cast( controller_.getAlgorithm("agc")); if (agc) { @@ -359,6 +345,7 @@ void IpaBase::start(const ControlList &controls, StartResult *result) agcConvergenceFrames += mistrustCount_; } + unsigned int awbConvergenceFrames = 0; RPiController::AwbAlgorithm *awb = dynamic_cast( controller_.getAlgorithm("awb")); if (awb) { @@ -366,18 +353,15 @@ void IpaBase::start(const ControlList &controls, StartResult *result) if (awbConvergenceFrames) awbConvergenceFrames += mistrustCount_; } + + dropFrameCount_ = std::max({ dropFrameCount_, agcConvergenceFrames, awbConvergenceFrames }); + LOG(IPARPI, Debug) << "Drop " << dropFrameCount_ << " frames on startup"; } else { - invalidCount_ = helper_->hideFramesModeSwitch(); + dropFrameCount_ = helper_->hideFramesModeSwitch(); mistrustCount_ = helper_->mistrustFramesModeSwitch(); } - result->startupFrameCount = std::max({ agcConvergenceFrames, awbConvergenceFrames }); - result->invalidFrameCount = invalidCount_; - - invalidCount_ = std::max({ invalidCount_, agcConvergenceFrames, awbConvergenceFrames }); - - LOG(IPARPI, Debug) << "Startup frames: " << result->startupFrameCount - << " Invalid frames: " << result->invalidFrameCount; + result->dropFrameCount = dropFrameCount_; firstStart_ = false; lastRunTimestamp_ = 0; @@ -457,7 +441,7 @@ void IpaBase::prepareIsp(const PrepareParams ¶ms) /* Allow a 10% margin on the comparison below. 
*/ Duration delta = (frameTimestamp - lastRunTimestamp_) * 1.0ns; - if (lastRunTimestamp_ && frameCount_ > invalidCount_ && + if (lastRunTimestamp_ && frameCount_ > dropFrameCount_ && delta < controllerMinFrameDuration * 0.9 && !hdrChange) { /* * Ensure we merge the previous frame's metadata with the current diff --git a/src/ipa/rpi/common/ipa_base.h b/src/ipa/rpi/common/ipa_base.h index e818104ba..1a811beb3 100644 --- a/src/ipa/rpi/common/ipa_base.h +++ b/src/ipa/rpi/common/ipa_base.h @@ -115,8 +115,8 @@ private: /* How many frames we should avoid running control algos on. */ unsigned int mistrustCount_; - /* Number of frames that need to be marked as dropped on startup. */ - unsigned int invalidCount_; + /* Number of frames that need to be dropped on startup. */ + unsigned int dropFrameCount_; /* Frame timestamp for the last run of the controller. */ uint64_t lastRunTimestamp_; diff --git a/src/ipa/rpi/controller/af_algorithm.h b/src/ipa/rpi/controller/af_algorithm.h index 382609f9b..ad9b57545 100644 --- a/src/ipa/rpi/controller/af_algorithm.h +++ b/src/ipa/rpi/controller/af_algorithm.h @@ -33,10 +33,6 @@ public: * * getMode() is provided mainly for validating controls. * getLensPosition() is provided for populating DeviceStatus. - * - * getDefaultlensPosition() and getLensLimits() were added for - * populating ControlInfoMap. They return the static API limits - * which should be independent of the current range or mode. 
*/ enum AfRange { AfRangeNormal = 0, @@ -70,9 +66,7 @@ public: } virtual void setMode(AfMode mode) = 0; virtual AfMode getMode() const = 0; - virtual double getDefaultLensPosition() const = 0; - virtual void getLensLimits(double &min, double &max) const = 0; - virtual bool setLensPosition(double dioptres, int32_t *hwpos, bool force = false) = 0; + virtual bool setLensPosition(double dioptres, int32_t *hwpos) = 0; virtual std::optional getLensPosition() const = 0; virtual void triggerScan() = 0; virtual void cancelScan() = 0; diff --git a/src/ipa/rpi/controller/rpi/af.cpp b/src/ipa/rpi/controller/rpi/af.cpp index 26e599303..2157eb94f 100644 --- a/src/ipa/rpi/controller/rpi/af.cpp +++ b/src/ipa/rpi/controller/rpi/af.cpp @@ -46,8 +46,6 @@ Af::SpeedDependentParams::SpeedDependentParams() : stepCoarse(1.0), stepFine(0.25), contrastRatio(0.75), - retriggerRatio(0.75), - retriggerDelay(10), pdafGain(-0.02), pdafSquelch(0.125), maxSlew(2.0), @@ -62,7 +60,6 @@ Af::CfgParams::CfgParams() confThresh(16), confClip(512), skipFrames(5), - checkForIR(false), map() { } @@ -90,8 +87,6 @@ void Af::SpeedDependentParams::read(const libcamera::YamlObject ¶ms) readNumber(stepCoarse, params, "step_coarse"); readNumber(stepFine, params, "step_fine"); readNumber(contrastRatio, params, "contrast_ratio"); - readNumber(retriggerRatio, params, "retrigger_ratio"); - readNumber(retriggerDelay, params, "retrigger_delay"); readNumber(pdafGain, params, "pdaf_gain"); readNumber(pdafSquelch, params, "pdaf_squelch"); readNumber(maxSlew, params, "max_slew"); @@ -142,7 +137,6 @@ int Af::CfgParams::read(const libcamera::YamlObject ¶ms) readNumber(confThresh, params, "conf_thresh"); readNumber(confClip, params, "conf_clip"); readNumber(skipFrames, params, "skip_frames"); - readNumber(checkForIR, params, "check_for_ir"); if (params.contains("map")) map = params["map"].get(ipa::Pwl{}); @@ -182,38 +176,27 @@ Af::Af(Controller *controller) useWindows_(false), phaseWeights_(), contrastWeights_(), - 
awbWeights_(), scanState_(ScanState::Idle), initted_(false), - irFlag_(false), ftarget_(-1.0), fsmooth_(-1.0), prevContrast_(0.0), - oldSceneContrast_(0.0), - prevAverage_{ 0.0, 0.0, 0.0 }, - oldSceneAverage_{ 0.0, 0.0, 0.0 }, - prevPhase_(0.0), skipCount_(0), stepCount_(0), dropCount_(0), - sameSignCount_(0), - sceneChangeCount_(0), scanMaxContrast_(0.0), scanMinContrast_(1.0e9), - scanStep_(0.0), scanData_(), reportState_(AfState::Idle) { /* - * Reserve space for data structures, to reduce memory fragmentation. - * It's too early to query the size of the PDAF sensor data, so guess. + * Reserve space for data, to reduce memory fragmentation. It's too early + * to query the size of the PDAF (from camera) and Contrast (from ISP) + * statistics, but these are plausible upper bounds. */ - windows_.reserve(1); phaseWeights_.w.reserve(16 * 12); contrastWeights_.w.reserve(getHardwareConfig().focusRegions.width * getHardwareConfig().focusRegions.height); - contrastWeights_.w.reserve(getHardwareConfig().awbRegions.width * - getHardwareConfig().awbRegions.height); scanData_.reserve(32); } @@ -252,14 +235,13 @@ void Af::switchMode(CameraMode const &cameraMode, [[maybe_unused]] Metadata *met << statsRegion_.height; invalidateWeights(); - if (scanState_ >= ScanState::Coarse1 && scanState_ < ScanState::Settle) { + if (scanState_ >= ScanState::Coarse && scanState_ < ScanState::Settle) { /* * If a scan was in progress, re-start it, as CDAF statistics * may have changed. Though if the application is just about * to take a still picture, this will not help... 
*/ startProgrammedScan(); - updateLensPosition(); } skipCount_ = cfg_.skipFrames; } @@ -325,7 +307,6 @@ void Af::invalidateWeights() { phaseWeights_.sum = 0; contrastWeights_.sum = 0; - awbWeights_.sum = 0; } bool Af::getPhase(PdafRegions const ®ions, double &phase, double &conf) @@ -347,8 +328,9 @@ bool Af::getPhase(PdafRegions const ®ions, double &phase, double &conf) if (c >= cfg_.confThresh) { if (c > cfg_.confClip) c = cfg_.confClip; - c -= (cfg_.confThresh >> 1); + c -= (cfg_.confThresh >> 2); sumWc += w * c; + c -= (cfg_.confThresh >> 2); sumWcp += (int64_t)(w * c) * (int64_t)data.phase; } } @@ -382,54 +364,6 @@ double Af::getContrast(const FocusRegions &focusStats) return (contrastWeights_.sum > 0) ? ((double)sumWc / (double)contrastWeights_.sum) : 0.0; } -/* - * Get the average R, G, B values in AF window[s] (from AWB statistics). - * Optionally, check if all of {R,G,B} are within 4:5 of each other - * across more than 50% of the counted area and within the AF window: - * for an RGB sensor this strongly suggests that IR lighting is in use. 
- */ - -bool Af::getAverageAndTestIr(const RgbyRegions &awbStats, double rgb[3]) -{ - libcamera::Size size = awbStats.size(); - if (size.height != awbWeights_.rows || - size.width != awbWeights_.cols || awbWeights_.sum == 0) { - LOG(RPiAf, Debug) << "Recompute RGB weights " << size.width << 'x' << size.height; - computeWeights(&awbWeights_, size.height, size.width); - } - - uint64_t sr = 0, sg = 0, sb = 0, sw = 1; - uint64_t greyCount = 0, allCount = 0; - for (unsigned i = 0; i < awbStats.numRegions(); ++i) { - uint64_t r = awbStats.get(i).val.rSum; - uint64_t g = awbStats.get(i).val.gSum; - uint64_t b = awbStats.get(i).val.bSum; - uint64_t w = awbWeights_.w[i]; - if (w) { - sw += w; - sr += w * r; - sg += w * g; - sb += w * b; - } - if (cfg_.checkForIR) { - if (4 * r < 5 * b && 4 * b < 5 * r && - 4 * r < 5 * g && 4 * g < 5 * r && - 4 * b < 5 * g && 4 * g < 5 * b) - greyCount += awbStats.get(i).counted; - allCount += awbStats.get(i).counted; - } - } - - rgb[0] = sr / (double)sw; - rgb[1] = sg / (double)sw; - rgb[2] = sb / (double)sw; - - return (cfg_.checkForIR && 2 * greyCount > allCount && - 4 * sr < 5 * sb && 4 * sb < 5 * sr && - 4 * sr < 5 * sg && 4 * sg < 5 * sr && - 4 * sb < 5 * sg && 4 * sg < 5 * sb); -} - void Af::doPDAF(double phase, double conf) { /* Apply loop gain */ @@ -476,7 +410,7 @@ void Af::doPDAF(double phase, double conf) bool Af::earlyTerminationByPhase(double phase) { if (scanData_.size() > 0 && - scanData_[scanData_.size() - 1].conf >= cfg_.confThresh) { + scanData_[scanData_.size() - 1].conf >= cfg_.confEpsilon) { double oldFocus = scanData_[scanData_.size() - 1].focus; double oldPhase = scanData_[scanData_.size() - 1].phase; @@ -485,12 +419,11 @@ bool Af::earlyTerminationByPhase(double phase) * Interpolate/extrapolate the lens position for zero phase. * Check that the extrapolation is well-conditioned. 
*/ - if ((ftarget_ - oldFocus) * (phase - oldPhase) * cfg_.speeds[speed_].pdafGain < 0.0) { + if ((ftarget_ - oldFocus) * (phase - oldPhase) > 0.0) { double param = phase / (phase - oldPhase); - if ((-2.5 <= param || mode_ == AfModeContinuous) && param <= 3.0) { - LOG(RPiAf, Debug) << "ETBP: param=" << param; - param = std::max(param, -2.5); + if (-3.0 <= param && param <= 3.5) { ftarget_ += param * (oldFocus - ftarget_); + LOG(RPiAf, Debug) << "ETBP: param=" << param; return true; } } @@ -503,28 +436,15 @@ double Af::findPeak(unsigned i) const { double f = scanData_[i].focus; - if (scanData_.size() >= 3) { - /* - * Given the sample with the highest contrast score and its two - * neighbours either side (or same side if at the end of a scan), - * solve for the best lens position by fitting a parabola. - * Adapted from awb.cpp: interpolateQaudaratic() - */ - - if (i == 0) - i++; - else if (i + 1 >= scanData_.size()) - i--; - - double abx = scanData_[i - 1].focus - scanData_[i].focus; - double aby = scanData_[i - 1].contrast - scanData_[i].contrast; - double cbx = scanData_[i + 1].focus - scanData_[i].focus; - double cby = scanData_[i + 1].contrast - scanData_[i].contrast; - double denom = 2.0 * (aby * cbx - cby * abx); - if (std::abs(denom) >= (1.0 / 64.0) && denom * abx > 0.0) { - f = (aby * cbx * cbx - cby * abx * abx) / denom; - f = std::clamp(f, std::min(abx, cbx), std::max(abx, cbx)); - f += scanData_[i].focus; + if (i > 0 && i + 1 < scanData_.size()) { + double dropLo = scanData_[i].contrast - scanData_[i - 1].contrast; + double dropHi = scanData_[i].contrast - scanData_[i + 1].contrast; + if (0.0 <= dropLo && dropLo < dropHi) { + double param = 0.3125 * (1.0 - dropLo / dropHi) * (1.6 - dropLo / dropHi); + f += param * (scanData_[i - 1].focus - f); + } else if (0.0 <= dropHi && dropHi < dropLo) { + double param = 0.3125 * (1.0 - dropHi / dropLo) * (1.6 - dropHi / dropLo); + f += param * (scanData_[i + 1].focus - f); } } @@ -538,49 +458,36 @@ void 
Af::doScan(double contrast, double phase, double conf) if (scanData_.empty() || contrast > scanMaxContrast_) { scanMaxContrast_ = contrast; scanMaxIndex_ = scanData_.size(); - if (scanState_ != ScanState::Fine) - std::copy(prevAverage_, prevAverage_ + 3, oldSceneAverage_); } if (contrast < scanMinContrast_) scanMinContrast_ = contrast; scanData_.emplace_back(ScanRecord{ ftarget_, contrast, phase, conf }); - if ((scanStep_ >= 0.0 && ftarget_ >= cfg_.ranges[range_].focusMax) || - (scanStep_ <= 0.0 && ftarget_ <= cfg_.ranges[range_].focusMin) || - (scanState_ == ScanState::Fine && scanData_.size() >= 3) || - contrast < cfg_.speeds[speed_].contrastRatio * scanMaxContrast_) { - double pk = findPeak(scanMaxIndex_); - /* - * Finished a scan, by hitting a limit or due to constrast dropping off. - * If this is a first coarse scan and we didn't bracket the peak, reverse! - * If this is a fine scan, or no fine step was defined, we've finished. - * Otherwise, start fine scan in opposite direction. - */ - if (scanState_ == ScanState::Coarse1 && - scanData_[0].contrast >= cfg_.speeds[speed_].contrastRatio * scanMaxContrast_) { - scanStep_ = -scanStep_; - scanState_ = ScanState::Coarse2; - } else if (scanState_ == ScanState::Fine || cfg_.speeds[speed_].stepFine <= 0.0) { - ftarget_ = pk; + if (scanState_ == ScanState::Coarse) { + if (ftarget_ >= cfg_.ranges[range_].focusMax || + contrast < cfg_.speeds[speed_].contrastRatio * scanMaxContrast_) { + /* + * Finished course scan, or termination based on contrast. + * Jump to just after max contrast and start fine scan. 
+ */ + ftarget_ = std::min(ftarget_, findPeak(scanMaxIndex_) + + 2.0 * cfg_.speeds[speed_].stepFine); + scanState_ = ScanState::Fine; + scanData_.clear(); + } else + ftarget_ += cfg_.speeds[speed_].stepCoarse; + } else { /* ScanState::Fine */ + if (ftarget_ <= cfg_.ranges[range_].focusMin || scanData_.size() >= 5 || + contrast < cfg_.speeds[speed_].contrastRatio * scanMaxContrast_) { + /* + * Finished fine scan, or termination based on contrast. + * Use quadratic peak-finding to find best contrast position. + */ + ftarget_ = findPeak(scanMaxIndex_); scanState_ = ScanState::Settle; - } else if (scanState_ == ScanState::Coarse1 && - scanData_[0].contrast >= cfg_.speeds[speed_].contrastRatio * scanMaxContrast_) { - scanStep_ = -scanStep_; - scanState_ = ScanState::Coarse2; - } else if (scanStep_ >= 0.0) { - ftarget_ = std::min(pk + cfg_.speeds[speed_].stepFine, - cfg_.ranges[range_].focusMax); - scanStep_ = -cfg_.speeds[speed_].stepFine; - scanState_ = ScanState::Fine; - } else { - ftarget_ = std::max(pk - cfg_.speeds[speed_].stepFine, - cfg_.ranges[range_].focusMin); - scanStep_ = cfg_.speeds[speed_].stepFine; - scanState_ = ScanState::Fine; - } - scanData_.clear(); - } else - ftarget_ += scanStep_; + } else + ftarget_ -= cfg_.speeds[speed_].stepFine; + } stepCount_ = (ftarget_ == fsmooth_) ? 0 : cfg_.speeds[speed_].stepFrames; } @@ -594,70 +501,26 @@ void Af::doAF(double contrast, double phase, double conf) return; } - /* Count frames for which PDAF phase has had same sign */ - if (phase * prevPhase_ <= 0.0) - sameSignCount_ = 0; - else - sameSignCount_++; - prevPhase_ = phase; - - if (mode_ == AfModeManual) - return; /* nothing to do */ - if (scanState_ == ScanState::Pdaf) { /* * Use PDAF closed-loop control whenever available, in both CAF * mode and (for a limited number of iterations) when triggered. 
- * If PDAF fails (due to poor contrast, noise or large defocus) - * for at least dropoutFrames, fall back to a CDAF-based scan - * immediately (in triggered-auto) or on scene change (in CAF). + * If PDAF fails (due to poor contrast, noise or large defocus), + * fall back to a CDAF-based scan. To avoid "nuisance" scans, + * scan only after a number of frames with low PDAF confidence. */ - if (conf >= cfg_.confEpsilon) { - if (mode_ == AfModeAuto || sameSignCount_ >= 3) - doPDAF(phase, conf); + if (conf > (dropCount_ ? 1.0 : 0.25) * cfg_.confEpsilon) { + doPDAF(phase, conf); if (stepCount_ > 0) stepCount_--; else if (mode_ != AfModeContinuous) scanState_ = ScanState::Idle; - oldSceneContrast_ = contrast; - std::copy(prevAverage_, prevAverage_ + 3, oldSceneAverage_); - sceneChangeCount_ = 0; dropCount_ = 0; - return; - } else { - dropCount_++; - if (dropCount_ < cfg_.speeds[speed_].dropoutFrames) - return; - if (mode_ != AfModeContinuous) { - startProgrammedScan(); - return; - } - /* else fall through to waiting for a scene change */ - } - } - if (scanState_ < ScanState::Coarse1 && mode_ == AfModeContinuous) { - /* - * In CAF mode, not in a scan, and PDAF is unavailable. - * Wait for a scene change, followed by stability. 
- */ - if (contrast + 1.0 < cfg_.speeds[speed_].retriggerRatio * oldSceneContrast_ || - oldSceneContrast_ + 1.0 < cfg_.speeds[speed_].retriggerRatio * contrast || - prevAverage_[0] + 1.0 < cfg_.speeds[speed_].retriggerRatio * oldSceneAverage_[0] || - oldSceneAverage_[0] + 1.0 < cfg_.speeds[speed_].retriggerRatio * prevAverage_[0] || - prevAverage_[1] + 1.0 < cfg_.speeds[speed_].retriggerRatio * oldSceneAverage_[1] || - oldSceneAverage_[1] + 1.0 < cfg_.speeds[speed_].retriggerRatio * prevAverage_[1] || - prevAverage_[2] + 1.0 < cfg_.speeds[speed_].retriggerRatio * oldSceneAverage_[2] || - oldSceneAverage_[2] + 1.0 < cfg_.speeds[speed_].retriggerRatio * prevAverage_[2]) { - oldSceneContrast_ = contrast; - std::copy(prevAverage_, prevAverage_ + 3, oldSceneAverage_); - sceneChangeCount_ = 1; - } else if (sceneChangeCount_) - sceneChangeCount_++; - if (sceneChangeCount_ >= cfg_.speeds[speed_].retriggerDelay) + } else if (++dropCount_ == cfg_.speeds[speed_].dropoutFrames) startProgrammedScan(); - } else if (scanState_ >= ScanState::Coarse1 && fsmooth_ == ftarget_) { + } else if (scanState_ >= ScanState::Coarse && fsmooth_ == ftarget_) { /* - * CDAF-based scanning sequence. + * Scanning sequence. This means PDAF has become unavailable. * Allow a delay between steps for CDAF FoM statistics to be * updated, and a "settling time" at the end of the sequence. * [A coarse or fine scan can be abandoned if two PDAF samples @@ -676,14 +539,11 @@ void Af::doAF(double contrast, double phase, double conf) scanState_ = ScanState::Pdaf; else scanState_ = ScanState::Idle; - dropCount_ = 0; - sceneChangeCount_ = 0; - oldSceneContrast_ = std::max(scanMaxContrast_, prevContrast_); scanData_.clear(); - } else if (conf >= cfg_.confThresh && earlyTerminationByPhase(phase)) { - std::copy(prevAverage_, prevAverage_ + 3, oldSceneAverage_); + } else if (conf >= cfg_.confEpsilon && earlyTerminationByPhase(phase)) { scanState_ = ScanState::Settle; - stepCount_ = (mode_ == AfModeContinuous) ? 
0 : cfg_.speeds[speed_].stepFrames; + stepCount_ = (mode_ == AfModeContinuous) ? 0 + : cfg_.speeds[speed_].stepFrames; } else doScan(contrast, phase, conf); } @@ -713,8 +573,7 @@ void Af::updateLensPosition() void Af::startAF() { /* Use PDAF if the tuning file allows it; else CDAF. */ - if (cfg_.speeds[speed_].pdafGain != 0.0 && - cfg_.speeds[speed_].dropoutFrames > 0 && + if (cfg_.speeds[speed_].dropoutFrames > 0 && (mode_ == AfModeContinuous || cfg_.speeds[speed_].pdafFrames > 0)) { if (!initted_) { ftarget_ = cfg_.ranges[range_].focusDefault; @@ -724,30 +583,16 @@ void Af::startAF() scanState_ = ScanState::Pdaf; scanData_.clear(); dropCount_ = 0; - oldSceneContrast_ = 0.0; - sceneChangeCount_ = 0; reportState_ = AfState::Scanning; - } else { + } else startProgrammedScan(); - updateLensPosition(); - } } void Af::startProgrammedScan() { - if (!initted_ || mode_ != AfModeContinuous || - fsmooth_ <= cfg_.ranges[range_].focusMin + 2.0 * cfg_.speeds[speed_].stepCoarse) { - ftarget_ = cfg_.ranges[range_].focusMin; - scanStep_ = cfg_.speeds[speed_].stepCoarse; - scanState_ = ScanState::Coarse2; - } else if (fsmooth_ >= cfg_.ranges[range_].focusMax - 2.0 * cfg_.speeds[speed_].stepCoarse) { - ftarget_ = cfg_.ranges[range_].focusMax; - scanStep_ = -cfg_.speeds[speed_].stepCoarse; - scanState_ = ScanState::Coarse2; - } else { - scanStep_ = -cfg_.speeds[speed_].stepCoarse; - scanState_ = ScanState::Coarse1; - } + ftarget_ = cfg_.ranges[range_].focusMin; + updateLensPosition(); + scanState_ = ScanState::Coarse; scanMaxContrast_ = 0.0; scanMinContrast_ = 1.0e9; scanMaxIndex_ = 0; @@ -788,7 +633,7 @@ void Af::prepare(Metadata *imageMetadata) uint32_t oldSt = stepCount_; if (imageMetadata->get("pdaf.regions", regions) == 0) getPhase(regions, phase, conf); - doAF(prevContrast_, phase, irFlag_ ? 
0 : conf); + doAF(prevContrast_, phase, conf); updateLensPosition(); LOG(RPiAf, Debug) << std::fixed << std::setprecision(2) << static_cast(reportState_) @@ -798,8 +643,7 @@ void Af::prepare(Metadata *imageMetadata) << " ft" << oldFt << "->" << ftarget_ << " fs" << oldFs << "->" << fsmooth_ << " cont=" << (int)prevContrast_ - << " phase=" << (int)phase << " conf=" << (int)conf - << (irFlag_ ? " IR" : ""); + << " phase=" << (int)phase << " conf=" << (int)conf; } /* Report status and produce new lens setting */ @@ -812,8 +656,6 @@ void Af::prepare(Metadata *imageMetadata) if (mode_ == AfModeAuto && scanState_ != ScanState::Idle) status.state = AfState::Scanning; - else if (mode_ == AfModeManual) - status.state = AfState::Idle; else status.state = reportState_; status.lensSetting = initted_ ? std::optional(cfg_.map.eval(fsmooth_)) @@ -825,7 +667,6 @@ void Af::process(StatisticsPtr &stats, [[maybe_unused]] Metadata *imageMetadata) { (void)imageMetadata; prevContrast_ = getContrast(stats->focusRegions); - irFlag_ = getAverageAndTestIr(stats->awbRegions, prevAverage_); } /* Controls */ @@ -874,23 +715,11 @@ void Af::setWindows(libcamera::Span const &wins) invalidateWeights(); } -double Af::getDefaultLensPosition() const -{ - return cfg_.ranges[AfRangeNormal].focusDefault; -} - -void Af::getLensLimits(double &min, double &max) const -{ - /* Limits for manual focus are set by map, not by ranges */ - min = cfg_.map.domain().start; - max = cfg_.map.domain().end; -} - -bool Af::setLensPosition(double dioptres, int *hwpos, bool force) +bool Af::setLensPosition(double dioptres, int *hwpos) { bool changed = false; - if (mode_ == AfModeManual || force) { + if (mode_ == AfModeManual) { LOG(RPiAf, Debug) << "setLensPosition: " << dioptres; ftarget_ = cfg_.map.domain().clamp(dioptres); changed = !(initted_ && fsmooth_ == ftarget_); @@ -934,7 +763,7 @@ void Af::setMode(AfAlgorithm::AfMode mode) pauseFlag_ = false; if (mode == AfModeContinuous) scanState_ = ScanState::Trigger; - else 
if (mode != AfModeAuto || scanState_ < ScanState::Coarse1) + else if (mode != AfModeAuto || scanState_ < ScanState::Coarse) goIdle(); } } @@ -950,14 +779,12 @@ void Af::pause(AfAlgorithm::AfPause pause) if (mode_ == AfModeContinuous) { if (pause == AfPauseResume && pauseFlag_) { pauseFlag_ = false; - if (scanState_ < ScanState::Coarse1) + if (scanState_ < ScanState::Coarse) scanState_ = ScanState::Trigger; } else if (pause != AfPauseResume && !pauseFlag_) { pauseFlag_ = true; - if (pause == AfPauseImmediate || scanState_ < ScanState::Coarse1) { - scanState_ = ScanState::Idle; - scanData_.clear(); - } + if (pause == AfPauseImmediate || scanState_ < ScanState::Coarse) + goIdle(); } } } diff --git a/src/ipa/rpi/controller/rpi/af.h b/src/ipa/rpi/controller/rpi/af.h index d35a39d12..317a51f3e 100644 --- a/src/ipa/rpi/controller/rpi/af.h +++ b/src/ipa/rpi/controller/rpi/af.h @@ -15,28 +15,20 @@ /* * This algorithm implements a hybrid of CDAF and PDAF, favouring PDAF. * - * Whenever PDAF is available (and reports sufficiently high confidence), - * it is used for continuous feedback control of the lens position. When - * triggered in Auto mode, we enable the loop for a limited number of frames - * (it may terminate sooner if the phase becomes small). In CAF mode, the - * PDAF loop runs continuously. Very small lens movements are suppressed. + * Whenever PDAF is available, it is used in a continuous feedback loop. + * When triggered in auto mode, we simply enable AF for a limited number + * of frames (it may terminate early if the delta becomes small enough). * * When PDAF confidence is low (due e.g. to low contrast or extreme defocus) * or PDAF data are absent, fall back to CDAF with a programmed scan pattern. - * A coarse and fine scan are performed, using the ISP's CDAF contrast FoM - * to estimate the lens position with peak contrast. (This is slower due to - * extra latency in the ISP, and requires a settling time between steps.) 
- * The scan may terminate early if PDAF recovers and allows the zero-phase - * lens position to be interpolated. + * A coarse and fine scan are performed, using ISP's CDAF focus FoM to + * estimate the lens position with peak contrast. This is slower due to + * extra latency in the ISP, and requires a settling time between steps. * - * In CAF mode, the fallback to a CDAF scan is triggered when PDAF fails to - * report high confidence and a configurable number of frames have elapsed - * since the last image change since either PDAF was working or a previous - * scan found peak contrast. Image changes are detected using both contrast - * and AWB statistics (within the AF window[s]). + * Some hysteresis is applied to the switch between PDAF and CDAF, to avoid + * "nuisance" scans. During each interval where PDAF is not working, only + * ONE scan will be performed; CAF cannot track objects using CDAF alone. * - * IR lighting can interfere with the correct operation of PDAF, so we - * optionally try to detect it (from AWB statistics). 
*/ namespace RPiController { @@ -62,9 +54,7 @@ public: void setWindows(libcamera::Span const &wins) override; void setMode(AfMode mode) override; AfMode getMode() const override; - double getDefaultLensPosition() const override; - void getLensLimits(double &min, double &max) const override; - bool setLensPosition(double dioptres, int32_t *hwpos, bool force) override; + bool setLensPosition(double dioptres, int32_t *hwpos) override; std::optional getLensPosition() const override; void triggerScan() override; void cancelScan() override; @@ -75,8 +65,7 @@ private: Idle = 0, Trigger, Pdaf, - Coarse1, - Coarse2, + Coarse, Fine, Settle }; @@ -91,11 +80,9 @@ private: }; struct SpeedDependentParams { - double stepCoarse; /* in dioptres; used for scans */ - double stepFine; /* in dioptres; used for scans */ + double stepCoarse; /* used for scans */ + double stepFine; /* used for scans */ double contrastRatio; /* used for scan termination and reporting */ - double retriggerRatio; /* contrast and RGB ratio for re-triggering */ - uint32_t retriggerDelay; /* frames of stability before re-triggering */ double pdafGain; /* coefficient for PDAF feedback loop */ double pdafSquelch; /* PDAF stability parameter (device-specific) */ double maxSlew; /* limit for lens movement per frame */ @@ -114,7 +101,6 @@ private: uint32_t confThresh; /* PDAF confidence cell min (sensor-specific) */ uint32_t confClip; /* PDAF confidence cell max (sensor-specific) */ uint32_t skipFrames; /* frames to skip at start or modeswitch */ - bool checkForIR; /* Set this if PDAF is unreliable in IR light */ libcamera::ipa::Pwl map; /* converts dioptres -> lens driver position */ CfgParams(); @@ -143,7 +129,6 @@ private: void invalidateWeights(); bool getPhase(PdafRegions const ®ions, double &phase, double &conf); double getContrast(const FocusRegions &focusStats); - bool getAverageAndTestIr(const RgbyRegions &awbStats, double rgb[3]); void doPDAF(double phase, double conf); bool earlyTerminationByPhase(double 
phase); double findPeak(unsigned index) const; @@ -165,20 +150,15 @@ private: bool useWindows_; RegionWeights phaseWeights_; RegionWeights contrastWeights_; - RegionWeights awbWeights_; /* Working state. */ ScanState scanState_; - bool initted_, irFlag_; + bool initted_; double ftarget_, fsmooth_; - double prevContrast_, oldSceneContrast_; - double prevAverage_[3], oldSceneAverage_[3]; - double prevPhase_; + double prevContrast_; unsigned skipCount_, stepCount_, dropCount_; - unsigned sameSignCount_; - unsigned sceneChangeCount_; unsigned scanMaxIndex_; - double scanMaxContrast_, scanMinContrast_, scanStep_; + double scanMaxContrast_, scanMinContrast_; std::vector scanData_; AfState reportState_; }; diff --git a/src/ipa/rpi/pisp/data/imx708.json b/src/ipa/rpi/pisp/data/imx708.json index 7f2e78655..e8d25c216 100644 --- a/src/ipa/rpi/pisp/data/imx708.json +++ b/src/ipa/rpi/pisp/data/imx708.json @@ -1139,27 +1139,11 @@ "step_coarse": 1.0, "step_fine": 0.25, "contrast_ratio": 0.75, - "retrigger_ratio": 0.8, - "retrigger_delay": 10, - "pdaf_gain": -0.016, - "pdaf_squelch": 0.125, - "max_slew": 1.5, - "pdaf_frames": 20, - "dropout_frames": 6, - "step_frames": 5 - }, - "fast": - { - "step_coarse": 1.25, - "step_fine": 0.0, - "contrast_ratio": 0.75, - "retrigger_ratio": 0.8, - "retrigger_delay": 8, "pdaf_gain": -0.02, "pdaf_squelch": 0.125, "max_slew": 2.0, - "pdaf_frames": 16, - "dropout_frames": 4, + "pdaf_frames": 20, + "dropout_frames": 6, "step_frames": 4 } }, @@ -1167,7 +1151,6 @@ "conf_thresh": 16, "conf_clip": 512, "skip_frames": 5, - "check_for_ir": false, "map": [ 0.0, 445, 15.0, 925 ] } }, @@ -1284,4 +1267,4 @@ } } ] -} +} \ No newline at end of file diff --git a/src/ipa/rpi/pisp/data/imx708_noir.json b/src/ipa/rpi/pisp/data/imx708_noir.json index c5e6a2652..e69afb0c6 100644 --- a/src/ipa/rpi/pisp/data/imx708_noir.json +++ b/src/ipa/rpi/pisp/data/imx708_noir.json @@ -1156,27 +1156,11 @@ "step_coarse": 1.0, "step_fine": 0.25, "contrast_ratio": 0.75, - 
"retrigger_ratio": 0.8, - "retrigger_delay": 10, - "pdaf_gain": -0.016, - "pdaf_squelch": 0.125, - "max_slew": 1.5, - "pdaf_frames": 20, - "dropout_frames": 6, - "step_frames": 5 - }, - "fast": - { - "step_coarse": 1.25, - "step_fine": 0.0, - "contrast_ratio": 0.75, - "retrigger_ratio": 0.8, - "retrigger_delay": 8, "pdaf_gain": -0.02, "pdaf_squelch": 0.125, "max_slew": 2.0, - "pdaf_frames": 16, - "dropout_frames": 4, + "pdaf_frames": 20, + "dropout_frames": 6, "step_frames": 4 } }, @@ -1184,7 +1168,6 @@ "conf_thresh": 16, "conf_clip": 512, "skip_frames": 5, - "check_for_ir": true, "map": [ 0.0, 445, 15.0, 925 ] } }, @@ -1247,4 +1230,4 @@ } } ] -} +} \ No newline at end of file diff --git a/src/ipa/rpi/pisp/data/imx708_wide.json b/src/ipa/rpi/pisp/data/imx708_wide.json index 8550cdfc1..9fff05d93 100644 --- a/src/ipa/rpi/pisp/data/imx708_wide.json +++ b/src/ipa/rpi/pisp/data/imx708_wide.json @@ -1148,27 +1148,23 @@ "step_coarse": 2.0, "step_fine": 0.5, "contrast_ratio": 0.75, - "retrigger_ratio" : 0.8, - "retrigger_delay" : 10, "pdaf_gain": -0.03, "pdaf_squelch": 0.2, - "max_slew": 3.0, + "max_slew": 4.0, "pdaf_frames": 20, "dropout_frames": 6, - "step_frames": 5 + "step_frames": 4 }, "fast": { - "step_coarse": 2.5, - "step_fine": 0.0, + "step_coarse": 2.0, + "step_fine": 0.5, "contrast_ratio": 0.75, - "retrigger_ratio" : 0.8, - "retrigger_delay" : 8, "pdaf_gain": -0.05, "pdaf_squelch": 0.2, - "max_slew": 4.0, + "max_slew": 5.0, "pdaf_frames": 16, - "dropout_frames": 4, + "dropout_frames": 6, "step_frames": 4 } }, @@ -1176,7 +1172,6 @@ "conf_thresh": 12, "conf_clip": 512, "skip_frames": 5, - "check_for_ir": false, "map": [ 0.0, 420, 35.0, 920 ] } }, @@ -1295,4 +1290,4 @@ } } ] -} +} \ No newline at end of file diff --git a/src/ipa/rpi/pisp/data/imx708_wide_noir.json b/src/ipa/rpi/pisp/data/imx708_wide_noir.json index 069a06180..75d1149b6 100644 --- a/src/ipa/rpi/pisp/data/imx708_wide_noir.json +++ b/src/ipa/rpi/pisp/data/imx708_wide_noir.json @@ -1057,27 +1057,23 @@ 
"step_coarse": 2.0, "step_fine": 0.5, "contrast_ratio": 0.75, - "retrigger_ratio" : 0.8, - "retrigger_delay" : 10, "pdaf_gain": -0.03, "pdaf_squelch": 0.2, - "max_slew": 3.0, + "max_slew": 4.0, "pdaf_frames": 20, "dropout_frames": 6, - "step_frames": 5 + "step_frames": 4 }, "fast": { - "step_coarse": 2.5, - "step_fine": 0.0, + "step_coarse": 2.0, + "step_fine": 0.5, "contrast_ratio": 0.75, - "retrigger_ratio" : 0.8, - "retrigger_delay" : 8, "pdaf_gain": -0.05, "pdaf_squelch": 0.2, - "max_slew": 4.0, + "max_slew": 5.0, "pdaf_frames": 16, - "dropout_frames": 4, + "dropout_frames": 6, "step_frames": 4 } }, @@ -1085,7 +1081,6 @@ "conf_thresh": 12, "conf_clip": 512, "skip_frames": 5, - "check_for_ir": true, "map": [ 0.0, 420, 35.0, 920 ] } }, @@ -1150,4 +1145,4 @@ } } ] -} +} \ No newline at end of file diff --git a/src/ipa/rpi/vc4/data/imx708.json b/src/ipa/rpi/vc4/data/imx708.json index e54ceff49..4de6f0796 100644 --- a/src/ipa/rpi/vc4/data/imx708.json +++ b/src/ipa/rpi/vc4/data/imx708.json @@ -638,27 +638,11 @@ "step_coarse": 1.0, "step_fine": 0.25, "contrast_ratio": 0.75, - "retrigger_ratio": 0.8, - "retrigger_delay": 10, - "pdaf_gain": -0.016, - "pdaf_squelch": 0.125, - "max_slew": 1.5, - "pdaf_frames": 20, - "dropout_frames": 6, - "step_frames": 5 - }, - "fast": - { - "step_coarse": 1.25, - "step_fine": 0.0, - "contrast_ratio": 0.75, - "retrigger_ratio": 0.8, - "retrigger_delay": 8, "pdaf_gain": -0.02, "pdaf_squelch": 0.125, "max_slew": 2.0, - "pdaf_frames": 16, - "dropout_frames": 4, + "pdaf_frames": 20, + "dropout_frames": 6, "step_frames": 4 } }, @@ -666,7 +650,6 @@ "conf_thresh": 16, "conf_clip": 512, "skip_frames": 5, - "check_for_ir": false, "map": [ 0.0, 445, 15.0, 925 ] } }, @@ -685,4 +668,4 @@ } } ] -} +} \ No newline at end of file diff --git a/src/ipa/rpi/vc4/data/imx708_noir.json b/src/ipa/rpi/vc4/data/imx708_noir.json index f351a1800..7b7ee874f 100644 --- a/src/ipa/rpi/vc4/data/imx708_noir.json +++ b/src/ipa/rpi/vc4/data/imx708_noir.json @@ -737,27 
+737,11 @@ "step_coarse": 1.0, "step_fine": 0.25, "contrast_ratio": 0.75, - "retrigger_ratio": 0.8, - "retrigger_delay": 10, - "pdaf_gain": -0.016, - "pdaf_squelch": 0.125, - "max_slew": 1.5, - "pdaf_frames": 20, - "dropout_frames": 6, - "step_frames": 5 - }, - "fast": - { - "step_coarse": 1.25, - "step_fine": 0.0, - "contrast_ratio": 0.75, - "retrigger_ratio": 0.8, - "retrigger_delay": 8, "pdaf_gain": -0.02, "pdaf_squelch": 0.125, "max_slew": 2.0, - "pdaf_frames": 16, - "dropout_frames": 4, + "pdaf_frames": 20, + "dropout_frames": 6, "step_frames": 4 } }, @@ -765,7 +749,6 @@ "conf_thresh": 16, "conf_clip": 512, "skip_frames": 5, - "check_for_ir": true, "map": [ 0.0, 445, 15.0, 925 ] } }, @@ -784,4 +767,4 @@ } } ] -} +} \ No newline at end of file diff --git a/src/ipa/rpi/vc4/data/imx708_wide.json b/src/ipa/rpi/vc4/data/imx708_wide.json index bf1b122cd..6f45aafc0 100644 --- a/src/ipa/rpi/vc4/data/imx708_wide.json +++ b/src/ipa/rpi/vc4/data/imx708_wide.json @@ -637,27 +637,23 @@ "step_coarse": 2.0, "step_fine": 0.5, "contrast_ratio": 0.75, - "retrigger_ratio": 0.8, - "retrigger_delay": 10, "pdaf_gain": -0.03, "pdaf_squelch": 0.2, - "max_slew": 3.0, + "max_slew": 4.0, "pdaf_frames": 20, "dropout_frames": 6, - "step_frames": 5 + "step_frames": 4 }, "fast": { - "step_coarse": 2.5, - "step_fine": 0.0, + "step_coarse": 2.0, + "step_fine": 0.5, "contrast_ratio": 0.75, - "retrigger_ratio": 0.8, - "retrigger_delay": 8, "pdaf_gain": -0.05, "pdaf_squelch": 0.2, - "max_slew": 4.0, + "max_slew": 5.0, "pdaf_frames": 16, - "dropout_frames": 4, + "dropout_frames": 6, "step_frames": 4 } }, @@ -665,7 +661,6 @@ "conf_thresh": 12, "conf_clip": 512, "skip_frames": 5, - "check_for_ir": false, "map": [ 0.0, 420, 35.0, 920 ] } }, @@ -684,4 +679,4 @@ } } ] -} +} \ No newline at end of file diff --git a/src/ipa/rpi/vc4/data/imx708_wide_noir.json b/src/ipa/rpi/vc4/data/imx708_wide_noir.json index ea1c8c690..b9a5227e1 100644 --- a/src/ipa/rpi/vc4/data/imx708_wide_noir.json +++ 
b/src/ipa/rpi/vc4/data/imx708_wide_noir.json @@ -628,27 +628,23 @@ "step_coarse": 2.0, "step_fine": 0.5, "contrast_ratio": 0.75, - "retrigger_ratio": 0.8, - "retrigger_delay": 10, "pdaf_gain": -0.03, "pdaf_squelch": 0.2, - "max_slew": 3.0, + "max_slew": 4.0, "pdaf_frames": 20, "dropout_frames": 6, - "step_frames": 5 + "step_frames": 4 }, "fast": { - "step_coarse": 2.5, - "step_fine": 0.0, + "step_coarse": 2.0, + "step_fine": 0.5, "contrast_ratio": 0.75, - "retrigger_ratio": 0.8, - "retrigger_delay": 8, "pdaf_gain": -0.05, "pdaf_squelch": 0.2, - "max_slew": 4.0, + "max_slew": 5.0, "pdaf_frames": 16, - "dropout_frames": 4, + "dropout_frames": 6, "step_frames": 4 } }, @@ -656,7 +652,6 @@ "conf_thresh": 12, "conf_clip": 512, "skip_frames": 5, - "check_for_ir": true, "map": [ 0.0, 420, 35.0, 920 ] } }, @@ -675,4 +670,4 @@ } } ] -} +} \ No newline at end of file diff --git a/src/ipa/simple/algorithms/af.cpp b/src/ipa/simple/algorithms/af.cpp index 52ddf7f1a..b51ed95e4 100644 --- a/src/ipa/simple/algorithms/af.cpp +++ b/src/ipa/simple/algorithms/af.cpp @@ -27,18 +27,14 @@ int Af::init(IPAContext &context, [[maybe_unused]] const YamlObject &tuningData) { context.ctrlMap[&controls::LensPosition] = ControlInfo(0.0f, 100.0f, 50.0f); - context.ctrlMap[&controls::AfTrigger] = ControlInfo(0, 1, 0); return 0; } int Af::configure(IPAContext &context, [[maybe_unused]] const IPAConfigInfo &configInfo) { - context.activeState.knobs.focus_sweep = std::optional(); context.activeState.knobs.focus_pos = std::optional(); - context.activeState.knobs.focus_sweep = false; - context.activeState.knobs.focus_pos = 0; - context.configuration.focus.skip = 10; + return 0; } @@ -48,24 +44,10 @@ void Af::queueRequest([[maybe_unused]] typename Module::Context &context, const ControlList &controls) { const auto &focus_pos = controls.get(controls::LensPosition); - const auto &af_trigger = controls.get(controls::AfTrigger); if (focus_pos.has_value()) { context.activeState.knobs.focus_pos = focus_pos; 
LOG(IPASoftAutoFocus, Debug) << "Setting focus position to " << focus_pos.value(); } - if (af_trigger.has_value()) { - context.activeState.knobs.focus_sweep = af_trigger.value() == 1; - if(context.activeState.knobs.focus_sweep){ - context.activeState.knobs.focus_pos = 0; - context.configuration.focus.focus_max_pos = 0; - context.configuration.focus.sharpness_max = 0; - context.configuration.focus.start = 0; - context.configuration.focus.stop = 100; - context.configuration.focus.step = 25; - LOG(IPASoftAutoFocus, Info) << "Starting focus sweep"; - } - } - } void Af::updateFocus([[maybe_unused]] IPAContext &context, [[maybe_unused]] IPAFrameContext &frameContext, [[maybe_unused]] double exposureMSV) @@ -73,54 +55,12 @@ void Af::updateFocus([[maybe_unused]] IPAContext &context, [[maybe_unused]] IPAF frameContext.lens.focus_pos = context.activeState.knobs.focus_pos.value_or(50.0) / 100.0 * (context.configuration.focus.focus_max - context.configuration.focus.focus_min); } -void Af::step(uint32_t& skip, double& start, double& stop, double& step, double& focus_pos, double& max_pos, uint64_t& max_sharp, uint64_t sharp, bool& sweep){ - if(!sweep) - return; - if(skip != 0){ - skip --; - return; - } - skip = 2; - if(focus_pos < start) { - focus_pos = start; - return; - } - if(sharp > max_sharp) { - max_sharp = sharp; - max_pos = focus_pos; - } - if(focus_pos >= stop) { - LOG(IPASoftAutoFocus, Info) << "Best focus on step " <sharpness, - context.activeState.knobs.focus_sweep.value()); updateFocus(context, frameContext, 0); } diff --git a/src/ipa/simple/algorithms/af.h b/src/ipa/simple/algorithms/af.h index 901393717..a575ef102 100644 --- a/src/ipa/simple/algorithms/af.h +++ b/src/ipa/simple/algorithms/af.h @@ -33,7 +33,6 @@ public: private: void updateFocus(IPAContext &context, IPAFrameContext &frameContext, double focus); - void step(uint32_t& skip, double& start, double& stop, double& step, double& focus_pos, double& max_pos, uint64_t& max_sharp, uint64_t sharp, bool& 
sweep); }; } /* namespace ipa::soft::algorithms */ diff --git a/src/ipa/simple/algorithms/ccm.cpp b/src/ipa/simple/algorithms/ccm.cpp index 0a98406c1..cb023878d 100644 --- a/src/ipa/simple/algorithms/ccm.cpp +++ b/src/ipa/simple/algorithms/ccm.cpp @@ -14,6 +14,7 @@ #include #include "libcamera/internal/matrix.h" +#include "libcamera/internal/software_isp/debayer_params.h" namespace { @@ -84,7 +85,7 @@ void Ccm::applySaturation(Matrix &ccm, float saturation) } void Ccm::prepare(IPAContext &context, const uint32_t frame, - IPAFrameContext &frameContext, [[maybe_unused]] DebayerParams *params) + IPAFrameContext &frameContext, DebayerParams *params) { auto &saturation = context.activeState.knobs.saturation; @@ -108,6 +109,7 @@ void Ccm::prepare(IPAContext &context, const uint32_t frame, context.activeState.ccm.ccm = ccm; frameContext.ccm.ccm = ccm; frameContext.saturation = saturation; + params->ccm = ccm; context.activeState.ccm.changed = true; } diff --git a/src/ipa/simple/algorithms/lut.cpp b/src/ipa/simple/algorithms/lut.cpp index d1d5f7271..a161adb1a 100644 --- a/src/ipa/simple/algorithms/lut.cpp +++ b/src/ipa/simple/algorithms/lut.cpp @@ -126,6 +126,7 @@ void Lut::prepare(IPAContext &context, auto &red = params->redCcm; auto &green = params->greenCcm; auto &blue = params->blueCcm; + params->ccm = ccm; for (unsigned int i = 0; i < DebayerParams::kRGBLookupSize; i++) { red[i].r = ccmValue(i, ccm[0][0]); red[i].g = ccmValue(i, ccm[1][0]); diff --git a/src/ipa/simple/algorithms/meson.build b/src/ipa/simple/algorithms/meson.build index e5666b262..dec59ee8c 100644 --- a/src/ipa/simple/algorithms/meson.build +++ b/src/ipa/simple/algorithms/meson.build @@ -7,5 +7,4 @@ soft_simple_ipa_algorithms = files([ 'ccm.cpp', 'lut.cpp', 'af.cpp', - 'stat.cpp', ]) diff --git a/src/ipa/simple/algorithms/stat.cpp b/src/ipa/simple/algorithms/stat.cpp deleted file mode 100644 index 181a5d818..000000000 --- a/src/ipa/simple/algorithms/stat.cpp +++ /dev/null @@ -1,65 +0,0 @@ -/* 
SPDX-License-Identifier: LGPL-2.1-or-later */ -/* - * Copyright (C) 2025 Vasiliy Doylov - * - * Debayer statistic controls - */ - -#include "stat.h" - -#include - -#include - -#include "control_ids.h" - -namespace libcamera { - -LOG_DEFINE_CATEGORY(IPASoftStatistic) - -namespace ipa::soft::algorithms { - -Stat::Stat() -{ -} - -int Stat::init(IPAContext &context, - [[maybe_unused]] const YamlObject &tuningData) -{ - context.ctrlMap[&controls::DebugMetadataEnable] = ControlInfo(false, true, true); - return 0; -} - -int Stat::configure(IPAContext &context, - [[maybe_unused]] const IPAConfigInfo &configInfo) -{ - context.activeState.knobs.stats_enabled = std::optional(); - - return 0; -} - -void Stat::queueRequest([[maybe_unused]] typename Module::Context &context, - [[maybe_unused]] const uint32_t frame, - [[maybe_unused]] typename Module::FrameContext &frameContext, - const ControlList &controls) -{ - const auto &stats_enabled = controls.get(controls::DebugMetadataEnable); - if (stats_enabled.has_value()) { - context.activeState.knobs.stats_enabled = stats_enabled; - LOG(IPASoftStatistic, Debug) << "Setting debayer enabled to " << stats_enabled.value(); - } -} - -void Stat::prepare([[maybe_unused]]IPAContext &context, - [[maybe_unused]] const uint32_t frame, - [[maybe_unused]]IPAFrameContext &frameContext, - [[maybe_unused]] DebayerParams *params) -{ - params->collect_stats = context.activeState.knobs.stats_enabled.value_or(true); -} - -REGISTER_IPA_ALGORITHM(Stat, "Stat") - -} /* namespace ipa::soft::algorithms */ - -} /* namespace libcamera */ diff --git a/src/ipa/simple/algorithms/stat.h b/src/ipa/simple/algorithms/stat.h deleted file mode 100644 index dc0051cb0..000000000 --- a/src/ipa/simple/algorithms/stat.h +++ /dev/null @@ -1,38 +0,0 @@ -/* SPDX-License-Identifier: LGPL-2.1-or-later */ -/* - * Copyright (C) 2025 Vasiliy Doylov - * - * Debayer statistic controls - */ - -#pragma once - -#include "algorithm.h" - -namespace libcamera { - -namespace 
ipa::soft::algorithms { - -class Stat : public Algorithm -{ -public: - Stat(); - ~Stat() = default; - - int init(IPAContext &context, const YamlObject &tuningData) override; - int configure(IPAContext &context, const IPAConfigInfo &configInfo) override; - void queueRequest(typename Module::Context &context, - const uint32_t frame, - typename Module::FrameContext &frameContext, - const ControlList &controls) - override; - void prepare(IPAContext &context, - const uint32_t frame, - IPAFrameContext &frameContext, - DebayerParams *params) override; - -}; - -} /* namespace ipa::soft::algorithms */ - -} /* namespace libcamera */ diff --git a/src/ipa/simple/data/uncalibrated.yaml b/src/ipa/simple/data/uncalibrated.yaml index 47eaf9c61..9274c11e1 100644 --- a/src/ipa/simple/data/uncalibrated.yaml +++ b/src/ipa/simple/data/uncalibrated.yaml @@ -8,14 +8,13 @@ algorithms: # Color correction matrices can be defined here. The CCM algorithm # has a significant performance impact, and should only be enabled # if tuned. - # - Ccm: - # ccms: - # - ct: 6500 - # ccm: [ 1, 0, 0, - # 0, 1, 0, - # 0, 0, 1] + - Ccm: + ccms: + - ct: 6500 + ccm: [ 1, 0, 0, + 0, 1, 0, + 0, 0, 1] - Lut: - Agc: - Af: - - Stat: ... 
diff --git a/src/ipa/simple/ipa_context.h b/src/ipa/simple/ipa_context.h index f59c4006f..c5b5527b5 100644 --- a/src/ipa/simple/ipa_context.h +++ b/src/ipa/simple/ipa_context.h @@ -36,10 +36,6 @@ struct IPASessionConfiguration { } black; struct { int32_t focus_min, focus_max; - double focus_max_pos; - uint64_t sharpness_max; - double start, stop, step; - uint32_t skip; } focus; }; @@ -79,10 +75,6 @@ struct IPAActiveState { std::optional exposure_value; /* 0..100 range, 50.0 = normal */ std::optional focus_pos; - /* 0..1 range, 1 = normal */ - std::optional stats_enabled; - /* 0..1 range, 0 = normal */ - std::optional focus_sweep; } knobs; }; diff --git a/src/libcamera/base/log.cpp b/src/libcamera/base/log.cpp index 6a8e2a3eb..8bf3e1daa 100644 --- a/src/libcamera/base/log.cpp +++ b/src/libcamera/base/log.cpp @@ -690,9 +690,8 @@ LogSeverity Logger::parseLogLevel(std::string_view level) unsigned int severity = LogInvalid; if (std::isdigit(level[0])) { - const char *levelEnd = level.data() + level.size(); - auto [end, ec] = std::from_chars(level.data(), levelEnd, severity); - if (ec != std::errc() || end != levelEnd || severity > LogFatal) + auto [end, ec] = std::from_chars(level.data(), level.data() + level.size(), severity); + if (ec != std::errc() || *end != '\0' || severity > LogFatal) severity = LogInvalid; } else { for (unsigned int i = 0; i < std::size(names); ++i) { diff --git a/src/libcamera/camera.cpp b/src/libcamera/camera.cpp index b4f9d2433..c180a5fdd 100644 --- a/src/libcamera/camera.cpp +++ b/src/libcamera/camera.cpp @@ -488,7 +488,7 @@ std::size_t CameraConfiguration::size() const * * \return A CameraConfiguration::Status value that describes the validation * status. - * \retval CameraConfiguration::Adjusted The configuration has been adjusted + * \retval CameraConfigutation::Adjusted The configuration has been adjusted * and is now valid. The color space of some or all of the streams may have * been changed. 
The caller shall check the color spaces carefully. * \retval CameraConfiguration::Valid The configuration was already valid and diff --git a/src/libcamera/clock_recovery.cpp b/src/libcamera/clock_recovery.cpp deleted file mode 100644 index abacf444f..000000000 --- a/src/libcamera/clock_recovery.cpp +++ /dev/null @@ -1,230 +0,0 @@ -/* SPDX-License-Identifier: LGPL-2.1-or-later */ -/* - * Copyright (C) 2024, Raspberry Pi Ltd - * - * Clock recovery algorithm - */ - -#include "libcamera/internal/clock_recovery.h" - -#include - -#include - -/** - * \file clock_recovery.h - * \brief Clock recovery - deriving one clock from another independent clock - */ - -namespace libcamera { - -LOG_DEFINE_CATEGORY(ClockRec) - -/** - * \class ClockRecovery - * \brief Recover an output clock from an input clock - * - * The ClockRecovery class derives an output clock from an input clock, - * modelling the output clock as being linearly related to the input clock. - * For example, we may use it to derive wall clock timestamps from timestamps - * measured by the internal system clock which counts local time since boot. - * - * When pairs of corresponding input and output timestamps are available, - * they should be submitted to the model with addSample(). The model will - * update, and output clock values for known input clock values can be - * obtained using getOutput(). - * - * As a convenience, if the input clock is indeed the time since boot, and the - * output clock represents a real wallclock time, then addSample() can be - * called with no arguments, and a pair of timestamps will be captured at - * that moment. - * - * The configure() function accepts some configuration parameters to control - * the linear fitting process. 
- */ - -/** - * \brief Construct a ClockRecovery - */ -ClockRecovery::ClockRecovery() -{ - configure(); - reset(); -} - -/** - * \brief Set configuration parameters - * \param[in] numSamples The approximate duration for which the state of the model - * is persistent - * \param[in] maxJitter New output samples are clamped to no more than this - * amount of jitter, to prevent sudden swings from having a large effect - * \param[in] minSamples The fitted clock model is not used to generate outputs - * until this many samples have been received - * \param[in] errorThreshold If the accumulated differences between input and - * output clocks reaches this amount over a few frames, the model is reset - */ -void ClockRecovery::configure(unsigned int numSamples, unsigned int maxJitter, - unsigned int minSamples, unsigned int errorThreshold) -{ - LOG(ClockRec, Debug) - << "configure " << numSamples << " " << maxJitter << " " << minSamples << " " << errorThreshold; - - numSamples_ = numSamples; - maxJitter_ = maxJitter; - minSamples_ = minSamples; - errorThreshold_ = errorThreshold; -} - -/** - * \brief Reset the clock recovery model and start again from scratch - */ -void ClockRecovery::reset() -{ - LOG(ClockRec, Debug) << "reset"; - - lastInput_ = 0; - lastOutput_ = 0; - xAve_ = 0; - yAve_ = 0; - x2Ave_ = 0; - xyAve_ = 0; - count_ = 0; - error_ = 0.0; - /* - * Setting slope_ and offset_ to zero initially means that the clocks - * advance at exactly the same rate. - */ - slope_ = 0.0; - offset_ = 0.0; -} - -/** - * \brief Add a sample point to the clock recovery model, for recovering a wall - * clock value from the internal system time since boot - * - * This is a convenience function to make it easy to derive a wall clock value - * (using the Linux CLOCK_REALTIME) from the time since the system started - * (measured by CLOCK_BOOTTIME). 
- */ -void ClockRecovery::addSample() -{ - LOG(ClockRec, Debug) << "addSample"; - - struct timespec bootTime1; - struct timespec bootTime2; - struct timespec wallTime; - - /* Get boot and wall clocks in microseconds. */ - clock_gettime(CLOCK_BOOTTIME, &bootTime1); - clock_gettime(CLOCK_REALTIME, &wallTime); - clock_gettime(CLOCK_BOOTTIME, &bootTime2); - uint64_t boot1 = bootTime1.tv_sec * 1000000ULL + bootTime1.tv_nsec / 1000; - uint64_t boot2 = bootTime2.tv_sec * 1000000ULL + bootTime2.tv_nsec / 1000; - uint64_t boot = (boot1 + boot2) / 2; - uint64_t wall = wallTime.tv_sec * 1000000ULL + wallTime.tv_nsec / 1000; - - addSample(boot, wall); -} - -/** - * \brief Add a sample point to the clock recovery model, specifying the exact - * input and output clock values - * \param[in] input The input clock value - * \param[in] output The value of the output clock at the same moment, as far - * as possible, that the input clock was sampled - * - * This function should be used for corresponding clocks other than the Linux - * BOOTTIME and REALTIME clocks. - */ -void ClockRecovery::addSample(uint64_t input, uint64_t output) -{ - LOG(ClockRec, Debug) << "addSample " << input << " " << output; - - if (count_ == 0) { - inputBase_ = input; - outputBase_ = output; - } - - /* - * We keep an eye on cumulative drift over the last several frames. If this exceeds a - * threshold, then probably the system clock has been updated and we're going to have to - * reset everything and start over. - */ - if (lastOutput_) { - int64_t inputDiff = getOutput(input) - getOutput(lastInput_); - int64_t outputDiff = output - lastOutput_; - error_ = error_ * 0.95 + (outputDiff - inputDiff); - if (std::abs(error_) > errorThreshold_) { - reset(); - inputBase_ = input; - outputBase_ = output; - } - } - lastInput_ = input; - lastOutput_ = output; - - /* - * Never let the new output value be more than maxJitter_ away from what - * we would have expected. 
This is just to reduce the effect of sudden - * large delays in the measured output. - */ - uint64_t expectedOutput = getOutput(input); - output = std::clamp(output, expectedOutput - maxJitter_, expectedOutput + maxJitter_); - - /* - * We use x, y, x^2 and x*y sums to calculate the best fit line. Here we - * update them by pretending we have count_ samples at the previous fit, - * and now one new one. Gradually the effect of the older values gets - * lost. This is a very simple way of updating the fit (there are much - * more complicated ones!), but it works well enough. Using averages - * instead of sums makes the relative effect of old values and the new - * sample clearer. - */ - double x = static_cast(input - inputBase_); - double y = static_cast(output - outputBase_) - x; - unsigned int count1 = count_ + 1; - xAve_ = (count_ * xAve_ + x) / count1; - yAve_ = (count_ * yAve_ + y) / count1; - x2Ave_ = (count_ * x2Ave_ + x * x) / count1; - xyAve_ = (count_ * xyAve_ + x * y) / count1; - - /* - * Don't update slope and offset until we've seen "enough" sample - * points. Note that the initial settings for slope_ and offset_ - * ensures that the wallclock advances at the same rate as the realtime - * clock (but with their respective initial offsets). - */ - if (count_ > minSamples_) { - /* These are the standard equations for least squares linear regression. */ - slope_ = (count1 * count1 * xyAve_ - count1 * xAve_ * count1 * yAve_) / - (count1 * count1 * x2Ave_ - count1 * xAve_ * count1 * xAve_); - offset_ = yAve_ - slope_ * xAve_; - } - - /* - * Don't increase count_ above numSamples_, as this controls the long-term - * amount of the residual fit. 
- */ - if (count1 < numSamples_) - count_++; -} - -/** - * \brief Calculate the output clock value according to the model from an input - * clock value - * \param[in] input The input clock value - * - * \return Output clock value - */ -uint64_t ClockRecovery::getOutput(uint64_t input) -{ - double x = static_cast(input - inputBase_); - double y = slope_ * x + offset_; - uint64_t output = y + x + outputBase_; - - LOG(ClockRec, Debug) << "getOutput " << input << " " << output; - - return output; -} - -} /* namespace libcamera */ diff --git a/src/libcamera/control_ids_core.yaml b/src/libcamera/control_ids_core.yaml index eec4b4f93..aa7448645 100644 --- a/src/libcamera/control_ids_core.yaml +++ b/src/libcamera/control_ids_core.yaml @@ -212,7 +212,7 @@ controls: description: | Exposure time for the frame applied in the sensor device. - This value is specified in microseconds. + This value is specified in micro-seconds. This control will only take effect if ExposureTimeMode is Manual. If this control is set when ExposureTimeMode is Auto, the value will be @@ -1268,20 +1268,4 @@ controls: description: | Enable or disable the debug metadata. - - FrameWallClock: - type: int64_t - direction: out - description: | - This timestamp corresponds to the same moment in time as the - SensorTimestamp, but is represented as a wall clock time as measured by - the CLOCK_REALTIME clock. Like SensorTimestamp, the timestamp value is - expressed in nanoseconds. - - Being a wall clock measurement, it can be used to synchronise timing - across different devices. - - \sa SensorTimestamp - - The FrameWallClock control can only be returned in metadata. - ... 
diff --git a/src/libcamera/control_ids_rpi.yaml b/src/libcamera/control_ids_rpi.yaml index a86151123..8d1e8b47c 100644 --- a/src/libcamera/control_ids_rpi.yaml +++ b/src/libcamera/control_ids_rpi.yaml @@ -71,116 +71,4 @@ controls: \sa StatsOutputEnable - - SyncMode: - type: int32_t - direction: in - description: | - Enable or disable camera synchronisation ("sync") mode. - - When sync mode is enabled, a camera will synchronise frames temporally - with other cameras, either attached to the same device or a different - one. There should be one "server" device, which broadcasts timing - information to one or more "clients". Communication is one-way, from - server to clients only, and it is only clients that adjust their frame - timings to match the server. - - Sync mode requires all cameras to be running at (as far as possible) the - same fixed framerate. Clients may continue to make adjustments to keep - their cameras synchronised with the server for the duration of the - session, though any updates after the initial ones should remain small. - - \sa SyncReady - \sa SyncTimer - \sa SyncFrames - - enum: - - name: SyncModeOff - value: 0 - description: Disable sync mode. - - name: SyncModeServer - value: 1 - description: | - Enable sync mode, act as server. The server broadcasts timing - messages to any clients that are listening, so that the clients can - synchronise their camera frames with the server's. - - name: SyncModeClient - value: 2 - description: | - Enable sync mode, act as client. A client listens for any server - messages, and arranges for its camera frames to synchronise as - closely as possible with the server's. Many clients can listen out - for the same server. Clients can also be started ahead of any - servers, causing them merely to wait for the server to start. - - - SyncReady: - type: bool - direction: out - description: | - When using the camera synchronisation algorithm, the server broadcasts - timing information to the clients. 
This also includes the time (some - number of frames in the future, called the "ready time") at which the - server will signal its controlling application, using this control, to - start using the image frames. - - The client receives the "ready time" from the server, and will signal - its application to start using the frames at this same moment. - - While this control value is false, applications (on both client and - server) should continue to wait, and not use the frames. - - Once this value becomes true, it means that this is the first frame - where the server and its clients have agreed that they will both be - synchronised and that applications should begin consuming frames. - Thereafter, this control will continue to signal the value true for - the rest of the session. - - \sa SyncMode - \sa SyncTimer - \sa SyncFrames - - - SyncTimer: - type: int64_t - direction: out - description: | - This reports the amount of time, in microseconds, until the "ready - time", at which the server and client will signal their controlling - applications that the frames are now synchronised and should be - used. The value may be refined slightly over time, becoming more precise - as the "ready time" approaches. - - Servers always report this value, whereas clients will omit this control - until they have received a message from the server that enables them to - calculate it. - - Normally the value will start positive (the "ready time" is in the - future), and decrease towards zero, before becoming negative (the "ready - time" has elapsed). So there should be just one frame where the timer - value is, or is very close to, zero - the one for which the SyncReady - control becomes true. At this moment, the value indicates how closely - synchronised the client believes it is with the server. - - But note that if frames are being dropped, then the "near zero" valued - frame, or indeed any other, could be skipped. 
In these cases the timer - value allows an application to deduce that this has happened. - - \sa SyncMode - \sa SyncReady - \sa SyncFrames - - - SyncFrames: - type: int32_t - direction: in - description: | - The number of frames the server should wait, after enabling - SyncModeServer, before signalling (via the SyncReady control) that - frames should be used. This therefore determines the "ready time" for - all synchronised cameras. - - This control value should be set only for the device that is to act as - the server, before or at the same moment at which SyncModeServer is - enabled. - - \sa SyncMode - \sa SyncReady - \sa SyncTimer ... diff --git a/src/libcamera/egl.cpp b/src/libcamera/egl.cpp new file mode 100644 index 000000000..c6b0f9a56 --- /dev/null +++ b/src/libcamera/egl.cpp @@ -0,0 +1,369 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Linaro Ltd. + * + * Authors: + * Bryan O'Donoghue + * + * egl.cpp - Helper class for managing eGL interactions. 
+ */ + +#include "libcamera/internal/egl.h" + +#include +#include +#include +#include + +#include +#include + +namespace libcamera { + +LOG_DEFINE_CATEGORY(eGL) + +eGL::eGL() +{ +} + +eGL::~eGL() +{ +} + +// Create linear image attached to previous BO object +int eGL::createDMABufTexture2D(eGLImage *eglImage, int fd) +{ + int ret = 0; + + eglImage->stride_ = eglImage->width_ * eglImage->height_; + eglImage->offset_ = 0; + eglImage->framesize_ = eglImage->height_ * eglImage->stride_; + + LOG(eGL, Info) + << " stride " << eglImage->stride_ << " width " << eglImage->width_ << " height " << eglImage->height_ << " offset " << eglImage->offset_ << " framesize " << eglImage->framesize_; + + // TODO: use the dma buf handle from udma heap here directly + // should work for both input and output with fencing + EGLint image_attrs[] = { + EGL_WIDTH, (EGLint)eglImage->width_, + EGL_HEIGHT, (EGLint)eglImage->height_, + EGL_LINUX_DRM_FOURCC_EXT, (int)GBM_FORMAT_ARGB8888, + EGL_DMA_BUF_PLANE0_FD_EXT, fd, + EGL_DMA_BUF_PLANE0_OFFSET_EXT, 0, + EGL_DMA_BUF_PLANE0_PITCH_EXT, (EGLint)eglImage->framesize_, + EGL_NONE, EGL_NONE, /* modifier lo */ + EGL_NONE, EGL_NONE, /* modifier hi */ + EGL_NONE, + }; + + eglImage->image_ = eglCreateImageKHR(display_, EGL_NO_CONTEXT, + EGL_LINUX_DMA_BUF_EXT, + NULL, image_attrs); + + if (eglImage->image_ == EGL_NO_IMAGE_KHR) { + LOG(eGL, Error) << "eglCreateImageKHR fail"; + ret = -ENODEV; + goto done; + } + + // Generate texture, bind, associate image to texture, configure, unbind + glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, eglImage->image_); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); + +done: + return ret; +} + +void eGL::destroyDMABufTexture(eGLImage *eglImage) +{ + eglDestroyImage(display_, eglImage->image_); +} + +// +// Generate a 2D texture from an input buffer directly +void eGL::createTexture2D(eGLImage *eglImage, GLint format, uint32_t width, 
uint32_t height, void *data) +{ + glActiveTexture(eglImage->texture_unit_); + glBindTexture(GL_TEXTURE_2D, eglImage->texture_); + + // Generate texture, bind, associate image to texture, configure, unbind + glTexImage2D(GL_TEXTURE_2D, 0, format, width, height, 0, format, GL_UNSIGNED_BYTE, data); + + // Nearest filtering + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); + + // Wrap to edge to avoid edge artifacts + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); +} + +int eGL::initEGLContext(GBM *gbmContext) +{ + EGLint configAttribs[] = { + EGL_RED_SIZE, 8, + EGL_GREEN_SIZE, 8, + EGL_BLUE_SIZE, 8, + EGL_ALPHA_SIZE, 8, + EGL_SURFACE_TYPE, EGL_WINDOW_BIT, + EGL_RENDERABLE_TYPE, EGL_OPENGL_ES2_BIT, + EGL_NONE + }; + + EGLint contextAttribs[] = { + EGL_CONTEXT_MAJOR_VERSION, 2, + EGL_NONE + }; + + EGLint numConfigs; + EGLConfig config; + EGLint major; + EGLint minor; + + if (!eglBindAPI(EGL_OPENGL_ES_API)) { + LOG(eGL, Error) << "API bind fail"; + goto fail; + } + + //TODO: use optional eglGetPlatformDisplayEXT ? + display_ = eglGetDisplay(gbmContext->getDevice()); + if (display_ == EGL_NO_DISPLAY) { + LOG(eGL, Error) << "Unable to get EGL display"; + goto fail; + } + + if (eglInitialize(display_, &major, &minor) != EGL_TRUE) { + LOG(eGL, Error) << "eglInitialize fail"; + goto fail; + } + + LOG(eGL, Info) << "EGL: version " << major << "." 
<< minor; + LOG(eGL, Info) << "EGL: EGL_VERSION: " << eglQueryString(display_, EGL_VERSION); + LOG(eGL, Info) << "EGL: EGL_VENDOR: " << eglQueryString(display_, EGL_VENDOR); + LOG(eGL, Info) << "EGL: EGL_CLIENT_APIS: " << eglQueryString(display_, EGL_CLIENT_APIS); + LOG(eGL, Info) << "EGL: EGL_EXTENSIONS: " << eglQueryString(display_, EGL_EXTENSIONS); + + //TODO: interrogate strings to make sure we aren't hooking unsupported functions + // and remember to error out if a function we depend on isn't found. + // we don't use these functions right now but expect to for DMA backed + // texture generation and render-to-texture. One thing we can do is differentiate + // between DMA and non-DMA texture generation based on the presence of these functions + // In reality most - all ? - mesa implementations have these extensions so + // probably no fallback will be required + eglCreateImageKHR = (PFNEGLCREATEIMAGEKHRPROC)eglGetProcAddress("eglCreateImageKHR"); + if (!eglCreateImageKHR) + LOG(eGL, Warning) << "eglCreateImageKHR not found"; + + eglDestroyImageKHR = (PFNEGLDESTROYIMAGEKHRPROC)eglGetProcAddress("eglDestroyImageKHR"); + if (!eglDestroyImageKHR) + LOG(eGL, Warning) << "eglDestroyImageKHR not found"; + + eglExportDMABUFImageMESA = (PFNEGLEXPORTDMABUFIMAGEMESAPROC)eglGetProcAddress("eglExportDMABUFImageMESA"); + if (!eglExportDMABUFImageMESA) + LOG(eGL, Warning) << "eglExportDMABUFImageMESA not found"; + + glEGLImageTargetTexture2DOES = (PFNGLEGLIMAGETARGETTEXTURE2DOESPROC)eglGetProcAddress("glEGLImageTargetTexture2DOES"); + if (!glEGLImageTargetTexture2DOES) + LOG(eGL, Warning) << "glEGLImageTargetTexture2DOES not found"; + + eglClientWaitSyncKHR = (PFNEGLCLIENTWAITSYNCKHRPROC)eglGetProcAddress("eglClientWaitSyncKHR"); + if (!eglClientWaitSyncKHR) + LOG(eGL, Warning) << "eglClientWaitSyncKHR not found"; + + eglCreateSyncKHR = (PFNEGLCREATESYNCKHRPROC)eglGetProcAddress("eglCreateSyncKHR"); + if (!eglCreateSyncKHR) + LOG(eGL, Warning) << "eglCreateSyncKHR not found"; 
+ + if (eglChooseConfig(display_, configAttribs, &config, 1, &numConfigs) != EGL_TRUE) { + LOG(eGL, Error) << "eglChooseConfig fail"; + goto fail; + } + + context_ = eglCreateContext(display_, config, EGL_NO_CONTEXT, contextAttribs); + if (context_ == EGL_NO_CONTEXT) { + LOG(eGL, Error) << "eglContext returned EGL_NO_CONTEXT"; + goto fail; + } + + surface_ = eglCreateWindowSurface(display_, config, + (EGLNativeWindowType)gbmContext->getSurface(), + NULL); + if (surface_ == EGL_NO_SURFACE) { + LOG(eGL, Error) << "eglCreateWindowSurface fail"; + goto fail; + } + + makeCurrent(); + swapBuffers(); + + return 0; +fail: + + return -ENODEV; +} + +void eGL::makeCurrent(void) +{ + if (eglMakeCurrent(display_, surface_, surface_, context_) != EGL_TRUE) { + LOG(eGL, Error) << "eglMakeCurrent fail"; + } +} + +void eGL::swapBuffers(void) +{ + if (eglSwapBuffers(display_, surface_) != EGL_TRUE) { + LOG(eGL, Error) << "eglSwapBuffers fail"; + } +} + +void eGL::useProgram(GLuint programId) +{ + glUseProgram(programId); +} + +void eGL::pushEnv(std::vector &shaderEnv, const char *str) +{ + std::string addStr = str; + + addStr.push_back('\n'); + shaderEnv.push_back(addStr); +} + +int eGL::compileVertexShader(GLuint &shaderId, unsigned char *shaderData, + unsigned int shaderDataLen, + std::vector shaderEnv) +{ + return compileShader(GL_VERTEX_SHADER, shaderId, shaderData, shaderDataLen, shaderEnv); +} + +int eGL::compileFragmentShader(GLuint &shaderId, unsigned char *shaderData, + unsigned int shaderDataLen, + std::vector shaderEnv) +{ + return compileShader(GL_FRAGMENT_SHADER, shaderId, shaderData, shaderDataLen, shaderEnv); +} + +int eGL::compileShader(int shaderType, GLuint &shaderId, unsigned char *shaderData, + unsigned int shaderDataLen, + std::vector shaderEnv) +{ + GLchar **shaderSourceData; + GLint *shaderDataLengths; + GLint success; + GLsizei count; + size_t i; + + count = 1 + shaderEnv.size(); + shaderSourceData = new GLchar *[count]; + shaderDataLengths = new 
GLint[count]; + + // Prefix defines before main body of shader + for (i = 0; i < shaderEnv.size(); i++) { + shaderSourceData[i] = (GLchar *)shaderEnv[i].c_str(); + shaderDataLengths[i] = shaderEnv[i].length(); + } + + // Now the main body of the shader program + shaderSourceData[i] = (GLchar *)shaderData; + shaderDataLengths[i] = shaderDataLen; + + // And create the shader + shaderId = glCreateShader(shaderType); + glShaderSource(shaderId, count, shaderSourceData, shaderDataLengths); + glCompileShader(shaderId); + + // Check status + glGetShaderiv(shaderId, GL_COMPILE_STATUS, &success); + if (success == GL_FALSE) { + GLint sizeLog = 0; + GLchar *infoLog; + + glGetShaderiv(shaderId, GL_INFO_LOG_LENGTH, &sizeLog); + infoLog = new GLchar[sizeLog]; + + glGetShaderInfoLog(shaderId, sizeLog, &sizeLog, infoLog); + LOG(eGL, Error) << infoLog; + + delete[] infoLog; + } + + delete[] shaderSourceData; + delete[] shaderDataLengths; + + return !(success == GL_TRUE); +} + +void eGL::dumpShaderSource(GLuint shaderId) +{ + GLint shaderLength = 0; + GLchar *shaderSource; + + glGetShaderiv(shaderId, GL_SHADER_SOURCE_LENGTH, &shaderLength); + + LOG(eGL, Debug) << "Shader length is " << shaderLength; + + if (shaderLength > 0) { + shaderSource = new GLchar[shaderLength]; + if (!shaderSource) + return; + + glGetShaderSource(shaderId, shaderLength, &shaderLength, shaderSource); + if (shaderLength) { + LOG(eGL, Debug) << "Shader source = " << shaderSource; + } + delete[] shaderSource; + } +} + +int eGL::linkProgram(GLuint &programId, GLuint vertexshaderId, GLuint fragmentshaderId) +{ + GLint success; + GLenum err; + + programId = glCreateProgram(); + if (!programId) + goto fail; + + glAttachShader(programId, vertexshaderId); + if ((err = glGetError()) != GL_NO_ERROR) { + LOG(eGL, Error) << "Attach compute vertex shader fail"; + goto fail; + } + + glAttachShader(programId, fragmentshaderId); + if ((err = glGetError()) != GL_NO_ERROR) { + LOG(eGL, Error) << "Attach compute vertex shader 
fail"; + goto fail; + } + + glLinkProgram(programId); + if ((err = glGetError()) != GL_NO_ERROR) { + LOG(eGL, Error) << "Link program fail"; + goto fail; + } + + glDetachShader(programId, fragmentshaderId); + glDetachShader(programId, vertexshaderId); + + // Check status + glGetProgramiv(programId, GL_LINK_STATUS, &success); + if (success == GL_FALSE) { + GLint sizeLog = 0; + GLchar *infoLog; + + glGetProgramiv(programId, GL_INFO_LOG_LENGTH, &sizeLog); + infoLog = new GLchar[sizeLog]; + + glGetProgramInfoLog(programId, sizeLog, &sizeLog, infoLog); + LOG(eGL, Error) << infoLog; + + delete[] infoLog; + goto fail; + } + + return 0; +fail: + return -ENODEV; +} +} // namespace libcamera diff --git a/src/libcamera/framebuffer.cpp b/src/libcamera/framebuffer.cpp index 219db50d6..826848f75 100644 --- a/src/libcamera/framebuffer.cpp +++ b/src/libcamera/framebuffer.cpp @@ -43,19 +43,12 @@ LOG_DEFINE_CATEGORY(Buffer) * The frame has been captured with success and contains valid data. All fields * of the FrameMetadata structure are valid. * \var FrameMetadata::FrameError - * The frame data is partly or fully corrupted, missing or otherwise invalid. - * This can for instance indicate a hardware transmission error, or invalid data - * produced by the sensor during its startup phase. The sequence and timestamp - * fields of the FrameMetadata structure is valid, all the other fields may be - * invalid. + * An error occurred during capture of the frame. The frame data may be partly + * or fully invalid. The sequence and timestamp fields of the FrameMetadata + * structure is valid, the other fields may be invalid. * \var FrameMetadata::FrameCancelled * Capture stopped before the frame completed. The frame data is not valid. All * fields of the FrameMetadata structure but the status field are invalid. - * \var FrameMetadata::FrameStartup - * The frame has been successfully captured. 
However, the IPA is in a - * cold-start or reset phase and will result in image quality parameters - * producing unusable images. Applications are recommended to not consume these - * frames. All other fields of the FrameMetadata structure are valid. */ /** diff --git a/src/libcamera/gbm.cpp b/src/libcamera/gbm.cpp new file mode 100644 index 000000000..43032093e --- /dev/null +++ b/src/libcamera/gbm.cpp @@ -0,0 +1,137 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Linaro Ltd. + * + * Authors: + * Bryan O'Donoghue + * + * egl.cpp - Helper class for managing GBM interactions. + */ + +#include "libcamera/internal/gbm.h" + +#include +#include +#include +#include + +#include +#include + +namespace libcamera { + +LOG_DEFINE_CATEGORY(GBM) + +GBM::GBM() +{ + fd_ = 0; +} + +GBM::~GBM() +{ + if (gbm_surface_) + gbm_surface_destroy(gbm_surface_); + + if (gbm_device_) + gbm_device_destroy(gbm_device_); + + if (fd_ >= 0) + close(fd_); +} + +// this should probably go into its own class to deal with the +// allocation and deletion of frambuffers attached to GBM devices/objects +int GBM::initSurface(uint32_t width, uint32_t height) +{ + const char *dri_node = "/dev/dri/renderD128"; //TODO: get from an env or config setting + + fd_ = open(dri_node, O_RDWR | O_CLOEXEC); //TODO: CLOEXEC ? 
+ if (fd_ < 0) { + LOG(GBM, Error) << "Open " << dri_node << " fail " << fd_; + return fd_; + } + + gbm_device_ = gbm_create_device(fd_); + if (!gbm_device_) { + LOG(GBM, Error) << "gbm_crate_device fail"; + goto fail; + } + + // GBM_FORMAT_RGBA8888 is not supported mesa::src/gbm/dri/gbm_dri.c::gbm_dri_visuals_table[] + // This means we need to choose XRGB8888 or ARGB8888 as the raw buffer format + gbm_surface_ = gbm_surface_create(gbm_device_, width, height, GBM_FORMAT_ARGB8888, + GBM_BO_USE_RENDERING | GBM_BO_USE_LINEAR); + if (!gbm_surface_) { + LOG(GBM, Error) << "Unable to create linear gbm surface"; + goto fail; + } + + format_ = libcamera::formats::ARGB8888; + + return 0; +fail: + return -ENODEV; +} + +int GBM::mapSurface() +{ + gbm_bo_ = gbm_surface_lock_front_buffer(gbm_surface_); + if (!gbm_bo_) { + LOG(GBM, Error) << "GBM input buffer object create fail"; + return -ENODEV; + } + gbm_surface_release_buffer(gbm_surface_, gbm_bo_); + + bo_fd_ = gbm_bo_get_fd(gbm_bo_); + + if (!bo_fd_) { + gbm_surface_release_buffer(gbm_surface_, gbm_bo_); + LOG(GBM, Error) << "Unable to get fd for bo: " << bo_fd_; + return -ENODEV; + } + + stride_ = gbm_bo_get_stride(gbm_bo_); + width_ = gbm_bo_get_width(gbm_bo_); + height_ = gbm_bo_get_height(gbm_bo_); + offset_ = gbm_bo_get_offset(gbm_bo_, 0); + framesize_ = height_ * stride_; + + map_ = mmap(NULL, height_ * stride_, PROT_READ, MAP_SHARED, bo_fd_, 0); + if (map_ == MAP_FAILED) { + LOG(GBM, Error) << "mmap gbm_bo_ fail"; + return -ENODEV; + } + + LOG(GBM, Debug) << " stride " << stride_ + << " width " << width_ + << " height " << height_ + << " offset " << offset_ + << " framesize " << framesize_; + + return 0; +} + +int GBM::getFrameBufferData(uint8_t *data, size_t data_len) +{ + struct dma_buf_sync sync; + + gbm_surface_lock_front_buffer(gbm_surface_); + + sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_READ; + ioctl(bo_fd_, DMA_BUF_IOCTL_SYNC, &sync); + + if (data_len > framesize_) { + LOG(GBM, Error) << "Invalid read 
size " << data_len << " max is " << framesize_; + return -EINVAL; + } + + memcpy(data, map_, data_len); + + sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_READ; + ioctl(bo_fd_, DMA_BUF_IOCTL_SYNC, &sync); + + gbm_surface_release_buffer(gbm_surface_, gbm_bo_); + + return 0; +} +} //namespace libcamera diff --git a/src/libcamera/mapped_framebuffer.cpp b/src/libcamera/mapped_framebuffer.cpp index f54bbf21f..d5f347d4d 100644 --- a/src/libcamera/mapped_framebuffer.cpp +++ b/src/libcamera/mapped_framebuffer.cpp @@ -238,6 +238,13 @@ MappedFrameBuffer::MappedFrameBuffer(const FrameBuffer *buffer, MapFlags flags) planes_.emplace_back(info.address + plane.offset, plane.length); } + + buffer_ = buffer; +} + +int MappedFrameBuffer::getPlaneFD(int plane) +{ + return buffer_->planes()[plane].fd.get(); } } /* namespace libcamera */ diff --git a/src/libcamera/meson.build b/src/libcamera/meson.build index de1eb99b2..491eb7340 100644 --- a/src/libcamera/meson.build +++ b/src/libcamera/meson.build @@ -21,7 +21,6 @@ libcamera_internal_sources = files([ 'byte_stream_buffer.cpp', 'camera_controls.cpp', 'camera_lens.cpp', - 'clock_recovery.cpp', 'control_serializer.cpp', 'control_validator.cpp', 'converter.cpp', @@ -68,6 +67,37 @@ libcamera_deps = [] libatomic = cc.find_library('atomic', required : false) libthreads = dependency('threads') +libgbm = cc.find_library('gbm', required: false) +gbm_works = cc.check_header('gbm.h', required: false) + +if libgbm.found() and gbm_works + config_h.set('HAVE_GBM', 1) + libcamera_internal_sources += files([ + 'gbm.cpp', + ]) +endif + +libegl = cc.find_library('EGL', required : false) +libglesv2 = cc.find_library('GLESv2', required : false) +mesa_works = cc.check_header('EGL/egl.h', required: false) + +if libegl.found() and mesa_works + config_h.set('HAVE_LIBEGL', 1) +endif + +if libglesv2.found() and mesa_works + config_h.set('HAVE_GLESV2', 1) +endif + +if mesa_works and gbm_works + libcamera_internal_sources += files([ + 'egl.cpp', + ]) + 
gles_headless_enabled = true +else + gles_headless_enabled = false +endif + subdir('base') subdir('converter') subdir('ipa') @@ -84,10 +114,7 @@ if not cc.has_function('dlopen') libdl = cc.find_library('dl') endif libudev = dependency('libudev', required : get_option('udev')) -libyaml = dependency('yaml-0.1', default_options : [ - 'default_library=static', - 'werror=false', -]) +libyaml = dependency('yaml-0.1', required : false) # Use one of gnutls or libcrypto (provided by OpenSSL), trying gnutls first. libcrypto = dependency('gnutls', required : false) @@ -123,6 +150,17 @@ if libudev.found() ]) endif +# Fallback to a subproject if libyaml isn't found, as it's not packaged in AOSP. +if not libyaml.found() + cmake = import('cmake') + + libyaml_vars = cmake.subproject_options() + libyaml_vars.add_cmake_defines({'CMAKE_POSITION_INDEPENDENT_CODE': 'ON'}) + libyaml_vars.append_compile_args('c', '-Wno-unused-value') + libyaml_wrap = cmake.subproject('libyaml', options : libyaml_vars) + libyaml = libyaml_wrap.dependency('yaml') +endif + control_sources = [] controls_mode_files = { @@ -181,6 +219,9 @@ libcamera_deps += [ libcamera_base_private, libcrypto, libdl, + libegl, + libgbm, + libglesv2, liblttng, libudev, libyaml, diff --git a/src/libcamera/pipeline/imx8-isi/imx8-isi.cpp b/src/libcamera/pipeline/imx8-isi/imx8-isi.cpp index f4014b95d..ecda426a6 100644 --- a/src/libcamera/pipeline/imx8-isi/imx8-isi.cpp +++ b/src/libcamera/pipeline/imx8-isi/imx8-isi.cpp @@ -761,28 +761,30 @@ PipelineHandlerISI::generateConfiguration(Camera *camera, */ StreamConfiguration cfg; - switch (role) { - case StreamRole::StillCapture: - case StreamRole::Viewfinder: - case StreamRole::VideoRecording: { - Size size = role == StreamRole::StillCapture - ? 
data->sensor_->resolution() - : PipelineHandlerISI::kPreviewSize; - cfg = generateYUVConfiguration(camera, size); - if (cfg.pixelFormat.isValid()) - break; + switch (role) { + case StreamRole::StillCapture: + case StreamRole::Viewfinder: + case StreamRole::VideoRecording: { + Size size = role == StreamRole::StillCapture + ? data->sensor_->resolution() + : PipelineHandlerISI::kPreviewSize; + cfg = generateYUVConfiguration(camera, size); + if (cfg.pixelFormat.isValid()) + break; - /* - * Fallback to use a Bayer format if that's what the - * sensor supports. - */ - [[fallthrough]]; - } - case StreamRole::Raw: { - cfg = generateRawConfiguration(camera); - break; - } + /* + * Fallback to use a Bayer format if that's what the + * sensor supports. + */ + [[fallthrough]]; + + } + + case StreamRole::Raw: { + cfg = generateRawConfiguration(camera); + break; + } default: LOG(ISI, Error) << "Requested stream role not supported: " << role; @@ -820,7 +822,7 @@ int PipelineHandlerISI::configure(Camera *camera, CameraConfiguration *c) * routing table instead of resetting it. 
*/ V4L2Subdevice::Routing routing = {}; - unsigned int xbarFirstSource = crossbar_->entity()->pads().size() - pipes_.size(); + unsigned int xbarFirstSource = crossbar_->entity()->pads().size() / 2 + 1; for (const auto &[idx, config] : utils::enumerate(*c)) { uint32_t sourcePad = xbarFirstSource + idx; @@ -1003,7 +1005,7 @@ bool PipelineHandlerISI::match(DeviceEnumerator *enumerator) ret = capture->open(); if (ret) - return false; + return ret; pipes_.push_back({ std::move(isi), std::move(capture) }); } diff --git a/src/libcamera/pipeline/rpi/common/pipeline_base.cpp b/src/libcamera/pipeline/rpi/common/pipeline_base.cpp index eafe94427..1f13e5230 100644 --- a/src/libcamera/pipeline/rpi/common/pipeline_base.cpp +++ b/src/libcamera/pipeline/rpi/common/pipeline_base.cpp @@ -659,9 +659,9 @@ int PipelineHandlerBase::start(Camera *camera, const ControlList *controls) if (!result.controls.empty()) data->setSensorControls(result.controls); - /* Configure the number of startup and invalid frames reported by the IPA. */ - data->startupFrameCount_ = result.startupFrameCount; - data->invalidFrameCount_ = result.invalidFrameCount; + /* Configure the number of dropped frames required on startup. */ + data->dropFrameCount_ = data->config_.disableStartupFrameDrops + ? 0 : result.dropFrameCount; for (auto const stream : data->streams_) stream->resetBuffers(); @@ -678,6 +678,7 @@ int PipelineHandlerBase::start(Camera *camera, const ControlList *controls) data->buffersAllocated_ = true; } + /* We need to set the dropFrameCount_ before queueing buffers. */ ret = queueAllBuffers(camera); if (ret) { LOG(RPI, Error) << "Failed to queue buffers"; @@ -685,9 +686,6 @@ int PipelineHandlerBase::start(Camera *camera, const ControlList *controls) return ret; } - /* A good moment to add an initial clock sample. */ - data->wallClockRecovery_.addSample(); - /* * Reset the delayed controls with the gain and exposure values set by * the IPA. 
@@ -806,8 +804,7 @@ int PipelineHandlerBase::registerCamera(std::unique_ptr &camera * chain. There may be a cascade of devices in this chain! */ MediaLink *link = sensorEntity->getPadByIndex(0)->links()[0]; - if (!data->enumerateVideoDevices(link, frontendName)) - return -EINVAL; + data->enumerateVideoDevices(link, frontendName); ipa::RPi::InitResult result; if (data->loadIPA(&result)) { @@ -897,12 +894,28 @@ int PipelineHandlerBase::queueAllBuffers(Camera *camera) int ret; for (auto const stream : data->streams_) { - if (stream->getFlags() & StreamFlag::External) - continue; - - ret = stream->queueAllBuffers(); - if (ret < 0) - return ret; + if (!(stream->getFlags() & StreamFlag::External)) { + ret = stream->queueAllBuffers(); + if (ret < 0) + return ret; + } else { + /* + * For external streams, we must queue up a set of internal + * buffers to handle the number of drop frames requested by + * the IPA. This is done by passing nullptr in queueBuffer(). + * + * The below queueBuffer() call will do nothing if there + * are not enough internal buffers allocated, but this will + * be handled by queuing the request for buffers in the + * RPiStream object. + */ + unsigned int i; + for (i = 0; i < data->dropFrameCount_; i++) { + ret = stream->queueBuffer(nullptr); + if (ret) + return ret; + } + } } return 0; @@ -1019,20 +1032,16 @@ void CameraData::freeBuffers() * | Sensor2 | | Sensor3 | * +---------+ +---------+ */ -bool CameraData::enumerateVideoDevices(MediaLink *link, const std::string &frontend) +void CameraData::enumerateVideoDevices(MediaLink *link, const std::string &frontend) { const MediaPad *sinkPad = link->sink(); const MediaEntity *entity = sinkPad->entity(); bool frontendFound = false; - /* Once we reach the Frontend entity, we are done. */ - if (link->sink()->entity()->name() == frontend) - return true; - /* We only deal with Video Mux and Bridge devices in cascade. 
*/ if (entity->function() != MEDIA_ENT_F_VID_MUX && entity->function() != MEDIA_ENT_F_VID_IF_BRIDGE) - return false; + return; /* Find the source pad for this Video Mux or Bridge device. */ const MediaPad *sourcePad = nullptr; @@ -1044,7 +1053,7 @@ bool CameraData::enumerateVideoDevices(MediaLink *link, const std::string &front * and this branch in the cascade. */ if (sourcePad) - return false; + return; sourcePad = pad; } @@ -1061,9 +1070,12 @@ bool CameraData::enumerateVideoDevices(MediaLink *link, const std::string &front * other Video Mux and Bridge devices. */ for (MediaLink *l : sourcePad->links()) { - frontendFound = enumerateVideoDevices(l, frontend); - if (frontendFound) + enumerateVideoDevices(l, frontend); + /* Once we reach the Frontend entity, we are done. */ + if (l->sink()->entity()->name() == frontend) { + frontendFound = true; break; + } } /* This identifies the end of our entity enumeration recursion. */ @@ -1078,13 +1090,12 @@ bool CameraData::enumerateVideoDevices(MediaLink *link, const std::string &front bridgeDevices_.clear(); } } - - return frontendFound; } int CameraData::loadPipelineConfiguration() { config_ = { + .disableStartupFrameDrops = false, .cameraTimeoutValue = 0, }; @@ -1121,10 +1132,8 @@ int CameraData::loadPipelineConfiguration() const YamlObject &phConfig = (*root)["pipeline_handler"]; - if (phConfig.contains("disable_startup_frame_drops")) - LOG(RPI, Warning) - << "The disable_startup_frame_drops key is now deprecated, " - << "startup frames are now identified by the FrameMetadata::Status::FrameStartup flag"; + config_.disableStartupFrameDrops = + phConfig["disable_startup_frame_drops"].get(config_.disableStartupFrameDrops); config_.cameraTimeoutValue = phConfig["camera_timeout_value_ms"].get(config_.cameraTimeoutValue); @@ -1403,15 +1412,7 @@ void CameraData::handleStreamBuffer(FrameBuffer *buffer, RPi::Stream *stream) * buffer back to the stream. */ Request *request = requestQueue_.empty() ? 
nullptr : requestQueue_.front(); - if (request && request->findBuffer(stream) == buffer) { - FrameMetadata &md = buffer->_d()->metadata(); - - /* Mark the non-converged and invalid frames in the metadata. */ - if (invalidFrameCount_) - md.status = FrameMetadata::Status::FrameError; - else if (startupFrameCount_) - md.status = FrameMetadata::Status::FrameStartup; - + if (!dropFrameCount_ && request && request->findBuffer(stream) == buffer) { /* * Tag the buffer as completed, returning it to the * application. @@ -1457,31 +1458,42 @@ void CameraData::handleState() void CameraData::checkRequestCompleted() { - Request *request = requestQueue_.front(); - if (request->hasPendingBuffers()) - return; + bool requestCompleted = false; + /* + * If we are dropping this frame, do not touch the request, simply + * change the state to IDLE when ready. + */ + if (!dropFrameCount_) { + Request *request = requestQueue_.front(); + if (request->hasPendingBuffers()) + return; - /* Must wait for metadata to be filled in before completing. */ - if (state_ != State::IpaComplete) - return; + /* Must wait for metadata to be filled in before completing. */ + if (state_ != State::IpaComplete) + return; - LOG(RPI, Debug) << "Completing request sequence: " - << request->sequence(); + LOG(RPI, Debug) << "Completing request sequence: " + << request->sequence(); - pipe()->completeRequest(request); - requestQueue_.pop(); + pipe()->completeRequest(request); + requestQueue_.pop(); + requestCompleted = true; + } - LOG(RPI, Debug) << "Going into Idle state"; - state_ = State::Idle; - - if (invalidFrameCount_) { - invalidFrameCount_--; - LOG(RPI, Debug) << "Decrementing invalid frames to " - << invalidFrameCount_; - } else if (startupFrameCount_) { - startupFrameCount_--; - LOG(RPI, Debug) << "Decrementing startup frames to " - << startupFrameCount_; + /* + * Make sure we have three outputs completed in the case of a dropped + * frame. 
+ */ + if (state_ == State::IpaComplete && + ((ispOutputCount_ == ispOutputTotal_ && dropFrameCount_) || + requestCompleted)) { + LOG(RPI, Debug) << "Going into Idle state"; + state_ = State::Idle; + if (dropFrameCount_) { + dropFrameCount_--; + LOG(RPI, Debug) << "Dropping frame at the request of the IPA (" + << dropFrameCount_ << " left)"; + } } } @@ -1489,8 +1501,6 @@ void CameraData::fillRequestMetadata(const ControlList &bufferControls, Request { request->metadata().set(controls::SensorTimestamp, bufferControls.get(controls::SensorTimestamp).value_or(0)); - request->metadata().set(controls::FrameWallClock, - bufferControls.get(controls::FrameWallClock).value_or(0)); if (cropParams_.size()) { std::vector crops; diff --git a/src/libcamera/pipeline/rpi/common/pipeline_base.h b/src/libcamera/pipeline/rpi/common/pipeline_base.h index 4bce4ec4f..aae0c2f35 100644 --- a/src/libcamera/pipeline/rpi/common/pipeline_base.h +++ b/src/libcamera/pipeline/rpi/common/pipeline_base.h @@ -20,7 +20,6 @@ #include "libcamera/internal/bayer_format.h" #include "libcamera/internal/camera.h" #include "libcamera/internal/camera_sensor.h" -#include "libcamera/internal/clock_recovery.h" #include "libcamera/internal/framebuffer.h" #include "libcamera/internal/media_device.h" #include "libcamera/internal/media_object.h" @@ -49,7 +48,8 @@ class CameraData : public Camera::Private public: CameraData(PipelineHandler *pipe) : Camera::Private(pipe), state_(State::Stopped), - startupFrameCount_(0), invalidFrameCount_(0), buffersAllocated_(false) + dropFrameCount_(0), buffersAllocated_(false), + ispOutputCount_(0), ispOutputTotal_(0) { } @@ -68,7 +68,7 @@ public: void freeBuffers(); virtual void platformFreeBuffers() = 0; - bool enumerateVideoDevices(MediaLink *link, const std::string &frontend); + void enumerateVideoDevices(MediaLink *link, const std::string &frontend); int loadPipelineConfiguration(); int loadIPA(ipa::RPi::InitResult *result); @@ -151,8 +151,7 @@ public: /* Mapping of CropParams 
keyed by the output stream order in CameraConfiguration */ std::map cropParams_; - unsigned int startupFrameCount_; - unsigned int invalidFrameCount_; + unsigned int dropFrameCount_; /* * If set, this stores the value that represets a gain of one for @@ -164,6 +163,11 @@ public: bool buffersAllocated_; struct Config { + /* + * Override any request from the IPA to drop a number of startup + * frames. + */ + bool disableStartupFrameDrops; /* * Override the camera timeout value calculated by the IPA based * on frame durations. @@ -173,14 +177,15 @@ public: Config config_; - ClockRecovery wallClockRecovery_; - protected: void fillRequestMetadata(const ControlList &bufferControls, Request *request); virtual void tryRunPipeline() = 0; + unsigned int ispOutputCount_; + unsigned int ispOutputTotal_; + private: void checkRequestCompleted(); }; diff --git a/src/libcamera/pipeline/rpi/pisp/data/example.yaml b/src/libcamera/pipeline/rpi/pisp/data/example.yaml index baf03be79..d67e654a8 100644 --- a/src/libcamera/pipeline/rpi/pisp/data/example.yaml +++ b/src/libcamera/pipeline/rpi/pisp/data/example.yaml @@ -16,6 +16,11 @@ # # "num_cfe_config_queue": 2, + # Override any request from the IPA to drop a number of startup + # frames. + # + # "disable_startup_frame_drops": false, + # Custom timeout value (in ms) for camera to use. This overrides # the value computed by the pipeline handler based on frame # durations. diff --git a/src/libcamera/pipeline/rpi/pisp/pisp.cpp b/src/libcamera/pipeline/rpi/pisp/pisp.cpp index 92b9070c1..91e7f4c94 100644 --- a/src/libcamera/pipeline/rpi/pisp/pisp.cpp +++ b/src/libcamera/pipeline/rpi/pisp/pisp.cpp @@ -1755,15 +1755,9 @@ void PiSPCameraData::cfeBufferDequeue(FrameBuffer *buffer) auto [ctrl, delayContext] = delayedCtrls_->get(buffer->metadata().sequence); /* * Add the frame timestamp to the ControlList for the IPA to use - * as it does not receive the FrameBuffer object. Also derive a - * corresponding wallclock value. 
+ * as it does not receive the FrameBuffer object. */ - wallClockRecovery_.addSample(); - uint64_t sensorTimestamp = buffer->metadata().timestamp; - uint64_t wallClockTimestamp = wallClockRecovery_.getOutput(sensorTimestamp); - - ctrl.set(controls::SensorTimestamp, sensorTimestamp); - ctrl.set(controls::FrameWallClock, wallClockTimestamp); + ctrl.set(controls::SensorTimestamp, buffer->metadata().timestamp); job.sensorControls = std::move(ctrl); job.delayContext = delayContext; } else if (stream == &cfe_[Cfe::Config]) { @@ -1840,6 +1834,12 @@ void PiSPCameraData::beOutputDequeue(FrameBuffer *buffer) dmabufSyncEnd(buffer->planes()[0].fd); handleStreamBuffer(buffer, stream); + + /* + * Increment the number of ISP outputs generated. + * This is needed to track dropped frames. + */ + ispOutputCount_++; handleState(); } @@ -1885,6 +1885,7 @@ void PiSPCameraData::prepareIspComplete(const ipa::RPi::BufferIds &buffers, bool * If there is no need to run the Backend, just signal that the * input buffer is completed and all Backend outputs are ready. 
*/ + ispOutputCount_ = ispOutputTotal_; buffer = cfe_[Cfe::Output0].getBuffers().at(bayerId).buffer; handleStreamBuffer(buffer, &cfe_[Cfe::Output0]); } else @@ -1993,6 +1994,7 @@ int PiSPCameraData::configureBe(const std::optional &yuvColorSpace) global.bayer_enables |= PISP_BE_BAYER_ENABLE_INPUT; global.bayer_order = toPiSPBayerOrder(cfeFormat.fourcc); + ispOutputTotal_ = 1; /* Config buffer */ if (PISP_IMAGE_FORMAT_COMPRESSED(inputFormat.format)) { pisp_decompress_config decompress; decompress.offset = DefaultCompressionOffset; @@ -2023,6 +2025,7 @@ int PiSPCameraData::configureBe(const std::optional &yuvColorSpace) setupOutputClipping(ispFormat0, outputFormat0); be_->SetOutputFormat(0, outputFormat0); + ispOutputTotal_++; } if (global.rgb_enables & PISP_BE_RGB_ENABLE_OUTPUT1) { @@ -2046,6 +2049,7 @@ int PiSPCameraData::configureBe(const std::optional &yuvColorSpace) setupOutputClipping(ispFormat1, outputFormat1); be_->SetOutputFormat(1, outputFormat1); + ispOutputTotal_++; } /* Setup the TDN I/O blocks in case TDN gets turned on later. */ @@ -2252,6 +2256,8 @@ void PiSPCameraData::prepareCfe() void PiSPCameraData::prepareBe(uint32_t bufferId, bool stitchSwapBuffers) { + ispOutputCount_ = 0; + FrameBuffer *buffer = cfe_[Cfe::Output0].getBuffers().at(bufferId).buffer; LOG(RPI, Debug) << "Input re-queue to ISP, buffer id " << bufferId diff --git a/src/libcamera/pipeline/rpi/vc4/data/example.yaml b/src/libcamera/pipeline/rpi/vc4/data/example.yaml index 27e543488..b8e01adea 100644 --- a/src/libcamera/pipeline/rpi/vc4/data/example.yaml +++ b/src/libcamera/pipeline/rpi/vc4/data/example.yaml @@ -29,6 +29,11 @@ # # "min_total_unicam_buffers": 4, + # Override any request from the IPA to drop a number of startup + # frames. + # + # "disable_startup_frame_drops": false, + # Custom timeout value (in ms) for camera to use. This overrides # the value computed by the pipeline handler based on frame # durations. 
diff --git a/src/libcamera/pipeline/rpi/vc4/vc4.cpp b/src/libcamera/pipeline/rpi/vc4/vc4.cpp index 5cadef527..fe910bdf2 100644 --- a/src/libcamera/pipeline/rpi/vc4/vc4.cpp +++ b/src/libcamera/pipeline/rpi/vc4/vc4.cpp @@ -597,6 +597,8 @@ int Vc4CameraData::platformConfigure(const RPi::RPiCameraConfiguration *rpiConfi stream->setFlags(StreamFlag::External); } + ispOutputTotal_ = outStreams.size(); + /* * If ISP::Output0 stream has not been configured by the application, * we must allow the hardware to generate an output so that the data @@ -623,6 +625,8 @@ int Vc4CameraData::platformConfigure(const RPi::RPiCameraConfiguration *rpiConfi return -EINVAL; } + ispOutputTotal_++; + LOG(RPI, Debug) << "Defaulting ISP Output0 format to " << format; } @@ -658,6 +662,8 @@ int Vc4CameraData::platformConfigure(const RPi::RPiCameraConfiguration *rpiConfi << ret; return -EINVAL; } + + ispOutputTotal_++; } /* ISP statistics output format. */ @@ -670,6 +676,8 @@ int Vc4CameraData::platformConfigure(const RPi::RPiCameraConfiguration *rpiConfi return ret; } + ispOutputTotal_++; + /* * Configure the Unicam embedded data output format only if the sensor * supports it. @@ -773,15 +781,9 @@ void Vc4CameraData::unicamBufferDequeue(FrameBuffer *buffer) auto [ctrl, delayContext] = delayedCtrls_->get(buffer->metadata().sequence); /* * Add the frame timestamp to the ControlList for the IPA to use - * as it does not receive the FrameBuffer object. Also derive a - * corresponding wallclock value. + * as it does not receive the FrameBuffer object. 
*/ - wallClockRecovery_.addSample(); - uint64_t sensorTimestamp = buffer->metadata().timestamp; - uint64_t wallClockTimestamp = wallClockRecovery_.getOutput(sensorTimestamp); - - ctrl.set(controls::SensorTimestamp, sensorTimestamp); - ctrl.set(controls::FrameWallClock, wallClockTimestamp); + ctrl.set(controls::SensorTimestamp, buffer->metadata().timestamp); bayerQueue_.push({ buffer, std::move(ctrl), delayContext }); } else { embeddedQueue_.push(buffer); @@ -841,6 +843,12 @@ void Vc4CameraData::ispOutputDequeue(FrameBuffer *buffer) handleStreamBuffer(buffer, stream); } + /* + * Increment the number of ISP outputs generated. + * This is needed to track dropped frames. + */ + ispOutputCount_++; + handleState(); } @@ -872,6 +880,7 @@ void Vc4CameraData::prepareIspComplete(const ipa::RPi::BufferIds &buffers, << ", timestamp: " << buffer->metadata().timestamp; isp_[Isp::Input].queueBuffer(buffer); + ispOutputCount_ = 0; if (sensorMetadata_ && embeddedId) { buffer = unicam_[Unicam::Embedded].getBuffers().at(embeddedId & RPi::MaskID).buffer; diff --git a/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp b/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp index 4b5816dfd..58aa0eb4c 100644 --- a/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp +++ b/src/libcamera/pipeline/uvcvideo/uvcvideo.cpp @@ -100,7 +100,7 @@ public: private: int processControl(const UVCCameraData *data, ControlList *controls, unsigned int id, const ControlValue &value); - int processControls(UVCCameraData *data, const ControlList &reqControls); + int processControls(UVCCameraData *data, Request *request); bool acquireDevice(Camera *camera) override; void releaseDevice(Camera *camera) override; @@ -287,7 +287,7 @@ int PipelineHandlerUVC::exportFrameBuffers(Camera *camera, Stream *stream, return data->video_->exportBuffers(count, buffers); } -int PipelineHandlerUVC::start(Camera *camera, const ControlList *controls) +int PipelineHandlerUVC::start(Camera *camera, [[maybe_unused]] const ControlList *controls) { 
UVCCameraData *data = cameraData(camera); unsigned int count = data->stream_.configuration().bufferCount; @@ -296,22 +296,13 @@ int PipelineHandlerUVC::start(Camera *camera, const ControlList *controls) if (ret < 0) return ret; - if (controls) { - ret = processControls(data, *controls); - if (ret < 0) - goto err_release_buffers; + ret = data->video_->streamOn(); + if (ret < 0) { + data->video_->releaseBuffers(); + return ret; } - ret = data->video_->streamOn(); - if (ret < 0) - goto err_release_buffers; - return 0; - -err_release_buffers: - data->video_->releaseBuffers(); - - return ret; } void PipelineHandlerUVC::stopDevice(Camera *camera) @@ -340,8 +331,6 @@ int PipelineHandlerUVC::processControl(const UVCCameraData *data, ControlList *c cid = V4L2_CID_GAIN; else if (id == controls::Gamma) cid = V4L2_CID_GAMMA; - else if (id == controls::AeEnable) - return 0; /* Handled in `Camera::queueRequest()`. */ else return -EINVAL; @@ -421,11 +410,11 @@ int PipelineHandlerUVC::processControl(const UVCCameraData *data, ControlList *c return 0; } -int PipelineHandlerUVC::processControls(UVCCameraData *data, const ControlList &reqControls) +int PipelineHandlerUVC::processControls(UVCCameraData *data, Request *request) { ControlList controls(data->video_->controls()); - for (const auto &[id, value] : reqControls) + for (const auto &[id, value] : request->controls()) processControl(data, &controls, id, value); for (const auto &ctrl : controls) @@ -453,7 +442,7 @@ int PipelineHandlerUVC::queueRequestDevice(Camera *camera, Request *request) return -ENOENT; } - int ret = processControls(data, request->controls()); + int ret = processControls(data, request); if (ret < 0) return ret; diff --git a/src/libcamera/pipeline_handler.cpp b/src/libcamera/pipeline_handler.cpp index 31d501a13..d84dff3c9 100644 --- a/src/libcamera/pipeline_handler.cpp +++ b/src/libcamera/pipeline_handler.cpp @@ -372,8 +372,6 @@ void PipelineHandler::stop(Camera *camera) /* Make sure no requests are pending. 
*/ Camera::Private *data = camera->_d(); - // WIP: Just clean for now, idk maybe something wrong with thread sync? - data->queuedRequests_.clear(); ASSERT(data->queuedRequests_.empty()); data->requestSequence_ = 0; diff --git a/src/libcamera/process.cpp b/src/libcamera/process.cpp index 0eae68072..d836fb07a 100644 --- a/src/libcamera/process.cpp +++ b/src/libcamera/process.cpp @@ -241,12 +241,7 @@ int Process::start(const std::string &path, int ret; if (running_) - return -EBUSY; - - for (int fd : fds) { - if (fd < 0) - return -EINVAL; - } + return 0; int childPid = fork(); if (childPid == -1) { @@ -284,15 +279,14 @@ int Process::start(const std::string &path, if (file && strcmp(file, "syslog")) unsetenv("LIBCAMERA_LOG_FILE"); - const size_t len = args.size(); - auto argv = std::make_unique(len + 2); - + const char **argv = new const char *[args.size() + 2]; + unsigned int len = args.size(); argv[0] = path.c_str(); - for (size_t i = 0; i < len; i++) + for (unsigned int i = 0; i < len; i++) argv[i + 1] = args[i].c_str(); argv[len + 1] = nullptr; - execv(path.c_str(), const_cast(argv.get())); + execv(path.c_str(), (char **)argv); _exit(EXIT_FAILURE); } @@ -303,8 +297,6 @@ void Process::closeAllFdsExcept(const std::vector &fds) std::vector v(fds); sort(v.begin(), v.end()); - ASSERT(v.empty() || v.front() >= 0); - DIR *dir = opendir("/proc/self/fd"); if (!dir) return; diff --git a/src/libcamera/sensor/camera_sensor.cpp b/src/libcamera/sensor/camera_sensor.cpp index 4f2fd2690..d19b5e2e7 100644 --- a/src/libcamera/sensor/camera_sensor.cpp +++ b/src/libcamera/sensor/camera_sensor.cpp @@ -302,9 +302,8 @@ int CameraSensor::setEmbeddedDataEnabled(bool enable) * camera sensor, likely at configure() time. * * If the requested \a orientation cannot be obtained, the \a orientation - * parameter is adjusted to report the native image orientation (i.e. 
resulting - * from the physical mounting rotation of the camera sensor, without any - * transformation) and Transform::Identity is returned. + * parameter is adjusted to report the current image orientation and + * Transform::Identity is returned. * * If the requested \a orientation can be obtained, the function computes a * Transform and does not adjust \a orientation. diff --git a/src/libcamera/software_isp/benchmark.cpp b/src/libcamera/software_isp/benchmark.cpp new file mode 100644 index 000000000..b3da3c416 --- /dev/null +++ b/src/libcamera/software_isp/benchmark.cpp @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Red Hat Inc. + * + * Authors: + * Hans de Goede + * + * Simple builtin benchmark to measure software ISP processing times + */ + +#include "libcamera/internal/software_isp/benchmark.h" + +#include + +namespace libcamera { + +LOG_DEFINE_CATEGORY(Benchmark) + +/** + * \class Benchmark + * \brief Simple builtin benchmark + * + * Simple builtin benchmark to measure software ISP processing times. + */ + +/** + * \brief Constructs a Benchmark object + */ +Benchmark::Benchmark() + : measuredFrames_(0), frameProcessTime_(0) +{ +} + +Benchmark::~Benchmark() +{ +} + +static inline int64_t timeDiff(timespec &after, timespec &before) +{ + return (after.tv_sec - before.tv_sec) * 1000000000LL + + (int64_t)after.tv_nsec - (int64_t)before.tv_nsec; +} + +/** + * \brief Start measuring process time for a single frame + * + * Call this function before processing frame data to start measuring + * the process time for a frame. + */ +void Benchmark::startFrame(void) +{ + if (measuredFrames_ >= Benchmark::kLastFrameToMeasure) + return; + + frameStartTime_ = {}; + clock_gettime(CLOCK_MONOTONIC_RAW, &frameStartTime_); +} + +/** + * \brief Finish measuring process time for a single frame + * + * Call this function after processing frame data to finish measuring + * the process time for a frame. 
+ * + * This function will log frame processing time information after + * Benchmark::kLastFrameToMeasure frames have been processed. + */ +void Benchmark::finishFrame(void) +{ + if (measuredFrames_ >= Benchmark::kLastFrameToMeasure) + return; + + measuredFrames_++; + + if (measuredFrames_ <= Benchmark::kFramesToSkip) + return; + + timespec frameEndTime = {}; + clock_gettime(CLOCK_MONOTONIC_RAW, &frameEndTime); + frameProcessTime_ += timeDiff(frameEndTime, frameStartTime_); + + if (measuredFrames_ == Benchmark::kLastFrameToMeasure) { + const unsigned int measuredFrames = Benchmark::kLastFrameToMeasure - + Benchmark::kFramesToSkip; + LOG(Benchmark, Info) + << "Processed " << measuredFrames + << " frames in " << frameProcessTime_ / 1000 << "us, " + << frameProcessTime_ / (1000 * measuredFrames) + << " us/frame"; + } +} + +} /* namespace libcamera */ diff --git a/src/libcamera/software_isp/debayer.cpp b/src/libcamera/software_isp/debayer.cpp index e9e18c488..d0e17d20b 100644 --- a/src/libcamera/software_isp/debayer.cpp +++ b/src/libcamera/software_isp/debayer.cpp @@ -103,6 +103,17 @@ namespace libcamera { LOG_DEFINE_CATEGORY(Debayer) +Debayer::Debayer() +{ + /* Initialize color lookup tables */ + for (unsigned int i = 0; i < DebayerParams::kRGBLookupSize; i++) { + red_[i] = green_[i] = blue_[i] = i; + redCcm_[i] = { static_cast(i), 0, 0 }; + greenCcm_[i] = { 0, static_cast(i), 0 }; + blueCcm_[i] = { 0, 0, static_cast(i) }; + } +} + Debayer::~Debayer() { } @@ -176,4 +187,54 @@ Debayer::~Debayer() * \brief Signals when the output buffer is ready */ +/** + * \fn void Debayer::setParams(DebayerParams ¶ms) + * \brief Select the bayer params to use for the next frame debayer + * \param[in] params The parameters to be used in debayering + */ +void Debayer::setParams(DebayerParams ¶ms) +{ + green_ = params.green; + greenCcm_ = params.greenCcm; + if (swapRedBlueGains_) { + red_ = params.blue; + blue_ = params.red; + redCcm_ = params.blueCcm; + blueCcm_ = params.redCcm; + for 
(unsigned int i = 0; i < 256; i++) { + std::swap(redCcm_[i].r, redCcm_[i].b); + std::swap(blueCcm_[i].r, blueCcm_[i].b); + } + } else { + red_ = params.red; + blue_ = params.blue; + redCcm_ = params.redCcm; + blueCcm_ = params.blueCcm; + } + gammaLut_ = params.gammaLut; +} + +/** + * \fn void Debayer::dmaSyncBegin(DebayerParams ¶ms) + * \brief Common CPU/GPU Dma Sync Buffer begin + */ +void Debayer::dmaSyncBegin(std::vector &dmaSyncers, FrameBuffer *input, FrameBuffer *output) +{ + for (const FrameBuffer::Plane &plane : input->planes()) + dmaSyncers.emplace_back(plane.fd, DmaSyncer::SyncType::Read); + + for (const FrameBuffer::Plane &plane : output->planes()) + dmaSyncers.emplace_back(plane.fd, DmaSyncer::SyncType::Write); +} + +/** + * \fn void Debayer::isStandardBayerOrder(BayerFormat::Order order) + * \brief Common method to validate standard Bayer order + */ +bool Debayer::isStandardBayerOrder(BayerFormat::Order order) +{ + return order == BayerFormat::BGGR || order == BayerFormat::GBRG || + order == BayerFormat::GRBG || order == BayerFormat::RGGB; +} + } /* namespace libcamera */ diff --git a/src/libcamera/software_isp/debayer.h b/src/libcamera/software_isp/debayer.h index ba033d440..3893318b2 100644 --- a/src/libcamera/software_isp/debayer.h +++ b/src/libcamera/software_isp/debayer.h @@ -14,11 +14,15 @@ #include #include +#include #include #include #include +#include "libcamera/internal/bayer_format.h" +#include "libcamera/internal/dma_buf_allocator.h" +#include "libcamera/internal/software_isp/benchmark.h" #include "libcamera/internal/software_isp/debayer_params.h" namespace libcamera { @@ -27,9 +31,10 @@ class FrameBuffer; LOG_DECLARE_CATEGORY(Debayer) -class Debayer +class Debayer : public Object { public: + Debayer(); virtual ~Debayer() = 0; virtual int configure(const StreamConfiguration &inputCfg, @@ -45,11 +50,45 @@ public: virtual SizeRange sizes(PixelFormat inputFormat, const Size &inputSize) = 0; + virtual const SharedFD &getStatsFD() = 0; + + 
unsigned int frameSize() { return outputConfig_.frameSize; } + Signal inputBufferReady; Signal outputBufferReady; + struct DebayerInputConfig { + Size patternSize; + unsigned int bpp; /* Memory used per pixel, not precision */ + unsigned int stride; + std::vector outputFormats; + }; + + struct DebayerOutputConfig { + unsigned int bpp; /* Memory used per pixel, not precision */ + unsigned int stride; + unsigned int frameSize; + }; + + DebayerInputConfig inputConfig_; + DebayerOutputConfig outputConfig_; + DebayerParams::LookupTable red_; + DebayerParams::LookupTable green_; + DebayerParams::LookupTable blue_; + DebayerParams::CcmLookupTable redCcm_; + DebayerParams::CcmLookupTable greenCcm_; + DebayerParams::CcmLookupTable blueCcm_; + DebayerParams::LookupTable gammaLut_; + bool swapRedBlueGains_; + Benchmark bench_; + private: virtual Size patternSize(PixelFormat inputFormat) = 0; + +protected: + void setParams(DebayerParams ¶ms); + void dmaSyncBegin(std::vector &dmaSyncers, FrameBuffer *input, FrameBuffer *output); + static bool isStandardBayerOrder(BayerFormat::Order order); }; } /* namespace libcamera */ diff --git a/src/libcamera/software_isp/debayer_cpu.cpp b/src/libcamera/software_isp/debayer_cpu.cpp index c6f070225..e56492848 100644 --- a/src/libcamera/software_isp/debayer_cpu.cpp +++ b/src/libcamera/software_isp/debayer_cpu.cpp @@ -22,7 +22,6 @@ #include #include "libcamera/internal/bayer_format.h" -#include "libcamera/internal/dma_buf_allocator.h" #include "libcamera/internal/framebuffer.h" #include "libcamera/internal/mapped_framebuffer.h" @@ -40,7 +39,7 @@ namespace libcamera { * \param[in] stats Pointer to the stats object to use */ DebayerCpu::DebayerCpu(std::unique_ptr stats) - : stats_(std::move(stats)) + : Debayer(), stats_(std::move(stats)) { /* * Reading from uncached buffers may be very slow. @@ -51,14 +50,6 @@ DebayerCpu::DebayerCpu(std::unique_ptr stats) * future. 
*/ enableInputMemcpy_ = true; - - /* Initialize color lookup tables */ - for (unsigned int i = 0; i < DebayerParams::kRGBLookupSize; i++) { - red_[i] = green_[i] = blue_[i] = i; - redCcm_[i] = { static_cast(i), 0, 0 }; - greenCcm_[i] = { 0, static_cast(i), 0 }; - blueCcm_[i] = { 0, 0, static_cast(i) }; - } } DebayerCpu::~DebayerCpu() = default; @@ -291,12 +282,6 @@ void DebayerCpu::debayer10P_RGRG_BGR888(uint8_t *dst, const uint8_t *src[]) } } -static bool isStandardBayerOrder(BayerFormat::Order order) -{ - return order == BayerFormat::BGGR || order == BayerFormat::GBRG || - order == BayerFormat::GRBG || order == BayerFormat::RGGB; -} - /* * Setup the Debayer object according to the passed in parameters. * Return 0 on success, a negative errno value on failure @@ -554,9 +539,6 @@ int DebayerCpu::configure(const StreamConfiguration &inputCfg, lineBuffers_[i].resize(lineBufferLength_); } - measuredFrames_ = 0; - frameProcessTime_ = 0; - return 0; } @@ -668,7 +650,7 @@ void DebayerCpu::process2(const uint8_t *src, uint8_t *dst) for (unsigned int y = window_.y; y < yEnd; y += 2) { shiftLinePointers(linePointers, src); memcpyNextLine(linePointers); - if (this->enable_statistic) stats_->processLine0(y, linePointers); + stats_->processLine0(y, linePointers); (this->*debayer0_)(dst, linePointers); src += inputConfig_.stride; dst += outputConfig_.stride; @@ -683,7 +665,7 @@ void DebayerCpu::process2(const uint8_t *src, uint8_t *dst) if (window_.y == 0) { shiftLinePointers(linePointers, src); memcpyNextLine(linePointers); - if (this->enable_statistic) stats_->processLine0(yEnd, linePointers); + stats_->processLine0(yEnd, linePointers); (this->*debayer0_)(dst, linePointers); src += inputConfig_.stride; dst += outputConfig_.stride; @@ -720,7 +702,7 @@ void DebayerCpu::process4(const uint8_t *src, uint8_t *dst) for (unsigned int y = window_.y; y < yEnd; y += 4) { shiftLinePointers(linePointers, src); memcpyNextLine(linePointers); - if (this->enable_statistic) 
stats_->processLine0(y, linePointers); + stats_->processLine0(y, linePointers); (this->*debayer0_)(dst, linePointers); src += inputConfig_.stride; dst += outputConfig_.stride; @@ -733,7 +715,7 @@ void DebayerCpu::process4(const uint8_t *src, uint8_t *dst) shiftLinePointers(linePointers, src); memcpyNextLine(linePointers); - if (this->enable_statistic) stats_->processLine2(y, linePointers); + stats_->processLine2(y, linePointers); (this->*debayer2_)(dst, linePointers); src += inputConfig_.stride; dst += outputConfig_.stride; @@ -746,50 +728,15 @@ void DebayerCpu::process4(const uint8_t *src, uint8_t *dst) } } -namespace { - -inline int64_t timeDiff(timespec &after, timespec &before) -{ - return (after.tv_sec - before.tv_sec) * 1000000000LL + - (int64_t)after.tv_nsec - (int64_t)before.tv_nsec; -} - -} /* namespace */ - void DebayerCpu::process(uint32_t frame, FrameBuffer *input, FrameBuffer *output, DebayerParams params) { - timespec frameStartTime; - - if (measuredFrames_ < DebayerCpu::kLastFrameToMeasure) { - frameStartTime = {}; - clock_gettime(CLOCK_MONOTONIC_RAW, &frameStartTime); - } + bench_.startFrame(); std::vector dmaSyncers; - for (const FrameBuffer::Plane &plane : input->planes()) - dmaSyncers.emplace_back(plane.fd, DmaSyncer::SyncType::Read); - for (const FrameBuffer::Plane &plane : output->planes()) - dmaSyncers.emplace_back(plane.fd, DmaSyncer::SyncType::Write); - enable_statistic = params.collect_stats; - green_ = params.green; - greenCcm_ = params.greenCcm; - if (swapRedBlueGains_) { - red_ = params.blue; - blue_ = params.red; - redCcm_ = params.blueCcm; - blueCcm_ = params.redCcm; - for (unsigned int i = 0; i < 256; i++) { - std::swap(redCcm_[i].r, redCcm_[i].b); - std::swap(blueCcm_[i].r, blueCcm_[i].b); - } - } else { - red_ = params.red; - blue_ = params.blue; - redCcm_ = params.redCcm; - blueCcm_ = params.blueCcm; - } - gammaLut_ = params.gammaLut; + dmaSyncBegin(dmaSyncers, input, output); + + setParams(params); /* Copy metadata from the input 
buffer */ FrameMetadata &metadata = output->_d()->metadata(); @@ -805,7 +752,7 @@ void DebayerCpu::process(uint32_t frame, FrameBuffer *input, FrameBuffer *output return; } - if(this->enable_statistic) stats_->startFrame(); + stats_->startFrame(); if (inputConfig_.patternSize.height == 2) process2(in.planes()[0].data(), out.planes()[0].data()); @@ -817,21 +764,7 @@ void DebayerCpu::process(uint32_t frame, FrameBuffer *input, FrameBuffer *output dmaSyncers.clear(); /* Measure before emitting signals */ - if (measuredFrames_ < DebayerCpu::kLastFrameToMeasure && - ++measuredFrames_ > DebayerCpu::kFramesToSkip) { - timespec frameEndTime = {}; - clock_gettime(CLOCK_MONOTONIC_RAW, &frameEndTime); - frameProcessTime_ += timeDiff(frameEndTime, frameStartTime); - if (measuredFrames_ == DebayerCpu::kLastFrameToMeasure) { - const unsigned int measuredFrames = DebayerCpu::kLastFrameToMeasure - - DebayerCpu::kFramesToSkip; - LOG(Debayer, Info) - << "Processed " << measuredFrames - << " frames in " << frameProcessTime_ / 1000 << "us, " - << frameProcessTime_ / (1000 * measuredFrames) - << " us/frame"; - } - } + bench_.finishFrame(); /* * Buffer ids are currently not used, so pass zeros as its parameter. 
diff --git a/src/libcamera/software_isp/debayer_cpu.h b/src/libcamera/software_isp/debayer_cpu.h index 54304053f..999e3421c 100644 --- a/src/libcamera/software_isp/debayer_cpu.h +++ b/src/libcamera/software_isp/debayer_cpu.h @@ -17,14 +17,13 @@ #include -#include "libcamera/internal/bayer_format.h" +#include "libcamera/internal/software_isp/swstats_cpu.h" #include "debayer.h" -#include "swstats_cpu.h" namespace libcamera { -class DebayerCpu : public Debayer, public Object +class DebayerCpu : public Debayer { public: DebayerCpu(std::unique_ptr stats); @@ -47,13 +46,6 @@ public: */ const SharedFD &getStatsFD() { return stats_->getStatsFD(); } - /** - * \brief Get the output frame size - * - * \return The output frame size - */ - unsigned int frameSize() { return outputConfig_.frameSize; } - private: /** * \brief Called to debayer 1 line of Bayer input data to output format @@ -110,21 +102,8 @@ private: template void debayer10P_RGRG_BGR888(uint8_t *dst, const uint8_t *src[]); - struct DebayerInputConfig { - Size patternSize; - unsigned int bpp; /* Memory used per pixel, not precision */ - unsigned int stride; - std::vector outputFormats; - }; - - struct DebayerOutputConfig { - unsigned int bpp; /* Memory used per pixel, not precision */ - unsigned int stride; - unsigned int frameSize; - }; - - int getInputConfig(PixelFormat inputFormat, DebayerInputConfig &config); - int getOutputConfig(PixelFormat outputFormat, DebayerOutputConfig &config); + static int getInputConfig(PixelFormat inputFormat, DebayerInputConfig &config); + static int getOutputConfig(PixelFormat outputFormat, DebayerOutputConfig &config); int setupStandardBayerOrder(BayerFormat::Order order); int setDebayerFunctions(PixelFormat inputFormat, PixelFormat outputFormat, @@ -138,20 +117,11 @@ private: /* Max. 
supported Bayer pattern height is 4, debayering this requires 5 lines */ static constexpr unsigned int kMaxLineBuffers = 5; - DebayerParams::LookupTable red_; - DebayerParams::LookupTable green_; - DebayerParams::LookupTable blue_; - DebayerParams::CcmLookupTable redCcm_; - DebayerParams::CcmLookupTable greenCcm_; - DebayerParams::CcmLookupTable blueCcm_; - DebayerParams::LookupTable gammaLut_; debayerFn debayer0_; debayerFn debayer1_; debayerFn debayer2_; debayerFn debayer3_; Rectangle window_; - DebayerInputConfig inputConfig_; - DebayerOutputConfig outputConfig_; std::unique_ptr stats_; std::vector lineBuffers_[kMaxLineBuffers]; unsigned int lineBufferLength_; @@ -159,13 +129,6 @@ private: unsigned int lineBufferIndex_; unsigned int xShift_; /* Offset of 0/1 applied to window_.x */ bool enableInputMemcpy_; - bool swapRedBlueGains_; - unsigned int measuredFrames_; - int64_t frameProcessTime_; - /* Skip 30 frames for things to stabilize then measure 30 frames */ - static constexpr unsigned int kFramesToSkip = 30; - static constexpr unsigned int kLastFrameToMeasure = 60; - bool enable_statistic = true; }; } /* namespace libcamera */ diff --git a/src/libcamera/software_isp/debayer_egl.cpp b/src/libcamera/software_isp/debayer_egl.cpp new file mode 100644 index 000000000..9ec966608 --- /dev/null +++ b/src/libcamera/software_isp/debayer_egl.cpp @@ -0,0 +1,632 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2024, Linaro Ltd. 
+ * + * Authors: + * Bryan O'Donoghue + * + * debayer_cpu.cpp - EGL based debayering class + */ + +#include +#include +#include + +#include + +#include "libcamera/internal/glsl_shaders.h" +#include "debayer_egl.h" + +namespace libcamera { + +DebayerEGL::DebayerEGL(std::unique_ptr stats) + : Debayer(), stats_(std::move(stats)) +{ + eglImageBayerIn_ = eglImageRedLookup_ = eglImageBlueLookup_ = eglImageGreenLookup_ = NULL; +} + +DebayerEGL::~DebayerEGL() +{ + if (eglImageBlueLookup_) + delete eglImageBlueLookup_; + + if (eglImageGreenLookup_) + delete eglImageGreenLookup_; + + if (eglImageRedLookup_) + delete eglImageRedLookup_; + + if (eglImageBayerIn_) + delete eglImageBayerIn_; +} + +int DebayerEGL::getInputConfig(PixelFormat inputFormat, DebayerInputConfig &config) +{ + BayerFormat bayerFormat = + BayerFormat::fromPixelFormat(inputFormat); + + if ((bayerFormat.bitDepth == 8 || bayerFormat.bitDepth == 10) && + bayerFormat.packing == BayerFormat::Packing::None && + isStandardBayerOrder(bayerFormat.order)) { + config.bpp = (bayerFormat.bitDepth + 7) & ~7; + config.patternSize.width = 2; + config.patternSize.height = 2; + config.outputFormats = std::vector({ formats::XRGB8888, + formats::ARGB8888, + formats::XBGR8888, + formats::ABGR8888 }); + return 0; + } + + if (bayerFormat.bitDepth == 10 && + bayerFormat.packing == BayerFormat::Packing::CSI2 && + isStandardBayerOrder(bayerFormat.order)) { + config.bpp = 10; + config.patternSize.width = 4; /* 5 bytes per *4* pixels */ + config.patternSize.height = 2; + config.outputFormats = std::vector({ formats::XRGB8888, + formats::ARGB8888, + formats::XBGR8888, + formats::ABGR8888 }); + return 0; + } + + LOG(Debayer, Info) + << "Unsupported input format " << inputFormat.toString(); + return -EINVAL; +} + +int DebayerEGL::getOutputConfig(PixelFormat outputFormat, DebayerOutputConfig &config) +{ + if (outputFormat == formats::XRGB8888 || outputFormat == formats::ARGB8888 || + outputFormat == formats::XBGR8888 || outputFormat == 
formats::ABGR8888) { + config.bpp = 32; + return 0; + } + + LOG(Debayer, Error) + << "Unsupported output format " << outputFormat.toString(); + + return -EINVAL; +} + +int DebayerEGL::getShaderVariableLocations(void) +{ + attributeVertex_ = glGetAttribLocation(programId_, "vertexIn"); + attributeTexture_ = glGetAttribLocation(programId_, "textureIn"); + + textureUniformBayerDataIn_ = glGetUniformLocation(programId_, "tex_y"); + textureUniformRedLookupDataIn_ = glGetUniformLocation(programId_, "red_param"); + textureUniformGreenLookupDataIn_ = glGetUniformLocation(programId_, "green_param"); + textureUniformBlueLookupDataIn_ = glGetUniformLocation(programId_, "blue_param"); + ccmUniformDataIn_ = glGetUniformLocation(programId_, "ccm"); + + textureUniformStep_ = glGetUniformLocation(programId_, "tex_step"); + textureUniformSize_ = glGetUniformLocation(programId_, "tex_size"); + textureUniformStrideFactor_ = glGetUniformLocation(programId_, "stride_factor"); + textureUniformBayerFirstRed_ = glGetUniformLocation(programId_, "tex_bayer_first_red"); + textureUniformProjMatrix_ = glGetUniformLocation(programId_, "proj_matrix"); + + LOG(Debayer, Debug) << "vertexIn " << attributeVertex_ << " textureIn " << attributeTexture_ + << " tex_y " << textureUniformBayerDataIn_ + << " red_param " << textureUniformRedLookupDataIn_ + << " green_param " << textureUniformGreenLookupDataIn_ + << " blue_param " << textureUniformBlueLookupDataIn_ + << " ccm " << ccmUniformDataIn_ + << " tex_step " << textureUniformStep_ + << " tex_size " << textureUniformSize_ + << " stride_factor " << textureUniformStrideFactor_ + << " tex_bayer_first_red " << textureUniformBayerFirstRed_ + << " proj_matrix " << textureUniformProjMatrix_; + return 0; +} + +int DebayerEGL::initBayerShaders(PixelFormat inputFormat, PixelFormat outputFormat) +{ + std::vector shaderEnv; + unsigned int fragmentShaderDataLen; + unsigned char *fragmentShaderData; + unsigned int vertexShaderDataLen; + unsigned char 
*vertexShaderData; + GLenum err; + + // Target gles 100 glsl requires "#version x" as first directive in shader + egl_.pushEnv(shaderEnv, "#version 100"); + + // Specify GL_OES_EGL_image_external + egl_.pushEnv(shaderEnv, "#extension GL_OES_EGL_image_external: enable"); + + // Tell shaders how to re-order output taking account of how the + // pixels are actually stored by GBM + switch (outputFormat) { + case formats::ARGB8888: + case formats::XRGB8888: + break; + case formats::ABGR8888: + case formats::XBGR8888: + egl_.pushEnv(shaderEnv, "#define SWAP_BLUE"); + break; + default: + goto invalid_fmt; + } + + // Pixel location parameters + glFormat_ = GL_LUMINANCE; + bytesPerPixel_ = 1; + switch (inputFormat) { + case libcamera::formats::SBGGR8: + case libcamera::formats::SBGGR10_CSI2P: + case libcamera::formats::SBGGR12_CSI2P: + firstRed_x_ = 1.0; + firstRed_y_ = 1.0; + break; + case libcamera::formats::SGBRG8: + case libcamera::formats::SGBRG10_CSI2P: + case libcamera::formats::SGBRG12_CSI2P: + firstRed_x_ = 0.0; + firstRed_y_ = 1.0; + break; + case libcamera::formats::SGRBG8: + case libcamera::formats::SGRBG10_CSI2P: + case libcamera::formats::SGRBG12_CSI2P: + firstRed_x_ = 1.0; + firstRed_y_ = 0.0; + break; + case libcamera::formats::SRGGB8: + case libcamera::formats::SRGGB10_CSI2P: + case libcamera::formats::SRGGB12_CSI2P: + firstRed_x_ = 0.0; + firstRed_y_ = 0.0; + break; + default: + goto invalid_fmt; + break; + }; + + // Shader selection + switch (inputFormat) { + case libcamera::formats::SBGGR8: + case libcamera::formats::SGBRG8: + case libcamera::formats::SGRBG8: + case libcamera::formats::SRGGB8: + fragmentShaderData = bayer_unpacked_frag; + fragmentShaderDataLen = bayer_unpacked_frag_len; + vertexShaderData = bayer_unpacked_vert; + vertexShaderDataLen = bayer_unpacked_vert_len; + break; + case libcamera::formats::SBGGR10_CSI2P: + case libcamera::formats::SGBRG10_CSI2P: + case libcamera::formats::SGRBG10_CSI2P: + case libcamera::formats::SRGGB10_CSI2P: + 
egl_.pushEnv(shaderEnv, "#define RAW10P"); + if (BayerFormat::fromPixelFormat(inputFormat).packing == BayerFormat::Packing::None) { + fragmentShaderData = bayer_unpacked_frag; + fragmentShaderDataLen = bayer_unpacked_frag_len; + vertexShaderData = bayer_unpacked_vert; + vertexShaderDataLen = bayer_unpacked_vert_len; + glFormat_ = GL_RG; + bytesPerPixel_ = 2; + } else { + fragmentShaderData = bayer_1x_packed_frag; + fragmentShaderDataLen = bayer_1x_packed_frag_len; + vertexShaderData = identity_vert; + vertexShaderDataLen = identity_vert_len; + } + break; + case libcamera::formats::SBGGR12_CSI2P: + case libcamera::formats::SGBRG12_CSI2P: + case libcamera::formats::SGRBG12_CSI2P: + case libcamera::formats::SRGGB12_CSI2P: + egl_.pushEnv(shaderEnv, "#define RAW12P"); + if (BayerFormat::fromPixelFormat(inputFormat).packing == BayerFormat::Packing::None) { + fragmentShaderData = bayer_unpacked_frag; + fragmentShaderDataLen = bayer_unpacked_frag_len; + vertexShaderData = bayer_unpacked_vert; + vertexShaderDataLen = bayer_unpacked_vert_len; + glFormat_ = GL_RG; + bytesPerPixel_ = 2; + } else { + fragmentShaderData = bayer_1x_packed_frag; + fragmentShaderDataLen = bayer_1x_packed_frag_len; + vertexShaderData = identity_vert; + vertexShaderDataLen = identity_vert_len; + } + break; + default: + goto invalid_fmt; + break; + }; + + if (ccmEnabled_) { + // Run the CCM if available + egl_.pushEnv(shaderEnv, "#define APPLY_CCM_PARAMETERS"); + } else { + // Flag to shaders that we have parameter gain tables + egl_.pushEnv(shaderEnv, "#define APPLY_RGB_PARAMETERS"); + } + + if (egl_.compileVertexShader(vertexShaderId_, vertexShaderData, vertexShaderDataLen, shaderEnv)) + goto compile_fail; + + if (egl_.compileFragmentShader(fragmentShaderId_, fragmentShaderData, fragmentShaderDataLen, shaderEnv)) + goto compile_fail; + + if (egl_.linkProgram(programId_, vertexShaderId_, fragmentShaderId_)) + goto link_fail; + + egl_.dumpShaderSource(vertexShaderId_); + 
egl_.dumpShaderSource(fragmentShaderId_); + + /* Ensure we set the programId_ */ + egl_.useProgram(programId_); + err = glGetError(); + if (err != GL_NO_ERROR) + goto program_fail; + + if (getShaderVariableLocations()) + goto parameters_fail; + + return 0; + +parameters_fail: + LOG(Debayer, Error) << "Program parameters fail"; + return -ENODEV; + +program_fail: + LOG(Debayer, Error) << "Use program error " << err; + return -ENODEV; + +link_fail: + LOG(Debayer, Error) << "Linking program fail"; + return -ENODEV; + +compile_fail: + LOG(Debayer, Error) << "Compile debayer shaders fail"; + return -ENODEV; + +invalid_fmt: + LOG(Debayer, Error) << "Unsupported input output format combination"; + return -EINVAL; +} + +int DebayerEGL::configure(const StreamConfiguration &inputCfg, + const std::vector> &outputCfgs, + bool ccmEnabled) +{ + GLint maxTextureImageUnits; + + if (getInputConfig(inputCfg.pixelFormat, inputConfig_) != 0) + return -EINVAL; + + if (stats_->configure(inputCfg) != 0) + return -EINVAL; + + const Size &stats_pattern_size = stats_->patternSize(); + if (inputConfig_.patternSize.width != stats_pattern_size.width || + inputConfig_.patternSize.height != stats_pattern_size.height) { + LOG(Debayer, Error) + << "mismatching stats and debayer pattern sizes for " + << inputCfg.pixelFormat.toString(); + return -EINVAL; + } + + inputConfig_.stride = inputCfg.stride; + width_ = inputCfg.size.width; + height_ = inputCfg.size.height; + ccmEnabled_ = ccmEnabled; + + if (outputCfgs.size() != 1) { + LOG(Debayer, Error) + << "Unsupported number of output streams: " + << outputCfgs.size(); + return -EINVAL; + } + + LOG(Debayer, Info) << "Input size " << inputCfg.size << " stride " << inputCfg.stride; + + if (gbmSurface_.initSurface(inputCfg.size.width, inputCfg.size.height)) + return -ENODEV; + + if (egl_.initEGLContext(&gbmSurface_)) + return -ENODEV; + + glGetIntegerv(GL_MAX_TEXTURE_IMAGE_UNITS, &maxTextureImageUnits); + LOG(Debayer, Debug) << "Fragment shader maximum 
texture units " << maxTextureImageUnits; + + if (!ccmEnabled && maxTextureImageUnits < DEBAYER_EGL_MIN_SIMPLE_RGB_GAIN_TEXTURE_UNITS) { + LOG(Debayer, Error) << "Fragment shader texture unit count " << maxTextureImageUnits + << " required minimum for RGB gain table lookup " << DEBAYER_EGL_MIN_SIMPLE_RGB_GAIN_TEXTURE_UNITS + << " try using an identity CCM "; + return -ENODEV; + } + + // Raw bayer input as texture + eglImageBayerIn_ = new eGLImage(width_, height_, 32, GL_TEXTURE0, 0); + if (!eglImageBayerIn_) + return -ENOMEM; + + // Only do the RGB lookup table textures if CCM is disabled + if (!ccmEnabled_) { + + /// RGB correction tables as 2d textures + // eGL doesn't support glTexImage1D so we do a little hack with 2D to compensate + eglImageRedLookup_ = new eGLImage(DebayerParams::kRGBLookupSize, 1, 32, GL_TEXTURE1, 1); + if (!eglImageRedLookup_) + return -ENOMEM; + + eglImageGreenLookup_ = new eGLImage(DebayerParams::kRGBLookupSize, 1, 32, GL_TEXTURE2, 2); + if (!eglImageGreenLookup_) + return -ENOMEM; + + eglImageBlueLookup_ = new eGLImage(DebayerParams::kRGBLookupSize, 1, 32, GL_TEXTURE3, 3); + if (!eglImageBlueLookup_) + return -ENOMEM; + } + + // Create a single BO (calling gbm_surface_lock_front_buffer() again before gbm_surface_release_buffer() would create another BO) + if (gbmSurface_.mapSurface()) + return -ENODEV; + + StreamConfiguration &outputCfg = outputCfgs[0]; + SizeRange outSizeRange = sizes(inputCfg.pixelFormat, inputCfg.size); + + outputConfig_.stride = gbmSurface_.getStride(); + outputConfig_.frameSize = gbmSurface_.getFrameSize(); + + LOG(Debayer, Debug) << "Overriding stream config stride " + << outputCfg.stride << " with GBM surface stride " + << outputConfig_.stride; + outputCfg.stride = outputConfig_.stride; + + if (!outSizeRange.contains(outputCfg.size) || outputConfig_.stride != outputCfg.stride) { + LOG(Debayer, Error) + << "Invalid output size/stride: " + << "\n " << outputCfg.size << " (" << outSizeRange << ")" + << "\n " << 
outputCfg.stride << " (" << outputConfig_.stride << ")"; + return -EINVAL; + } + + window_.x = ((inputCfg.size.width - outputCfg.size.width) / 2) & + ~(inputConfig_.patternSize.width - 1); + window_.y = ((inputCfg.size.height - outputCfg.size.height) / 2) & + ~(inputConfig_.patternSize.height - 1); + window_.width = outputCfg.size.width; + window_.height = outputCfg.size.height; + + /* Don't pass x,y since process() already adjusts src before passing it */ + stats_->setWindow(Rectangle(window_.size())); + + LOG(Debayer, Debug) << "Input width " << inputCfg.size.width << " height " << inputCfg.size.height; + LOG(Debayer, Debug) << "Output width " << outputCfg.size.width << " height " << outputCfg.size.height; + LOG(Debayer, Debug) << "Output stride " << outputCfg.size.width << " height " << outputCfg.size.height; + + if (initBayerShaders(inputCfg.pixelFormat, outputCfg.pixelFormat)) + return -EINVAL; + + return 0; +} + +Size DebayerEGL::patternSize(PixelFormat inputFormat) +{ + DebayerEGL::DebayerInputConfig config; + + if (getInputConfig(inputFormat, config) != 0) + return {}; + + return config.patternSize; +} + +std::vector DebayerEGL::formats(PixelFormat inputFormat) +{ + DebayerEGL::DebayerInputConfig config; + + if (getInputConfig(inputFormat, config) != 0) + return std::vector(); + + return config.outputFormats; +} + +std::tuple +DebayerEGL::strideAndFrameSize(const PixelFormat &outputFormat, const Size &size) +{ + DebayerEGL::DebayerOutputConfig config; + + if (getOutputConfig(outputFormat, config) != 0) + return std::make_tuple(0, 0); + + /* round up to multiple of 8 for 64 bits alignment */ + unsigned int stride = (size.width * config.bpp / 8 + 7) & ~7; + + return std::make_tuple(stride, stride * size.height); +} + +void DebayerEGL::setShaderVariableValues(void) +{ + /* + * Raw Bayer 8-bit, and packed raw Bayer 10-bit/12-bit formats + * are stored in a GL_LUMINANCE texture. The texture width is + * equal to the stride. 
+ */ + GLfloat firstRed[] = { firstRed_x_, firstRed_y_ }; + GLfloat imgSize[] = { (GLfloat)width_, + (GLfloat)height_ }; + GLfloat Step[] = { static_cast(bytesPerPixel_) / (inputConfig_.stride - 1), + 1.0f / (height_ - 1) }; + GLfloat Stride = 1.0f; + GLfloat projIdentityMatrix[] = { + 1, 0, 0, 0, + 0, 1, 0, 0, + 0, 0, 1, 0, + 0, 0, 0, 1 + }; + + // vertexIn - bayer_8.vert + glEnableVertexAttribArray(attributeVertex_); + glVertexAttribPointer(attributeVertex_, 2, GL_FLOAT, GL_TRUE, + 2 * sizeof(GLfloat), vcoordinates); + + // textureIn - bayer_8.vert + glEnableVertexAttribArray(attributeTexture_); + glVertexAttribPointer(attributeTexture_, 2, GL_FLOAT, GL_TRUE, + 2 * sizeof(GLfloat), tcoordinates); + + // Set the sampler2D to the respective texture unit for each texutre + // To simultaneously sample multiple textures we need to use multiple + // texture units + glUniform1i(textureUniformBayerDataIn_, eglImageBayerIn_->texture_unit_uniform_id_); + if (!ccmEnabled_) { + glUniform1i(textureUniformRedLookupDataIn_, eglImageRedLookup_->texture_unit_uniform_id_); + glUniform1i(textureUniformGreenLookupDataIn_, eglImageGreenLookup_->texture_unit_uniform_id_); + glUniform1i(textureUniformBlueLookupDataIn_, eglImageBlueLookup_->texture_unit_uniform_id_); + } + + // These values are: + // firstRed = tex_bayer_first_red - bayer_8.vert + // imgSize = tex_size - bayer_8.vert + // step = tex_step - bayer_8.vert + // Stride = stride_factor identity.vert + // textureUniformProjMatri = No scaling + glUniform2fv(textureUniformBayerFirstRed_, 1, firstRed); + glUniform2fv(textureUniformSize_, 1, imgSize); + glUniform2fv(textureUniformStep_, 1, Step); + glUniform1f(textureUniformStrideFactor_, Stride); + glUniformMatrix4fv(textureUniformProjMatrix_, 1, + GL_FALSE, projIdentityMatrix); + + LOG(Debayer, Debug) << "vertexIn " << attributeVertex_ << " textureIn " << attributeTexture_ + << " tex_y " << textureUniformBayerDataIn_ + << " red_param " << textureUniformRedLookupDataIn_ + << " 
green_param " << textureUniformGreenLookupDataIn_ + << " blue_param " << textureUniformBlueLookupDataIn_ + << " tex_step " << textureUniformStep_ + << " tex_size " << textureUniformSize_ + << " stride_factor " << textureUniformStrideFactor_ + << " tex_bayer_first_red " << textureUniformBayerFirstRed_; + + LOG (Debayer, Debug) << "textureUniformY_ = 0 " << + " firstRed.x " << firstRed[0] << + " firstRed.y " << firstRed[1] << + " textureUniformSize_.width " << imgSize[0] << " " + " textureUniformSize_.height " << imgSize[1] << + " textureUniformStep_.x " << Step[0] << + " textureUniformStep_.y " << Step[1] << + " textureUniformStrideFactor_ " << Stride << + " textureUniformProjMatrix_ " << textureUniformProjMatrix_; + return; +} + +void DebayerEGL::debayerGPU(MappedFrameBuffer &in, MappedFrameBuffer &out, DebayerParams ¶ms) +{ + LOG(Debayer, Debug) + << "Input height " << height_ + << " width " << width_ + << " fd " << in.getPlaneFD(0); + + // eGL context switch + egl_.makeCurrent(); + + // Greate a standard texture + // we will replace this with the DMA version at some point + egl_.createTexture2D(eglImageBayerIn_, glFormat_, inputConfig_.stride / bytesPerPixel_, height_, in.planes()[0].data()); + + // Populate bayer parameters + if (ccmEnabled_) { + GLfloat ccm[9] = { + params.ccm[0][0], params.ccm[0][1], params.ccm[0][2], + params.ccm[1][0], params.ccm[1][1], params.ccm[1][2], + params.ccm[2][0], params.ccm[2][1], params.ccm[2][2], + }; + glUniformMatrix3fv(ccmUniformDataIn_, 1, GL_FALSE, ccm); + } else { + egl_.createTexture2D(eglImageRedLookup_, GL_LUMINANCE, DebayerParams::kRGBLookupSize, 1, ¶ms.red); + egl_.createTexture2D(eglImageGreenLookup_, GL_LUMINANCE, DebayerParams::kRGBLookupSize, 1, ¶ms.green); + egl_.createTexture2D(eglImageBlueLookup_, GL_LUMINANCE, DebayerParams::kRGBLookupSize, 1, ¶ms.blue); + } + + // Setup the scene + setShaderVariableValues(); + glViewport(0, 0, width_, height_); + glClear(GL_COLOR_BUFFER_BIT); + glDisable(GL_BLEND); + + // 
Draw the scene + glDrawArrays(GL_TRIANGLE_FAN, 0, DEBAYER_OPENGL_COORDS); + + // eglclientWaitScynKhr / eglwaitsynckr ? + egl_.swapBuffers(); + + // Copy from the output GBM buffer to our output plane + // once we get render to texture working the + // explicit lock ioctl, memcpy and unlock ioctl won't be required + gbmSurface_.getFrameBufferData(out.planes()[0].data(), out.planes()[0].size()); +} + +void DebayerEGL::process(uint32_t frame, FrameBuffer *input, FrameBuffer *output, DebayerParams params) +{ + bench_.startFrame(); + + std::vector dmaSyncers; + + dmaSyncBegin(dmaSyncers, input, output); + + setParams(params); + + /* Copy metadata from the input buffer */ + FrameMetadata &metadata = output->_d()->metadata(); + metadata.status = input->metadata().status; + metadata.sequence = input->metadata().sequence; + metadata.timestamp = input->metadata().timestamp; + + MappedFrameBuffer in(input, MappedFrameBuffer::MapFlag::Read); + MappedFrameBuffer out(output, MappedFrameBuffer::MapFlag::Write); + if (!in.isValid() || !out.isValid()) { + LOG(Debayer, Error) << "mmap-ing buffer(s) failed"; + metadata.status = FrameMetadata::FrameError; + return; + } + + debayerGPU(in, out, params); + + dmaSyncers.clear(); + + bench_.finishFrame(); + + metadata.planes()[0].bytesused = out.planes()[0].size(); + + // Calculate stats for the whole frame + stats_->processFrame(frame, 0, input); + + outputBufferReady.emit(output); + inputBufferReady.emit(input); +} + +SizeRange DebayerEGL::sizes(PixelFormat inputFormat, const Size &inputSize) +{ + Size patternSize = this->patternSize(inputFormat); + unsigned int borderHeight = patternSize.height; + + if (patternSize.isNull()) + return {}; + + /* No need for top/bottom border with a pattern height of 2 */ + if (patternSize.height == 2) + borderHeight = 0; + + /* + * For debayer interpolation a border is kept around the entire image + * and the minimum output size is pattern-height x pattern-width. 
+ */ + if (inputSize.width < (3 * patternSize.width) || + inputSize.height < (2 * borderHeight + patternSize.height)) { + LOG(Debayer, Warning) + << "Input format size too small: " << inputSize.toString(); + return {}; + } + + return SizeRange(Size(patternSize.width, patternSize.height), + Size((inputSize.width - 2 * patternSize.width) & ~(patternSize.width - 1), + (inputSize.height - 2 * borderHeight) & ~(patternSize.height - 1)), + patternSize.width, patternSize.height); +} + +} /* namespace libcamera */ diff --git a/src/libcamera/software_isp/debayer_egl.h b/src/libcamera/software_isp/debayer_egl.h new file mode 100644 index 000000000..56f5434ac --- /dev/null +++ b/src/libcamera/software_isp/debayer_egl.h @@ -0,0 +1,171 @@ +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* + * Copyright (C) 2025, Bryan O'Donoghue. + * + * Authors: + * Bryan O'Donoghue + * + * debayer_opengl.h - EGL debayer header + */ + +#pragma once + +#include +#include +#include + +#define GL_GLEXT_PROTOTYPES +#define EGL_EGLEXT_PROTOTYPES +#include +#include +#include + +#include + +#include "debayer.h" + +#include "libcamera/internal/bayer_format.h" +#include "libcamera/internal/egl.h" +#include "libcamera/internal/framebuffer.h" +#include "libcamera/internal/mapped_framebuffer.h" +#include "libcamera/internal/software_isp/benchmark.h" +#include "libcamera/internal/software_isp/swstats_cpu.h" + +namespace libcamera { + +#define DEBAYER_EGL_MIN_SIMPLE_RGB_GAIN_TEXTURE_UNITS 4 +#define DEBAYER_OPENGL_COORDS 4 + +/** + * \class DebayerEGL + * \brief Class for debayering using an EGL Shader + * + * Implements an EGL shader based debayering solution. + */ +class DebayerEGL : public Debayer +{ +public: + /** + * \brief Constructs a DebayerEGL object. + * \param[in] stats Pointer to the stats object to use. + */ + DebayerEGL(std::unique_ptr stats); + ~DebayerEGL(); + + /* + * Setup the Debayer object according to the passed in parameters. 
+ * Return 0 on success, a negative errno value on failure + * (unsupported parameters). + */ + int configure(const StreamConfiguration &inputCfg, + const std::vector> &outputCfgs, + bool ccmEnabled); + + /* + * Get width and height at which the bayer-pattern repeats. + * Return pattern-size or an empty Size for an unsupported inputFormat. + */ + Size patternSize(PixelFormat inputFormat); + + std::vector formats(PixelFormat input); + std::tuple strideAndFrameSize(const PixelFormat &outputFormat, const Size &size); + + void process(uint32_t frame, FrameBuffer *input, FrameBuffer *output, DebayerParams params); + + /** + * \brief Get the file descriptor for the statistics. + * + * \return the file descriptor pointing to the statistics. + */ + const SharedFD &getStatsFD() { return stats_->getStatsFD(); } + + /** + * \brief Get the output frame size. + * + * \return The output frame size. + */ + unsigned int frameSize() { return outputConfig_.frameSize; } + + SizeRange sizes(PixelFormat inputFormat, const Size &inputSize); + +private: + static int getInputConfig(PixelFormat inputFormat, DebayerInputConfig &config); + static int getOutputConfig(PixelFormat outputFormat, DebayerOutputConfig &config); + int setupStandardBayerOrder(BayerFormat::Order order); + void pushEnv(std::vector &shaderEnv, const char *str); + int initBayerShaders(PixelFormat inputFormat, PixelFormat outputFormat); + int initEGLContext(); + int generateTextures(); + int compileShaderProgram(GLuint &shaderId, GLenum shaderType, + unsigned char *shaderData, int shaderDataLen, + std::vector shaderEnv); + int linkShaderProgram(void); + int getShaderVariableLocations(); + void setShaderVariableValues(void); + void configureTexture(GLuint &texture); + void debayerGPU(MappedFrameBuffer &in, MappedFrameBuffer &out, DebayerParams ¶ms); + + // Shader program identifiers + GLuint vertexShaderId_; + GLuint fragmentShaderId_; + GLuint programId_; + enum { + BAYER_INPUT_INDEX = 0, + BAYER_OUTPUT_INDEX, + 
BAYER_BUF_NUM, + }; + + // Pointer to object representing input texture + eGLImage *eglImageBayerIn_; + + eGLImage *eglImageRedLookup_; + eGLImage *eglImageGreenLookup_; + eGLImage *eglImageBlueLookup_; + + // Shader parameters + float firstRed_x_; + float firstRed_y_; + GLint attributeVertex_; + GLint attributeTexture_; + GLint textureUniformStep_; + GLint textureUniformSize_; + GLint textureUniformStrideFactor_; + GLint textureUniformBayerFirstRed_; + GLint textureUniformProjMatrix_; + + GLint textureUniformBayerDataIn_; + + // These textures will either point to simple RGB gains or to CCM lookup tables + GLint textureUniformRedLookupDataIn_; + GLint textureUniformGreenLookupDataIn_; + GLint textureUniformBlueLookupDataIn_; + + // Represent per-frame CCM as a uniform vector of floats 3 x 3 + GLint ccmUniformDataIn_; + bool ccmEnabled_; + + Rectangle window_; + std::unique_ptr stats_; + eGL egl_; + GBM gbmSurface_; + uint32_t width_; + uint32_t height_; + GLint glFormat_; + unsigned int bytesPerPixel_; + + GLfloat vcoordinates[DEBAYER_OPENGL_COORDS][2] = { + { -1.0f, -1.0f }, + { -1.0f, +1.0f }, + { +1.0f, +1.0f }, + { +1.0f, -1.0f }, + }; + + GLfloat tcoordinates[DEBAYER_OPENGL_COORDS][2] = { + { 0.0f, 1.0f }, + { 0.0f, 0.0f }, + { 1.0f, 0.0f }, + { 1.0f, 1.0f }, + }; +}; + +} /* namespace libcamera */ diff --git a/src/libcamera/software_isp/gpuisp-todo.txt b/src/libcamera/software_isp/gpuisp-todo.txt new file mode 100644 index 000000000..0ff82f81e --- /dev/null +++ b/src/libcamera/software_isp/gpuisp-todo.txt @@ -0,0 +1,42 @@ +List the TODOs in perceived order of ease. + +24 bit output support: + - Take the BPP we already capture and get a 24 bit GBM surface + - Pass a compile-time parameter to the shaders to tell them to do + gl_FragColor = rgb not gl_FragColor = rgba + +Make GPUISP default: + - Right now the environment variable allows over-riding to swtich + from CPU to GPU. 
+ - Once we support 24 BPP output on GPUISP we will have the same + pixel format support as CPU and can set the default to GPU without + regressing functionality + +glTexture1D: + - Initial code was developed for < GLES 2.O but since we have fixed + on GLES >= 2.0 this means we can use glTexture1D + - Provided this is so amend the shaders to do val = texture(x, y, 0); + not texture(x, y, 0.5) the 0.5 is because of using glTexture2D + +Surfaceless GBM: + - We get a GBM surface and then have to swap buffers + If we rework for surfaceless GBM and EGL then the swap buffer can + be dropped. + +dma-buf texture upload: + - Currently we pass the input buffer to glCreateTexture2D. + We should be able to make the upload of the input buffer go faster + by using eglCreateImageKHR and enumerated the dma-buf contents. + +Render-to-texture: + - Right now we render to the GBM provided surface framebuffer + and then memcpy from that buffer to the target output buffer. + This necessitates flushing the cache on the target buffer in + addition to the memcpy(). + - Render-to-texture where we generate the target framebuffer + directly from a dma-buf handle will mitigate the memcpy() phase. + - It should be the case then that the consumer of the output buffer + i.e. the thing that's not libcamera is responsible to flush the cache + if-and-only-if that user writes to the buffer. + - We need to flush the cache on the buffer because we are memcpying() to it. 
+ diff --git a/src/libcamera/software_isp/meson.build b/src/libcamera/software_isp/meson.build index aac7eda7b..c61ac7d59 100644 --- a/src/libcamera/software_isp/meson.build +++ b/src/libcamera/software_isp/meson.build @@ -2,14 +2,23 @@ softisp_enabled = pipelines.contains('simple') summary({'SoftISP support' : softisp_enabled}, section : 'Configuration') +summary({'SoftISP GPU acceleration' : gles_headless_enabled}, section : 'Configuration') if not softisp_enabled subdir_done() endif libcamera_internal_sources += files([ + 'benchmark.cpp', 'debayer.cpp', 'debayer_cpu.cpp', 'software_isp.cpp', 'swstats_cpu.cpp', ]) + +if softisp_enabled and gles_headless_enabled + config_h.set('HAVE_DEBAYER_EGL', 1) + libcamera_internal_sources += files([ + 'debayer_egl.cpp', + ]) +endif diff --git a/src/libcamera/software_isp/software_isp.cpp b/src/libcamera/software_isp/software_isp.cpp index 8f41591c8..0e4693b77 100644 --- a/src/libcamera/software_isp/software_isp.cpp +++ b/src/libcamera/software_isp/software_isp.cpp @@ -15,6 +15,7 @@ #include #include +#include #include #include @@ -25,6 +26,9 @@ #include "libcamera/internal/software_isp/debayer_params.h" #include "debayer_cpu.h" +#if HAVE_DEBAYER_EGL +#include "debayer_egl.h" +#endif /** * \file software_isp.cpp @@ -114,7 +118,20 @@ SoftwareIsp::SoftwareIsp(PipelineHandler *pipe, const CameraSensor *sensor, } stats->statsReady.connect(this, &SoftwareIsp::statsReady); - debayer_ = std::make_unique(std::move(stats)); +#if HAVE_DEBAYER_EGL + const char *softISPMode = utils::secure_getenv("LIBCAMERA_SOFTISP_MODE"); + + if (softISPMode && !strcmp(softISPMode, "gpu")) + debayer_ = std::make_unique(std::move(stats)); +#endif + if (!debayer_) + debayer_ = std::make_unique(std::move(stats)); + + if (!debayer_) { + LOG(SoftwareIsp, Error) << "Failed to create Debayer object"; + return; + } + debayer_->inputBufferReady.connect(this, &SoftwareIsp::inputReady); debayer_->outputBufferReady.connect(this, &SoftwareIsp::outputReady); @@ 
-159,8 +176,6 @@ SoftwareIsp::SoftwareIsp(PipelineHandler *pipe, const CameraSensor *sensor, metadataReady.emit(frame, metadata); }); ipa_->setSensorControls.connect(this, &SoftwareIsp::setSensorCtrls); - - debayer_->moveToThread(&ispWorkerThread_); } SoftwareIsp::~SoftwareIsp() @@ -262,7 +277,18 @@ int SoftwareIsp::configure(const StreamConfiguration &inputCfg, if (ret < 0) return ret; - return debayer_->configure(inputCfg, outputCfgs, ccmEnabled_); + debayer_->moveToThread(&ispWorkerThread_); + ispWorkerThread_.start(); + + ret = debayer_->invokeMethod(&Debayer::configure, + ConnectionTypeBlocking, inputCfg, + outputCfgs, ccmEnabled_); + if (ret) { + ispWorkerThread_.exit(); + ispWorkerThread_.wait(); + } + + return ret; } /** @@ -343,7 +369,6 @@ int SoftwareIsp::start() if (ret) return ret; - ispWorkerThread_.start(); return 0; } @@ -386,7 +411,7 @@ void SoftwareIsp::stop() void SoftwareIsp::process(uint32_t frame, FrameBuffer *input, FrameBuffer *output) { ipa_->computeParams(frame); - debayer_->invokeMethod(&DebayerCpu::process, + debayer_->invokeMethod(&Debayer::process, ConnectionTypeQueued, frame, input, output, debayerParams_); } diff --git a/src/libcamera/software_isp/swstats_cpu.cpp b/src/libcamera/software_isp/swstats_cpu.cpp index a6a73f483..1ff15f5b7 100644 --- a/src/libcamera/software_isp/swstats_cpu.cpp +++ b/src/libcamera/software_isp/swstats_cpu.cpp @@ -9,13 +9,14 @@ * CPU based software statistics implementation */ -#include "swstats_cpu.h" +#include "libcamera/internal/software_isp/swstats_cpu.h" #include #include #include "libcamera/internal/bayer_format.h" +#include "libcamera/internal/mapped_framebuffer.h" namespace libcamera { @@ -58,6 +59,8 @@ namespace libcamera { * also indicates if processLine2() should be called or not. * This may only be called after a successful configure() call. * + * Valid sizes are: 1x1, 2x2, 4x2 or 4x4. + * * \return The pattern size */ @@ -71,6 +74,19 @@ namespace libcamera { * patternSize height == 1. 
* It'll process line 0 and 1 for input formats with patternSize height >= 2. * This function may only be called after a successful setWindow() call. + * + * This function takes an array of src pointers each pointing to a line in + * the source image. + * + * Bayer input data requires (patternSize_.height + 1) src pointers, with + * the middle element of the array pointing to the actual line being processed. + * Earlier element(s) will point to the previous line(s) and later element(s) + * to the next line(s). See the DebayerCpu::debayerFn documentation for details. + * + * Planar input data requires a src pointer for each plane, with src[0] pointing + * to the line in plane 0, etc. + * + * For non Bayer single plane input data only a single src pointer is required. */ /** @@ -89,20 +105,6 @@ namespace libcamera { * \brief Signals that the statistics are ready */ -/** - * \typedef SwStatsCpu::statsProcessFn - * \brief Called when there is data to get statistics from - * \param[in] src The input data - * - * These functions take an array of (patternSize_.height + 1) src - * pointers each pointing to a line in the source image. The middle - * element of the array will point to the actual line being processed. - * Earlier element(s) will point to the previous line(s) and later - * element(s) to the next line(s). - * - * See the documentation of DebayerCpu::debayerFn for more details. - */ - /** * \var unsigned int SwStatsCpu::ySkipMask_ * \brief Skip lines where this bitmask is set in y @@ -113,13 +115,6 @@ namespace libcamera { * \brief Statistics window, set by setWindow(), used every line */ -/** - * \var Size SwStatsCpu::patternSize_ - * \brief The size of the bayer pattern - * - * Valid sizes are: 2x2, 4x2 or 4x4. 
- */ - /** * \var unsigned int SwStatsCpu::xShift_ * \brief The offset of x, applied to window_.x for bayer variants @@ -147,10 +142,7 @@ static constexpr unsigned int kBlueYMul = 29; /* 0.114 * 256 */ \ uint64_t sumR = 0; \ uint64_t sumG = 0; \ - uint64_t sumB = 0; \ - pixel_t r0 = 0, r1 = 0, b0 = 0, \ - b1 = 0, g0 = 0, g1 = 0; \ - uint64_t sharpness = 0; + uint64_t sumB = 0; #define SWSTATS_ACCUMULATE_LINE_STATS(div) \ sumR += r; \ @@ -160,18 +152,12 @@ static constexpr unsigned int kBlueYMul = 29; /* 0.114 * 256 */ yVal = r * kRedYMul; \ yVal += g * kGreenYMul; \ yVal += b * kBlueYMul; \ - stats_.yHistogram[yVal * SwIspStats::kYHistogramSize / (256 * 256 * (div))]++; \ - if (r0 != 0) \ - sharpness += abs(r - 2*r1 + r0) * kRedYMul + abs(g - 2*g1 + g0) * kGreenYMul + abs(b - 2*b1 + b0) * kBlueYMul; \ - r0 = r1; g0 = g1; b0 = b1; \ - r1 = r; g1 = g; b1 = b; \ - + stats_.yHistogram[yVal * SwIspStats::kYHistogramSize / (256 * 256 * (div))]++; #define SWSTATS_FINISH_LINE_STATS() \ stats_.sumR_ += sumR; \ stats_.sumG_ += sumG; \ - stats_.sumB_ += sumB; \ - stats_.sharpness += sharpness; + stats_.sumB_ += sumB; void SwStatsCpu::statsBGGR8Line0(const uint8_t *src[]) { @@ -315,7 +301,6 @@ void SwStatsCpu::startFrame(void) stats_.sumR_ = 0; stats_.sumB_ = 0; stats_.sumG_ = 0; - stats_.sharpness = 0; stats_.yHistogram.fill(0); } @@ -376,11 +361,14 @@ int SwStatsCpu::setupStandardBayerOrder(BayerFormat::Order order) */ int SwStatsCpu::configure(const StreamConfiguration &inputCfg) { + stride_ = inputCfg.stride; + BayerFormat bayerFormat = BayerFormat::fromPixelFormat(inputCfg.pixelFormat); if (bayerFormat.packing == BayerFormat::Packing::None && setupStandardBayerOrder(bayerFormat.order) == 0) { + processFrame_ = &SwStatsCpu::processBayerFrame2; switch (bayerFormat.bitDepth) { case 8: stats0_ = &SwStatsCpu::statsBGGR8Line0; @@ -401,6 +389,7 @@ int SwStatsCpu::configure(const StreamConfiguration &inputCfg) /* Skip every 3th and 4th line, sample every other 2x2 block */ 
ySkipMask_ = 0x02; xShift_ = 0; + processFrame_ = &SwStatsCpu::processBayerFrame2; switch (bayerFormat.order) { case BayerFormat::BGGR: @@ -441,4 +430,50 @@ void SwStatsCpu::setWindow(const Rectangle &window) window_.height &= ~(patternSize_.height - 1); } +void SwStatsCpu::processBayerFrame2(MappedFrameBuffer &in) +{ + const uint8_t *src = in.planes()[0].data(); + const uint8_t *linePointers[3]; + + /* Adjust src for starting at window_.y */ + src += window_.y * stride_; + + for (unsigned int y = 0; y < window_.height; y += 2) { + if (y & ySkipMask_) { + src += stride_ * 2; + continue; + } + + /* linePointers[0] is not used by any stats0_ functions */ + linePointers[1] = src; + linePointers[2] = src + stride_; + (this->*stats0_)(linePointers); + src += stride_ * 2; + } +} + +/** + * \brief Calculate statistics for a frame in one go + * \param[in] frame The frame number + * \param[in] bufferId ID of the statistics buffer + * \param[in] input The frame to process + * + * This may only be called after a successful setWindow() call. + */ +void SwStatsCpu::processFrame(uint32_t frame, uint32_t bufferId, FrameBuffer *input) +{ + bench_.startFrame(); + startFrame(); + + MappedFrameBuffer in(input, MappedFrameBuffer::MapFlag::Read); + if (!in.isValid()) { + LOG(SwStatsCpu, Error) << "mmap-ing buffer(s) failed"; + return; + } + + (this->*processFrame_)(in); + finishFrame(frame, bufferId); + bench_.finishFrame(); +} + } /* namespace libcamera */ diff --git a/src/libcamera/v4l2_videodevice.cpp b/src/libcamera/v4l2_videodevice.cpp index 7822bf1ef..d53aa2d3c 100644 --- a/src/libcamera/v4l2_videodevice.cpp +++ b/src/libcamera/v4l2_videodevice.cpp @@ -2031,9 +2031,10 @@ int V4L2VideoDevice::streamOff() /* Send back all queued buffers. 
*/ for (auto it : queuedBuffers_) { FrameBuffer *buffer = it.second; + FrameMetadata &metadata = buffer->_d()->metadata(); cache_->put(it.first); - buffer->_d()->cancel(); + metadata.status = FrameMetadata::FrameCancelled; bufferReady.emit(buffer); } diff --git a/subprojects/libpisp.wrap b/subprojects/libpisp.wrap index 0e0c7baaf..8b62c036f 100644 --- a/subprojects/libpisp.wrap +++ b/subprojects/libpisp.wrap @@ -2,5 +2,5 @@ [wrap-git] url = https://github.com/raspberrypi/libpisp.git -revision = v1.2.1 +revision = v1.2.0 depth = 1 diff --git a/subprojects/libyaml.wrap b/subprojects/libyaml.wrap index 44ac0ff8e..392416c61 100644 --- a/subprojects/libyaml.wrap +++ b/subprojects/libyaml.wrap @@ -1,13 +1,7 @@ # SPDX-License-Identifier: CC0-1.0 -[wrap-file] -directory = yaml-0.2.5 -source_url = https://pyyaml.org/download/libyaml/yaml-0.2.5.tar.gz -source_filename = yaml-0.2.5.tar.gz -source_hash = c642ae9b75fee120b2d96c712538bd2cf283228d2337df2cf2988e3c02678ef4 -patch_filename = libyaml_0.2.5-1_patch.zip -patch_url = https://wrapdb.mesonbuild.com/v2/libyaml_0.2.5-1/get_patch -patch_hash = bf2e9b922be00b6b00c5fce29d9fb8dc83f0431c77239f3b73e8b254d3f3f5b5 - -[provide] -yaml-0.1 = yaml_dep +[wrap-git] +directory = libyaml +url = https://github.com/yaml/libyaml +# tags/0.2.5 +revision = 2c891fc7a770e8ba2fec34fc6b545c672beb37e6 diff --git a/test/log/log_api.cpp b/test/log/log_api.cpp index 8d19cf0ce..0b999738d 100644 --- a/test/log/log_api.cpp +++ b/test/log/log_api.cpp @@ -26,11 +26,6 @@ using namespace std; using namespace libcamera; LOG_DEFINE_CATEGORY(LogAPITest) -LOG_DEFINE_CATEGORY(Cat0) -LOG_DEFINE_CATEGORY(Cat1) -LOG_DEFINE_CATEGORY(Cat2) -LOG_DEFINE_CATEGORY(Cat3) -LOG_DEFINE_CATEGORY(Cat4) class LogAPITest : public Test { @@ -79,34 +74,6 @@ protected: return TestPass; } - int testEnvLevels() - { - setenv("LIBCAMERA_LOG_LEVELS", - "Cat0:0,Cat0:9999,Cat1:INFO,Cat1:INVALID,Cat2:2,Cat2:-1," - "Cat3:ERROR,Cat3:{[]},Cat4:4,Cat4:rubbish", - true); - 
logSetTarget(libcamera::LoggingTargetNone); - - const std::pair expected[] = { - { _LOG_CATEGORY(Cat0)(), libcamera::LogDebug }, - { _LOG_CATEGORY(Cat1)(), libcamera::LogInfo }, - { _LOG_CATEGORY(Cat2)(), libcamera::LogWarning }, - { _LOG_CATEGORY(Cat3)(), libcamera::LogError }, - { _LOG_CATEGORY(Cat4)(), libcamera::LogFatal }, - }; - bool ok = true; - - for (const auto &[c, s] : expected) { - if (c.severity() != s) { - ok = false; - cerr << "Severity of " << c.name() << " (" << c.severity() << ") " - << "does not equal " << s << endl; - } - } - - return ok ? TestPass : TestFail; - } - int testFile() { int fd = open("/tmp", O_TMPFILE | O_RDWR, S_IRUSR | S_IWUSR); @@ -168,11 +135,7 @@ protected: int run() override { - int ret = testEnvLevels(); - if (ret != TestPass) - return TestFail; - - ret = testFile(); + int ret = testFile(); if (ret != TestPass) return TestFail; diff --git a/test/log/meson.build b/test/log/meson.build index f413c3898..2298ff84e 100644 --- a/test/log/meson.build +++ b/test/log/meson.build @@ -11,6 +11,5 @@ foreach test : log_test link_with : test_libraries, include_directories : test_includes_internal) - test(test['name'], exe, suite : 'log', - should_fail : test.get('should_fail', false)) + test(test['name'], exe, suite : 'log') endforeach diff --git a/utils/gen-shader-header.py b/utils/gen-shader-header.py new file mode 100755 index 000000000..6668e648f --- /dev/null +++ b/utils/gen-shader-header.py @@ -0,0 +1,38 @@ +#!/usr/bin/env python3 +# SPDX-License-Identifier: GPL-2.0-or-later +# Copyright (C) 2025, Bryan O'Donoghue. +# +# Author: Bryan O'Donoghue +# +# A python script which takes a list of shader files and converts into a C +# header. 
+# +import sys + +try: + with open(sys.argv[2]) as file: + data = file.read() + data_len = len(data) + + name = sys.argv[1].replace(".", "_") + name_len = name + "_len" + + j = 0 + print("unsigned char", name, "[] = {") + for ch in data: + print(f"0x{ord(ch):02x}, ", end="") + j = j + 1 + if j == 16: + print() + j = 0 + if j != 0: + print() + print("};") + + print() + print(f"const unsigned int {name_len}={data_len};") + +except FileNotFoundError: + print(f"File {sys.argv[2]} not found", file=sys.stderr) +except IOError: + print(f"Unable to read {sys.argv[2]}", file=sys.stderr) diff --git a/utils/gen-shader-headers.sh b/utils/gen-shader-headers.sh new file mode 100755 index 000000000..ca4f19f07 --- /dev/null +++ b/utils/gen-shader-headers.sh @@ -0,0 +1,44 @@ +#!/bin/sh +set -x + +if [ $# -lt 4 ]; then + echo "Invalid arg count must be >= 5" + exit 1 +fi +src_dir="$1"; shift +build_dir="$1"; shift +build_path=$build_dir/"$1"; shift + +cat < "$build_path" +/* SPDX-License-Identifier: LGPL-2.1-or-later */ +/* This file is auto-generated, do not edit! */ +/* + * Copyright (C) 2025, Linaro Ltd. + * + */ + +#pragma once + +EOF + +cat <> "$build_path" +/* + * List the names of the shaders at the top of + * header for readability's sake + * +EOF + +for file in "$@"; do + echo "file is $file" + name=$(basename "$build_dir/$file" | tr '.' '_') + echo " * unsigned char $name;" >> "$build_path" +done + +echo "*/" >> "$build_path" + +echo "/* Hex encoded shader data */" >> "$build_path" +for file in "$@"; do + name=$(basename "$build_dir/$file") + "$src_dir/utils/gen-shader-header.py" "$name" "$build_dir/$file" >> "$build_path" + echo >> "$build_path" +done diff --git a/utils/meson.build b/utils/meson.build index 95d657ac9..3deed8ad4 100644 --- a/utils/meson.build +++ b/utils/meson.build @@ -3,5 +3,7 @@ subdir('codegen') subdir('ipu3') +gen_shader_headers = files('gen-shader-headers.sh') + ## Module signing gen_ipa_priv_key = files('gen-ipa-priv-key.sh')