Compare commits

..

No commits in common. "master" and "v0.3.0" have entirely different histories.

729 changed files with 8984 additions and 65061 deletions

View file

@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only # SPDX-License-Identifier: GPL-2.0-only
# #
# clang-format configuration file. Intended for clang-format >= 12. # clang-format configuration file. Intended for clang-format >= 7.
# #
# For more information, see: # For more information, see:
# #
@ -75,12 +75,11 @@ IncludeCategories:
Priority: 9 Priority: 9
# Qt includes (match before C++ standard library) # Qt includes (match before C++ standard library)
- Regex: '<Q([A-Za-z0-9\-_])+>' - Regex: '<Q([A-Za-z0-9\-_])+>'
CaseSensitive: true
Priority: 9 Priority: 9
# Headers in <> with an extension. (+system libraries) # Headers in <> with an extension. (+system libraries)
- Regex: '<([A-Za-z0-9\-_])+\.h>' - Regex: '<([A-Za-z0-9\-_])+\.h>'
Priority: 2 Priority: 2
# System headers # System headers
- Regex: '<sys/.*>' - Regex: '<sys/.*>'
Priority: 2 Priority: 2
# C++ standard library includes (no extension) # C++ standard library includes (no extension)
@ -100,7 +99,7 @@ IncludeCategories:
# IPA Interfaces # IPA Interfaces
- Regex: '<libcamera/ipa/.*\.h>' - Regex: '<libcamera/ipa/.*\.h>'
Priority: 7 Priority: 7
# libcamera Internal headers in "" # libcamera Internal headers in ""
- Regex: '"libcamera/internal/.*\.h"' - Regex: '"libcamera/internal/.*\.h"'
Priority: 8 Priority: 8
# Other libraries headers with one group per library (.h or .hpp) # Other libraries headers with one group per library (.h or .hpp)

View file

@ -1,29 +0,0 @@
# SPDX-License-Identifier: CC0-1.0
root = true
[*]
charset = utf-8
end_of_line = lf
insert_final_newline = true
trim_trailing_whitespace = true
[*.{cpp,h}]
indent_size = 8
indent_style = tab
[*.json]
indent_size = 4
indent_style = space
[*.py]
indent_size = 4
indent_style = space
[*.yaml]
indent_size = 2
indent_style = space
[{meson.build,meson_options.txt}]
indent_size = 4
indent_style = space

1
.gitignore vendored
View file

@ -6,4 +6,3 @@
*.patch *.patch
*.pyc *.pyc
__pycache__/ __pycache__/
venv/

View file

@ -1,33 +0,0 @@
# SPDX-License-Identifier: CC-BY-SA-4.0
@INCLUDE_PATH = @TOP_BUILDDIR@/Documentation
@INCLUDE = Doxyfile-common
HIDE_UNDOC_CLASSES = NO
HIDE_UNDOC_MEMBERS = NO
HTML_OUTPUT = internal-api-html
INTERNAL_DOCS = YES
ENABLED_SECTIONS = internal
INPUT = "@TOP_SRCDIR@/Documentation" \
"@TOP_SRCDIR@/include/libcamera" \
"@TOP_SRCDIR@/src/ipa/ipu3" \
"@TOP_SRCDIR@/src/ipa/libipa" \
"@TOP_SRCDIR@/src/libcamera" \
"@TOP_BUILDDIR@/include/libcamera" \
"@TOP_BUILDDIR@/src/libcamera"
EXCLUDE = @TOP_SRCDIR@/include/libcamera/base/span.h \
@TOP_SRCDIR@/include/libcamera/internal/device_enumerator_sysfs.h \
@TOP_SRCDIR@/include/libcamera/internal/device_enumerator_udev.h \
@TOP_SRCDIR@/include/libcamera/internal/ipc_pipe_unixsocket.h \
@TOP_SRCDIR@/src/libcamera/device_enumerator_sysfs.cpp \
@TOP_SRCDIR@/src/libcamera/device_enumerator_udev.cpp \
@TOP_SRCDIR@/src/libcamera/ipc_pipe_unixsocket.cpp \
@TOP_SRCDIR@/src/libcamera/pipeline/ \
@TOP_SRCDIR@/src/libcamera/sensor/camera_sensor_legacy.cpp \
@TOP_SRCDIR@/src/libcamera/sensor/camera_sensor_raw.cpp \
@TOP_SRCDIR@/src/libcamera/tracepoints.cpp \
@TOP_BUILDDIR@/include/libcamera/internal/tracepoints.h \
@TOP_BUILDDIR@/include/libcamera/ipa/soft_ipa_interface.h \
@TOP_BUILDDIR@/src/libcamera/proxy/

View file

@ -1,20 +0,0 @@
# SPDX-License-Identifier: CC-BY-SA-4.0
@INCLUDE_PATH = @TOP_BUILDDIR@/Documentation
@INCLUDE = Doxyfile-common
HIDE_UNDOC_CLASSES = YES
HIDE_UNDOC_MEMBERS = YES
HTML_OUTPUT = api-html
INTERNAL_DOCS = NO
INPUT = "@TOP_SRCDIR@/Documentation" \
${inputs}
EXCLUDE = @TOP_SRCDIR@/include/libcamera/base/class.h \
@TOP_SRCDIR@/include/libcamera/base/object.h \
@TOP_SRCDIR@/include/libcamera/base/span.h \
@TOP_SRCDIR@/src/libcamera/base/class.cpp \
@TOP_SRCDIR@/src/libcamera/base/object.cpp
PREDEFINED += __DOXYGEN_PUBLIC__

View file

@ -22,17 +22,35 @@ CASE_SENSE_NAMES = YES
QUIET = YES QUIET = YES
WARN_AS_ERROR = @WARN_AS_ERROR@ WARN_AS_ERROR = @WARN_AS_ERROR@
INPUT = "@TOP_SRCDIR@/include/libcamera" \
"@TOP_SRCDIR@/src/ipa/ipu3" \
"@TOP_SRCDIR@/src/ipa/libipa" \
"@TOP_SRCDIR@/src/libcamera" \
"@TOP_BUILDDIR@/include/libcamera" \
"@TOP_BUILDDIR@/src/libcamera"
FILE_PATTERNS = *.c \ FILE_PATTERNS = *.c \
*.cpp \ *.cpp \
*.dox \
*.h *.h
RECURSIVE = YES RECURSIVE = YES
EXCLUDE = @TOP_SRCDIR@/include/libcamera/base/span.h \
@TOP_SRCDIR@/include/libcamera/internal/device_enumerator_sysfs.h \
@TOP_SRCDIR@/include/libcamera/internal/device_enumerator_udev.h \
@TOP_SRCDIR@/include/libcamera/internal/ipc_pipe_unixsocket.h \
@TOP_SRCDIR@/src/libcamera/device_enumerator_sysfs.cpp \
@TOP_SRCDIR@/src/libcamera/device_enumerator_udev.cpp \
@TOP_SRCDIR@/src/libcamera/ipc_pipe_unixsocket.cpp \
@TOP_SRCDIR@/src/libcamera/pipeline/ \
@TOP_SRCDIR@/src/libcamera/tracepoints.cpp \
@TOP_BUILDDIR@/include/libcamera/internal/tracepoints.h \
@TOP_BUILDDIR@/include/libcamera/ipa/soft_ipa_interface.h \
@TOP_BUILDDIR@/src/libcamera/proxy/
EXCLUDE_PATTERNS = @TOP_BUILDDIR@/include/libcamera/ipa/*_serializer.h \ EXCLUDE_PATTERNS = @TOP_BUILDDIR@/include/libcamera/ipa/*_serializer.h \
@TOP_BUILDDIR@/include/libcamera/ipa/*_proxy.h \ @TOP_BUILDDIR@/include/libcamera/ipa/*_proxy.h \
@TOP_BUILDDIR@/include/libcamera/ipa/ipu3_*.h \ @TOP_BUILDDIR@/include/libcamera/ipa/ipu3_*.h \
@TOP_BUILDDIR@/include/libcamera/ipa/mali-c55_*.h \
@TOP_BUILDDIR@/include/libcamera/ipa/raspberrypi_*.h \ @TOP_BUILDDIR@/include/libcamera/ipa/raspberrypi_*.h \
@TOP_BUILDDIR@/include/libcamera/ipa/rkisp1_*.h \ @TOP_BUILDDIR@/include/libcamera/ipa/rkisp1_*.h \
@TOP_BUILDDIR@/include/libcamera/ipa/vimc_*.h @TOP_BUILDDIR@/include/libcamera/ipa/vimc_*.h
@ -52,13 +70,14 @@ EXCLUDE_SYMBOLS = libcamera::BoundMethodArgs \
EXCLUDE_SYMLINKS = YES EXCLUDE_SYMLINKS = YES
HTML_OUTPUT = api-html
GENERATE_LATEX = NO GENERATE_LATEX = NO
MACRO_EXPANSION = YES MACRO_EXPANSION = YES
EXPAND_ONLY_PREDEF = YES EXPAND_ONLY_PREDEF = YES
INCLUDE_PATH = "@TOP_BUILDDIR@/include" \ INCLUDE_PATH = "@TOP_SRCDIR@/include/libcamera"
"@TOP_SRCDIR@/include"
INCLUDE_FILE_PATTERNS = *.h INCLUDE_FILE_PATTERNS = *.h
IMAGE_PATH = "@TOP_SRCDIR@/Documentation/images" IMAGE_PATH = "@TOP_SRCDIR@/Documentation/images"

View file

@ -2,7 +2,7 @@
.. _api: .. _api:
API Reference API
============= ===
:: Placeholder for Doxygen documentation :: Placeholder for Doxygen documentation

View file

@ -1,7 +1,5 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0 .. SPDX-License-Identifier: CC-BY-SA-4.0
.. include:: documentation-contents.rst
.. _camera-sensor-model: .. _camera-sensor-model:
.. todo: Move to Doxygen-generated documentation .. todo: Move to Doxygen-generated documentation

View file

@ -1,7 +1,5 @@
.. SPDX-License-Identifier: CC-BY-4.0 .. SPDX-License-Identifier: CC-BY-4.0
.. include:: documentation-contents.rst
.. _code-of-conduct: .. _code-of-conduct:
Contributor Covenant Code of Conduct Contributor Covenant Code of Conduct

View file

@ -1,7 +1,5 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0 .. SPDX-License-Identifier: CC-BY-SA-4.0
.. include:: documentation-contents.rst
.. _coding-style-guidelines: .. _coding-style-guidelines:
Coding Style Guidelines Coding Style Guidelines
@ -217,7 +215,7 @@ shall be avoided when possible, but are allowed when required (for instance to
implement factories with auto-registration). They shall not depend on any other implement factories with auto-registration). They shall not depend on any other
global variable, should run a minimal amount of code in the constructor and global variable, should run a minimal amount of code in the constructor and
destructor, and code that contains dependencies should be moved to a later destructor, and code that contains dependencies should be moved to a later
point in time. point in time.
Error Handling Error Handling
~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~

View file

@ -37,11 +37,8 @@ author = u'Kieran Bingham, Jacopo Mondi, Laurent Pinchart, Niklas Söderlund'
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones. # ones.
extensions = [ extensions = [
'sphinx.ext.graphviz'
] ]
graphviz_output_format = 'svg'
# Add any paths that contain templates here, relative to this directory. # Add any paths that contain templates here, relative to this directory.
templates_path = [] templates_path = []
@ -64,12 +61,7 @@ language = 'en'
# List of patterns, relative to source directory, that match files and # List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files. # directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path. # This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [ exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
'_build',
'Thumbs.db',
'.DS_Store',
'documentation-contents.rst',
]
# The name of the Pygments (syntax highlighting) style to use. # The name of the Pygments (syntax highlighting) style to use.
pygments_style = None pygments_style = None

View file

@ -1,331 +0,0 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
Design of Exposure and Gain controls
====================================
This document explains the design and rationale of the controls related to
exposure and gain. This includes the all-encompassing auto-exposure (AE), the
manual exposure control, and the manual gain control.
Description of the problem
--------------------------
Sub controls
^^^^^^^^^^^^
There is more than one control that makes up total exposure: exposure time,
gain, and aperture (though for now we will not consider aperture). We already
had individual controls for setting the values of manual exposure and manual
gain, but for switching between auto mode and manual mode we only had a
high-level boolean AeEnable control that would set *both* exposure and gain to
auto mode or manual mode; we had no way to set one to auto and the other to
manual.
So, we need to introduce two new controls to act as "levers" to indicate
individually for exposure and gain if the value would come from AEGC or if it
would come from the manual control value.
Aperture priority
^^^^^^^^^^^^^^^^^
We eventually may need to support aperture, and so whatever our solution is for
having only some controls on auto and the others on manual needs to be
extensible.
Flickering when going from auto to manual
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
When a manual exposure or gain value is requested by the application, it costs
a few frames worth of time for them to take effect. This means that during a
transition from auto to manual, there would be flickering in the control values
and the transition won't be smooth.
Take for instance the following flow, where we start on auto exposure (which
for the purposes of the example increments by 1 each frame) and we want to
switch seamlessly to manual exposure, which involves copying the exposure value
computed by the auto exposure algorithm:
::
+-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+
| N | | N+1 | | N+2 | | N+3 | | N+4 | | N+5 | | N+6 |
+-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+
Mode requested: Auto Auto Auto Manual Manual Manual Manual
Exp requested: N/A N/A N/A 2 2 2 2
Set in Frame: N+2 N+3 N+4 N+5 N+6 N+7 N+8
Mode used: Auto Auto Auto Auto Auto Manual Manual
Exp used: 0 1 2 3 4 2 2
As we can see, after frame N+2 completes, we copy the exposure value that was
used for frame N+2 (which was computed by AE algorithm), and queue that value
into request N+3 with manual mode on. However, as it takes two frames for the
exposure to be set, the exposure still changes since it is set by AE, and we
get a flicker in the exposure during the switch from auto to manual.
A solution is to *not submit* any exposure value when manual mode is enabled,
and wait until the manual mode has been "applied" before copying the exposure
value:
::
+-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+
| N | | N+1 | | N+2 | | N+3 | | N+4 | | N+5 | | N+6 |
+-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+
Mode requested: Auto Auto Auto Manual Manual Manual Manual
Exp requested: N/A N/A N/A None None None 5
Set in Frame: N+2 N+3 N+4 N+5 N+6 N+7 N+8
Mode used: Auto Auto Auto Auto Auto Manual Manual
Exp used: 0 1 2 3 4 5 5
In practice, this works. However, libcamera has a policy where once a control
is submitted, its value is saved and does not need to be resubmitted. If the
manual exposure value was set while auto mode was on, in theory the value would
be saved, so when manual mode is enabled, the exposure value that was
previously set would immediately be used. Clearly this solution isn't correct,
but it can serve as the basis for a proper solution, with some more rigorous
rules.
Existing solutions
------------------
Raspberry Pi
^^^^^^^^^^^^
The Raspberry Pi IPA gets around the lack of individual AeEnable controls for
exposure and gain by using magic values. When AeEnable is false, if one of the
manual control values was set to 0 then the value computed by AEGC would be
used for just that control. This solution isn't desirable, as it prevents
that magic value from being used as a valid value.
To get around the flickering issue, when AeEnable is false, the Raspberry Pi
AEGC simply stops updating the values to be set, without restoring the
previously set manual exposure time and gain. This works, but is not a proper
solution.
Android
^^^^^^^
The Android HAL specification requires that exposure and gain (sensitivity)
must both be manual or both be auto. It cannot be that one is manual while the
other is auto, so they simply don't support sub controls.
For the flickering issue, the Android HAL has an AeLock control. To transition
from auto to manual, the application would keep AE on auto, and turn on the
lock. Once the lock has propagated through, then the value can be copied from
the result into the request and the lock disabled and the mode set to manual.
The problem with this solution is, besides the extra complexity, that it is
ambiguous what happens if there is a state transition from manual to locked
(even though it's a state transition that doesn't make sense). If locked is
defined to "use the last automatically computed values" then it could use the
values from the last time AE was set to auto, or it would be undefined if AE
was never auto (eg. it started out as manual), or if AE is implemented to run
in the background it could just use the current values that are computed. If
locked is defined to "use the last value that was set" there would be less
ambiguity. Still, it's better if we can make it impossible to execute this
nonsensical state transition, and if we can reduce the complexity of having
this extra control or extra setting on a lever.
Summary of goals
----------------
- We need a lock of some sort, to instruct the AEGC to not update output
results
- We need manual modes, to override the values computed by the AEGC
- We need to support seamless transitions from auto to manual, and do so
without flickering
- We need custom minimum values for the manual controls; that is, no magic
values for enabling/disabling auto
- All of these need to be done with AE sub-controls (exposure time, analogue
gain) and be extensible to aperture in the future
Our solution
------------
A diagram of our solution:
::
+----------------------------+-------------+------------------+-----------------+
| INPUT | ALGORITHM | RESULT | OUTPUT |
+----------------------------+-------------+------------------+-----------------+
ExposureTimeMode ExposureTimeMode
---------------------+----------------------------------------+----------------->
0: Auto | |
1: Manual | V
| |\
| | \
| /----------------------------------> | 1| ExposureTime
| | +-------------+ exposure time | | -------------->
\--)--> | | --------------> | 0|
ExposureTime | | | | /
------------------------+--> | | |/
| | AeState
| AEGC | ----------------------------------->
AnalogueGain | |
------------------------+--> | | |\
| | | | \
/--)--> | | --------------> | 0| AnalogueGain
| | +-------------+ analogue gain | | -------------->
| \----------------------------------> | 1|
| | /
| |/
| ^
AnalogueGainMode | | AnalogueGainMode
---------------------+----------------------------------------+----------------->
0: Auto
1: Manual
AeEnable
- True -> ExposureTimeMode:Auto + AnalogueGainMode:Auto
- False -> ExposureTimeMode:Manual + AnalogueGainMode:Manual
The diagram is divided in four sections horizontally:
- Input: The values received from the request controls
- Algorithm: The algorithm itself
- Result: The values calculated by the algorithm
- Output: The values reported in result metadata and applied to the device
The four input controls are divided between manual values (ExposureTime and
AnalogueGain), and operation modes (ExposureTimeMode and AnalogueGainMode). The
former are the manual values, the latter control how they're applied. The two
modes are independent from each other, and each can take one of two values:
- Auto (0): The AGC computes the value normally. The AGC result is applied
to the output. The manual value is ignored *and is not retained*.
- Manual (1): The AGC uses the manual value internally. The corresponding
manual control from the request is applied to the output. The AGC result
is ignored.
The AeState control reports the state of the unified AEGC block. If both
ExposureTimeMode and AnalogueGainMode are set to manual then it will report
Idle. If at least one of the two is set to auto, then AeState will report
if the AEGC has Converged or not (Searching). This control replaces the old
AeLocked control, as it was insufficient for reporting the AE state.
There is a caveat to manual mode: the manual control value is not retained if
it is set during auto mode. This means that if manual mode is entered without
also setting the manual value, then it will enter a state similar to "locked",
where the last automatically computed value while the mode was auto will be
used. Once the manual value is set, then that will be used and retained as
usual.
This simulates an auto -> locked -> manual or auto -> manual state transition,
and makes it impossible to do the nonsensical manual -> locked state
transition.
AeEnable still exists to allow applications to set the mode of all the
sub-controls at once. Besides being for convenience, this will also be useful
when we eventually implement an aperture control. This is because applications
written before aperture support becomes available would still be able
to set aperture mode to auto or manual, as opposed to having the aperture stuck
at auto while the application really wanted manual. Although the aperture would
still be stuck at an uncontrollable value, at least it would be at a static
usable value as opposed to varying via the AEGC algorithm.
With this solution, the earlier example would become:
::
+-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+
| N+2 | | N+3 | | N+4 | | N+5 | | N+6 | | N+7 | | N+8 | | N+9 | | N+10|
+-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+ +-----+
Mode requested: Auto Manual Manual Manual Manual Manual Manual Manual Manual
Exp requested: N/A None None None None 10 None 10 10
Set in Frame: N+4 N+5 N+6 N+7 N+8 N+9 N+10 N+11 N+12
Mode used: Auto Auto Auto Manual Manual Manual Manual Manual Manual
Exp used: 2 3 4 5 5 5 5 10 10
This example is extended by a few frames to exhibit the simulated "locked"
state. At frame N+5 the application has confirmed that the manual mode has been
entered, but does not provide a manual value until request N+7. Thus, the value
that is used in requests N+5 and N+6 (where the mode is disabled), comes from
the last value that was used when the mode was auto, which comes from frame
N+4.
Then, in N+7, a manual value of 10 is supplied. It takes until frame N+9 for
the exposure to be applied. N+8 does not supply a manual value, but the last
supplied value is retained, so a manual value of 10 is still used and set in
frame N+10.
Although this behavior is the same as what we had with waiting for the manual
mode to propagate (in the section "Description of the problem"), this time it
is correct as we have defined specifically that if a manual value was specified
while the mode was auto, it will not be retained.
Description of the controls
---------------------------
As described above, libcamera offers the following controls related to exposure
and gain:
- AnalogueGain
- AnalogueGainMode
- ExposureTime
- ExposureTimeMode
- AeState
- AeEnable
Auto-exposure and auto-gain can be enabled and disabled separately using the
ExposureTimeMode and AnalogueGainMode controls respectively. The AeEnable
control can also be used, as it sets both of the modes simultaneously. The
AeEnable control is not returned in metadata.
When the respective mode is set to auto, the respective value that is computed
by the AEGC algorithm is applied to the image sensor. Any value that is
supplied in the manual ExposureTime/AnalogueGain control is ignored and not
retained. Another way to understand this is that when the mode transitions from
auto to manual, the internally stored control value is overwritten with the
last value computed by the auto algorithm.
This means that when we transition from auto to manual without supplying a
manual control value, the last value that was set by the AEGC algorithm will
keep being used. This can be used to do a flickerless transition from auto to
manual as described earlier. If the camera started out in manual mode and no
corresponding value has been supplied yet, then a best-effort default value
shall be set.
The manual control value can be set in the same request as setting the mode to
auto if the desired manual control value is already known.
Transitioning from manual to auto shall be implicitly flickerless, as the AEGC
algorithms are expected to start running from the last manual value.
The AeState metadata reports the state of the AE algorithm. As AE cannot
compute exposure and gain separately, the state of the AE component is
unified. There are three states: Idle, Searching, and Converged.
The state shall be Idle if both ExposureTimeMode and AnalogueGainMode
are set to Manual. If the camera only supports one of the two controls,
then the state shall be Idle if that one control is set to Manual. If
the camera does not support Manual for at least one of the two controls,
then the state will never be Idle, as AE will always be running.
The state shall be Searching if at least one of exposure or gain calculated
by the AE algorithm is used (that is, at least one of the two modes is Auto),
*and* the value(s) have not converged yet.
The state shall be Converged if at least one of exposure or gain calculated
by the AE algorithm is used (that is, at least one of the two modes is Auto),
*and* the value(s) have converged.

400
Documentation/docs.rst Normal file
View file

@ -0,0 +1,400 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. contents::
:local:
*************
Documentation
*************
.. toctree::
:hidden:
API <api-html/index>
API
===
The libcamera API is extensively documented using Doxygen. The :ref:`API
nightly build <api>` contains the most up-to-date API documentation, built from
the latest master branch.
Feature Requirements
====================
Device enumeration
------------------
The library shall support enumerating all camera devices available in the
system, including both fixed cameras and hotpluggable cameras. It shall
support cameras plugged and unplugged after the initialization of the
library, and shall offer a mechanism to notify applications of camera plug
and unplug.
The following types of cameras shall be supported:
* Internal cameras designed for point-and-shoot still image and video
capture usage, either controlled directly by the CPU, or exposed through
an internal USB bus as a UVC device.
* External UVC cameras designed for video conferencing usage.
Other types of camera, including analog cameras, depth cameras, thermal
cameras, external digital picture or movie cameras, are out of scope for
this project.
A hardware device that includes independent camera sensors, such as front
and back sensors in a phone, shall be considered as multiple camera devices
for the purpose of this library.
Independent Camera Devices
--------------------------
When multiple cameras are present in the system and are able to operate
independently from each other, the library shall expose them as multiple
camera devices and support parallel operation without any additional usage
restriction apart from the limitations inherent to the hardware (such as
memory bandwidth, CPU usage or number of CSI-2 receivers for instance).
Independent processes shall be able to use independent camera devices
without interfering with each other. A single camera device shall be
usable by a single process at a time.
Multiple streams support
------------------------
The library shall support multiple video streams running in parallel
for each camera device, within the limits imposed by the system.
Per frame controls
------------------
The library shall support controlling capture parameters for each stream
on a per-frame basis, on a best effort basis based on the capabilities of the
hardware and underlying software stack (including kernel drivers and
firmware). It shall apply capture parameters to the frame they target, and
report the value of the parameters that have effectively been used for each
captured frame.
When a camera device supports multiple streams, the library shall allow both
control of each stream independently, and control of multiple streams
together. Streams that are controlled together shall be synchronized. No
synchronization is required for streams controlled independently.
Capability Enumeration
----------------------
The library shall expose capabilities of each camera device in a way that
allows applications to discover those capabilities dynamically. Applications
shall be allowed to cache capabilities for as long as they are using the
library. If capabilities can change at runtime, the library shall offer a
mechanism to notify applications of such changes. Applications shall not
cache capabilities in long term storage between runs.
Capabilities shall be discovered dynamically at runtime from the device when
possible, and may come, in part or in full, from platform configuration
data.
Device Profiles
---------------
The library may define different camera device profiles, each with a minimum
set of required capabilities. Applications may use those profiles to quickly
determine the level of features exposed by a device without parsing the full
list of capabilities. Camera devices may implement additional capabilities
on top of the minimum required set for the profile they expose.
3A and Image Enhancement Algorithms
-----------------------------------
The camera devices shall implement auto exposure, auto gain and auto white
balance. Camera devices that include a focus lens shall implement auto
focus. Additional image enhancement algorithms, such as noise reduction or
video stabilization, may be implemented.
All algorithms may be implemented in hardware or firmware outside of the
library, or in software in the library. They shall all be controllable by
applications.
The library shall be architectured to isolate the 3A and image enhancement
algorithms in a component with a documented API, respectively called the 3A
component and the 3A API. The 3A API shall be stable, and shall allow both
open-source and closed-source implementations of the 3A component.
The library may include statically-linked open-source 3A components, and
shall support dynamically-linked open-source and closed-source 3A
components.
Closed-source 3A Component Sandboxing
-------------------------------------
For security purposes, it may be desired to run closed-source 3A components
in a separate process. The 3A API would in such a case be transported over
IPC. The 3A API shall make it possible to use any IPC mechanism that
supports passing file descriptors.
The library may implement an IPC mechanism, and shall support third-party
platform-specific IPC mechanisms through the implementation of a
platform-specific 3A API wrapper. No modification to the library shall be
needed to use such third-party IPC mechanisms.
The 3A component shall not directly access any device node on the system.
Such accesses shall instead be performed through the 3A API. The library
shall validate all accesses and restrict them to what is absolutely required
by 3A components.
V4L2 Compatibility Layer
------------------------
The project shall support traditional V4L2 applications through an additional
libcamera wrapper library. The wrapper library shall trap all accesses to
camera devices through `LD_PRELOAD`, and route them through libcamera to
emulate a high-level V4L2 camera device. It shall expose camera device
features on a best-effort basis, and aim for the level of features
traditionally available from a UVC camera designed for video conferencing.
Android Camera HAL v3 Compatibility
-----------------------------------
The library API shall expose all the features required to implement an
Android Camera HAL v3 on top of libcamera. Some features of the HAL may be
omitted as long as they can be implemented separately in the HAL, such as
JPEG encoding, or YUV reprocessing.
Camera Stack
============
::
a c / +-------------+ +-------------+ +-------------+ +-------------+
p a | | Native | | Framework | | Native | | Android |
p t | | V4L2 | | Application | | libcamera | | Camera |
l i | | Application | | (gstreamer) | | Application | | Framework |
i o \ +-------------+ +-------------+ +-------------+ +-------------+
n ^ ^ ^ ^
| | | |
l a | | | |
i d v v | v
b a / +-------------+ +-------------+ | +-------------+
c p | | V4L2 | | Camera | | | Android |
a t | | Compat. | | Framework | | | Camera |
m a | | | | (gstreamer) | | | HAL |
e t \ +-------------+ +-------------+ | +-------------+
r i ^ ^ | ^
a o | | | |
n | | | |
/ | ,................................................
| | ! : Language : !
l f | | ! : Bindings : !
i r | | ! : (optional) : !
b a | | \...............................................'
c m | | | | |
a e | | | | |
m w | v v v v
e o | +----------------------------------------------------------------+
r r | | |
a k | | libcamera |
| | |
\ +----------------------------------------------------------------+
^ ^ ^
Userspace | | |
------------------------ | ---------------- | ---------------- | ---------------
Kernel | | |
v v v
+-----------+ +-----------+ +-----------+
| Media | <--> | Video | <--> | V4L2 |
| Device | | Device | | Subdev |
+-----------+ +-----------+ +-----------+
The camera stack comprises four software layers. From bottom to top:
* The kernel drivers control the camera hardware and expose a
low-level interface to userspace through the Linux kernel V4L2
family of APIs (Media Controller API, V4L2 Video Device API and
V4L2 Subdev API).
* The libcamera framework is the core part of the stack. It
handles all control of the camera devices in its core component,
libcamera, and exposes a native C++ API to upper layers. Optional
language bindings allow interfacing to libcamera from other
programming languages.
Those components live in the same source code repository and
all together constitute the libcamera framework.
* The libcamera adaptation is an umbrella term designating the
components that interface to libcamera in other frameworks.
Notable examples are a V4L2 compatibility layer, a gstreamer
libcamera element, and an Android camera HAL implementation based
on libcamera.
Those components can live in the libcamera project source code
in separate repositories, or move to their respective project's
repository (for instance the gstreamer libcamera element).
* The applications and upper level frameworks are based on the
libcamera framework or libcamera adaptation, and are outside of
the scope of the libcamera project.
libcamera Architecture
======================
::
---------------------------< libcamera Public API >---------------------------
^ ^
| |
v v
+-------------+ +-------------------------------------------------+
| Camera | | Camera Device |
| Devices | | +---------------------------------------------+ |
| Manager | | | Device-Agnostic | |
+-------------+ | | | |
^ | | +------------------------+ |
| | | | ~~~~~~~~~~~~~~~~~~~~~ |
| | | | { +---------------+ } |
| | | | } | ////Image//// | { |
| | | | <-> | /Processing// | } |
| | | | } | /Algorithms// | { |
| | | | { +---------------+ } |
| | | | ~~~~~~~~~~~~~~~~~~~~~ |
| | | | ======================== |
| | | | +---------------+ |
| | | | | //Pipeline/// | |
| | | | <-> | ///Handler/// | |
| | | | | ///////////// | |
| | +--------------------+ +---------------+ |
| | Device-Specific |
| +-------------------------------------------------+
| ^ ^
| | |
v v v
+--------------------------------------------------------------------+
| Helpers and Support Classes |
| +-------------+ +-------------+ +-------------+ +-------------+ |
| | MC & V4L2 | | Buffers | | Sandboxing | | Plugins | |
| | Support | | Allocator | | IPC | | Manager | |
| +-------------+ +-------------+ +-------------+ +-------------+ |
| +-------------+ +-------------+ |
| | Pipeline | | ... | |
| | Runner | | | |
| +-------------+ +-------------+ |
+--------------------------------------------------------------------+
/// Device-Specific Components
~~~ Sandboxing
While offering a unified API towards upper layers, and presenting
itself as a single library, libcamera isn't monolithic. It exposes
multiple components through its public API, is built around a set of
separate helpers internally, uses device-specific components and can
load dynamic plugins.
Camera Devices Manager
The Camera Devices Manager provides a view of available cameras
in the system. It performs cold enumeration and runtime camera
management, and supports a hotplug notification mechanism in its
public API.
To avoid the cost associated with cold enumeration of all devices
at application start, and to arbitrate concurrent access to camera
devices, the Camera Devices Manager could later be split to a
separate service, possibly with integration in platform-specific
device management.
Camera Device
The Camera Device represents a camera device to upper layers. It
exposes full control of the device through the public API, and is
thus the highest level object exposed by libcamera.
Camera Device instances are created by the Camera Devices
Manager. An optional function to create new instances could be exposed
through the public API to speed up initialization when the upper
layer knows how to directly address camera devices present in the
system.
Pipeline Handler
The Pipeline Handler manages complex pipelines exposed by the kernel drivers
through the Media Controller and V4L2 APIs. It abstracts pipeline handling to
hide device-specific details to the rest of the library, and implements both
pipeline configuration based on stream configuration, and pipeline runtime
execution and scheduling when needed by the device.
This component is device-specific and is part of the libcamera code base. As
such it is covered by the same free software license as the rest of libcamera
and needs to be contributed upstream by device vendors. The Pipeline Handler
lives in the same process as the rest of the library, and has access to all
helpers and kernel camera-related devices.
Image Processing Algorithms
Together with the hardware image processing and hardware statistics
collection, the Image Processing Algorithms implement 3A (Auto-Exposure,
Auto-White Balance and Auto-Focus) and other algorithms. They run on the CPU
and interact with the kernel camera devices to control hardware image
processing based on the parameters supplied by upper layers, closing the
control loop of the ISP.
This component is device-specific and is loaded as an external plugin. It can
be part of the libcamera code base, in which case it is covered by the same
license, or provided externally as an open-source or closed-source component.
The component is sandboxed and can only interact with libcamera through
internal APIs specifically marked as such. In particular it will have no
direct access to kernel camera devices, and all its accesses to image and
metadata will be mediated by dmabuf instances explicitly passed to the
component. The component must be prepared to run in a process separate from
the main libcamera process, and to have a very restricted view of the system,
including no access to networking APIs and limited access to file systems.
The sandboxing mechanism isn't defined by libcamera. One example
implementation will be provided as part of the project, and platform vendors
will be able to provide their own sandboxing mechanism as a plugin.
libcamera should provide a basic implementation of Image Processing
Algorithms, to serve as a reference for the internal API. Device vendors are
expected to provide a full-fledged implementation compatible with their
Pipeline Handler. One goal of the libcamera project is to create an
environment in which the community will be able to compete with the
closed-source vendor binaries and develop a high quality open source
implementation.
Helpers and Support Classes
While Pipeline Handlers are device-specific, implementations are expected to
share code due to usage of identical APIs towards the kernel camera drivers
and the Image Processing Algorithms. This includes without limitation handling
of the MC and V4L2 APIs, buffer management through dmabuf, and pipeline
discovery, configuration and scheduling. Such code will be factored out to
helpers when applicable.
Other parts of libcamera will also benefit from factoring code out to
self-contained support classes, even if such code is present only once in the
code base, in order to keep the source code clean and easy to read. This
should be the case for instance for plugin management.
V4L2 Compatibility Layer
------------------------
V4L2 compatibility is achieved through a shared library that traps all
accesses to camera devices and routes them to libcamera to emulate high-level
V4L2 camera devices. It is injected in a process address space through
`LD_PRELOAD` and is completely transparent for applications.
The compatibility layer exposes camera device features on a best-effort basis,
and aims for the level of features traditionally available from a UVC camera
designed for video conferencing.
Android Camera HAL
------------------
Camera support for Android is achieved through a generic Android
camera HAL implementation on top of libcamera. The HAL will implement internally
features required by Android and missing from libcamera, such as JPEG encoding
support.
The Android camera HAL implementation will initially target the
LIMITED hardware level, with support for the FULL level then being gradually
implemented.

View file

@ -1,35 +0,0 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. container:: documentation-nav
* **Documentation for Users**
* :doc:`Introduction </introduction>`
* :doc:`/feature_requirements`
* :doc:`/guides/application-developer`
* :doc:`/python-bindings`
* :doc:`/environment_variables`
* :doc:`/api-html/index`
* :doc:`/code-of-conduct`
* |
* **Documentation for Developers**
* :doc:`/libcamera_architecture`
* :doc:`/guides/pipeline-handler`
* :doc:`/guides/ipa`
* :doc:`/camera-sensor-model`
* :doc:`/guides/tracing`
* :doc:`/software-isp-benchmarking`
* :doc:`/coding-style`
* :doc:`/internal-api-html/index`
* |
* **Documentation for System Integrators**
* :doc:`/lens_driver_requirements`
* :doc:`/sensor_driver_requirements`
..
The following directive adds the "documentation" class to all of the pages
generated by sphinx. This is not relevant in libcamera nor addressed in the
theme's CSS, since all of the pages here are documentation. It **is** used
to properly format the documentation pages on libcamera.org and so should not
be removed.
.. rst-class:: documentation

View file

@ -1,7 +1,5 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0 .. SPDX-License-Identifier: CC-BY-SA-4.0
.. include:: documentation-contents.rst
Environment variables Environment variables
===================== =====================
@ -39,11 +37,6 @@ LIBCAMERA_IPA_MODULE_PATH
Example value: ``${HOME}/.libcamera/lib:/opt/libcamera/vendor/lib`` Example value: ``${HOME}/.libcamera/lib:/opt/libcamera/vendor/lib``
LIBCAMERA_IPA_PROXY_PATH
Define custom full path for a proxy worker for a given executable name.
Example value: ``${HOME}/.libcamera/proxy/worker:/opt/libcamera/vendor/proxy/worker``
LIBCAMERA_PIPELINES_MATCH_LIST LIBCAMERA_PIPELINES_MATCH_LIST
Define an ordered list of pipeline names to be used to match the media Define an ordered list of pipeline names to be used to match the media
devices in the system. The pipeline handler names used to populate the devices in the system. The pipeline handler names used to populate the
@ -57,11 +50,6 @@ LIBCAMERA_RPI_CONFIG_FILE
Example value: ``/usr/local/share/libcamera/pipeline/rpi/vc4/minimal_mem.yaml`` Example value: ``/usr/local/share/libcamera/pipeline/rpi/vc4/minimal_mem.yaml``
LIBCAMERA_<NAME>_TUNING_FILE
Define a custom IPA tuning file to use with the pipeline handler `NAME`.
Example value: ``/usr/local/share/libcamera/ipa/rpi/vc4/custom_sensor.json``
Further details Further details
--------------- ---------------

View file

@ -1,150 +0,0 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. include:: documentation-contents.rst
Feature Requirements
====================
Device enumeration
------------------
The library shall support enumerating all camera devices available in the
system, including both fixed cameras and hotpluggable cameras. It shall
support cameras plugged and unplugged after the initialization of the
library, and shall offer a mechanism to notify applications of camera plug
and unplug.
The following types of cameras shall be supported:
* Internal cameras designed for point-and-shoot still image and video
capture usage, either controlled directly by the CPU, or exposed through
an internal USB bus as a UVC device.
* External UVC cameras designed for video conferencing usage.
Other types of camera, including analog cameras, depth cameras, thermal
cameras, external digital picture or movie cameras, are out of scope for
this project.
A hardware device that includes independent camera sensors, such as front
and back sensors in a phone, shall be considered as multiple camera devices
for the purpose of this library.
Independent Camera Devices
--------------------------
When multiple cameras are present in the system and are able to operate
independently from each other, the library shall expose them as multiple
camera devices and support parallel operation without any additional usage
restriction apart from the limitations inherent to the hardware (such as
memory bandwidth, CPU usage or number of CSI-2 receivers for instance).
Independent processes shall be able to use independent camera devices
without interfering with each other. A single camera device shall be
usable by a single process at a time.
Multiple streams support
------------------------
The library shall support multiple video streams running in parallel
for each camera device, within the limits imposed by the system.
Per frame controls
------------------
The library shall support controlling capture parameters for each stream
on a per-frame basis, on a best effort basis based on the capabilities of the
hardware and underlying software stack (including kernel drivers and
firmware). It shall apply capture parameters to the frame they target, and
report the value of the parameters that have effectively been used for each
captured frame.
When a camera device supports multiple streams, the library shall allow both
control of each stream independently, and control of multiple streams
together. Streams that are controlled together shall be synchronized. No
synchronization is required for streams controlled independently.
Capability Enumeration
----------------------
The library shall expose capabilities of each camera device in a way that
allows applications to discover those capabilities dynamically. Applications
shall be allowed to cache capabilities for as long as they are using the
library. If capabilities can change at runtime, the library shall offer a
mechanism to notify applications of such changes. Applications shall not
cache capabilities in long term storage between runs.
Capabilities shall be discovered dynamically at runtime from the device when
possible, and may come, in part or in full, from platform configuration
data.
Device Profiles
---------------
The library may define different camera device profiles, each with a minimum
set of required capabilities. Applications may use those profiles to quickly
determine the level of features exposed by a device without parsing the full
list of capabilities. Camera devices may implement additional capabilities
on top of the minimum required set for the profile they expose.
3A and Image Enhancement Algorithms
-----------------------------------
The library shall provide a basic implementation of Image Processing Algorithms
to serve as a reference for the internal API. This shall include auto exposure
and gain and auto white balance. Camera devices that include a focus lens shall
implement auto focus. Additional image enhancement algorithms, such as noise
reduction or video stabilization, may be implemented. Device vendors are
expected to provide a fully-fledged implementation compatible with their
Pipeline Handler. One goal of the libcamera project is to create an environment
in which the community will be able to compete with the closed-source vendor
binaries and develop a high quality open source implementation.
All algorithms may be implemented in hardware or firmware outside of the
library, or in software in the library. They shall all be controllable by
applications.
The library shall be architectured to isolate the 3A and image enhancement
algorithms in a component with a documented API, respectively called the 3A
component and the 3A API. The 3A API shall be stable, and shall allow both
open-source and closed-source implementations of the 3A component.
The library may include statically-linked open-source 3A components, and
shall support dynamically-linked open-source and closed-source 3A
components.
Closed-source 3A Component Sandboxing
-------------------------------------
For security purposes, it may be desired to run closed-source 3A components
in a separate process. The 3A API would in such a case be transported over
IPC. The 3A API shall make it possible to use any IPC mechanism that
supports passing file descriptors.
The library may implement an IPC mechanism, and shall support third-party
platform-specific IPC mechanisms through the implementation of a
platform-specific 3A API wrapper. No modification to the library shall be
needed to use such third-party IPC mechanisms.
The 3A component shall not directly access any device node on the system.
Such accesses shall instead be performed through the 3A API. The library
shall validate all accesses and restrict them to what is absolutely required
by 3A components.
V4L2 Compatibility Layer
------------------------
The project shall support traditional V4L2 application through an additional
libcamera wrapper library. The wrapper library shall trap all accesses to
camera devices through `LD_PRELOAD`, and route them through libcamera to
emulate a high-level V4L2 camera device. It shall expose camera device
features on a best-effort basis, and aim for the level of features
traditionally available from a UVC camera designed for video conferencing.
Android Camera HAL v3 Compatibility
-----------------------------------
The library API shall expose all the features required to implement an
Android Camera HAL v3 on top of libcamera. Some features of the HAL may be
omitted as long as they can be implemented separately in the HAL, such as
JPEG encoding, or YUV reprocessing.

View file

@ -1,46 +0,0 @@
#!/usr/bin/env python3
# SPDX-License-Identifier: GPL-2.0-or-later
# Copyright (C) 2024, Google Inc.
#
# Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
#
# Generate Doxyfile from a template
import argparse
import os
import string
import sys
def fill_template(template, data):
    """Read a template file and substitute its ${key} placeholders.

    Parameters
    ----------
    template : str
        Path to the UTF-8 encoded template file.
    data : dict
        Mapping of placeholder names to replacement values.

    Returns
    -------
    str
        The template contents with every placeholder substituted.

    Raises
    ------
    KeyError
        If the template references a placeholder missing from data.
    """
    # Use a context manager so the file descriptor is released
    # deterministically instead of waiting for garbage collection.
    with open(template, 'rb') as f:
        text = f.read().decode('utf-8')
    return string.Template(text).substitute(data)
def main(argv):
    """Entry point: render the Doxyfile template and write the result.

    Each input file is resolved to an absolute path, double-quoted for
    Doxygen, and joined with line continuations before being substituted
    into the template's ``inputs`` placeholder.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-o', dest='output', metavar='file',
                        type=argparse.FileType('w', encoding='utf-8'),
                        default=sys.stdout,
                        help='Output file name (default: standard output)')
    parser.add_argument('template', metavar='doxyfile.tmpl', type=str,
                        help='Doxyfile template')
    parser.add_argument('inputs', type=str, nargs='*',
                        help='Input files')
    args = parser.parse_args(argv[1:])

    # Quote each resolved input path for Doxygen's INPUT list.
    quoted_paths = ['"%s"' % os.path.realpath(path) for path in args.inputs]

    # Continuation plus 25 spaces aligns each entry under the first one.
    separator = ' \\\n' + ' ' * 25
    rendered = fill_template(args.template,
                             {'inputs': separator.join(quoted_paths)})
    args.output.write(rendered)

    return 0


if __name__ == '__main__':
    sys.exit(main(sys.argv))

View file

@ -1,5 +1,4 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0 .. SPDX-License-Identifier: CC-BY-SA-4.0
.. Getting started information is defined in the project README file. .. Getting started information is defined in the project README file.
.. include:: ../README.rst .. include:: ../README.rst
:start-after: .. section-begin-getting-started :start-after: .. section-begin-getting-started

View file

@ -1,7 +1,5 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0 .. SPDX-License-Identifier: CC-BY-SA-4.0
.. include:: ../documentation-contents.rst
Using libcamera in a C++ application Using libcamera in a C++ application
==================================== ====================================
@ -118,21 +116,19 @@ available.
.. code:: cpp .. code:: cpp
auto cameras = cm->cameras(); if (cm->cameras().empty()) {
if (cameras.empty()) {
std::cout << "No cameras were identified on the system." std::cout << "No cameras were identified on the system."
<< std::endl; << std::endl;
cm->stop(); cm->stop();
return EXIT_FAILURE; return EXIT_FAILURE;
} }
std::string cameraId = cameras[0]->id(); std::string cameraId = cm->cameras()[0]->id();
camera = cm->get(cameraId); camera = cm->get(cameraId);
/* /*
* Note that `camera` may not compare equal to `cameras[0]`. * Note that is equivalent to:
* In fact, it might simply be a `nullptr`, as the particular * camera = cm->cameras()[0];
* device might have disappeared (and reappeared) in the meantime.
*/ */
Once a camera has been selected an application needs to acquire an exclusive Once a camera has been selected an application needs to acquire an exclusive
@ -483,7 +479,7 @@ instance. An example of how to write image data to disk is available in the
`FileSink class`_ which is a part of the ``cam`` utility application in the `FileSink class`_ which is a part of the ``cam`` utility application in the
libcamera repository. libcamera repository.
.. _FileSink class: https://git.libcamera.org/libcamera/libcamera.git/tree/src/apps/cam/file_sink.cpp .. _FileSink class: https://git.libcamera.org/libcamera/libcamera.git/tree/src/cam/file_sink.cpp
With the handling of this request completed, it is possible to re-use the With the handling of this request completed, it is possible to re-use the
request and the associated buffers and re-queue it to the camera request and the associated buffers and re-queue it to the camera
@ -618,7 +614,7 @@ accordingly. In this example, the application file has been named
simple_cam = executable('simple-cam', simple_cam = executable('simple-cam',
'simple-cam.cpp', 'simple-cam.cpp',
dependencies: dependency('libcamera')) dependencies: dependency('libcamera', required : true))
The ``dependencies`` line instructs meson to ask ``pkgconfig`` (or ``cmake``) to The ``dependencies`` line instructs meson to ask ``pkgconfig`` (or ``cmake``) to
locate the ``libcamera`` library, which the test application will be locate the ``libcamera`` library, which the test application will be

View file

@ -0,0 +1,319 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
Developers guide to libcamera
=============================
The Linux kernel handles multimedia devices through the 'Linux media' subsystem
and provides a set of APIs (application programming interfaces) known
collectively as V4L2 (`Video for Linux 2`_) and the `Media Controller`_ API
which provide an interface to interact and control media devices.
Included in this subsystem are drivers for camera sensors, CSI2 (Camera
Serial Interface) receivers, and ISPs (Image Signal Processors)
The usage of these drivers to provide a functioning camera stack is a
responsibility that lies in userspace which is commonly implemented separately
by vendors without a common architecture or API for application developers.
libcamera provides a complete camera stack for Linux based systems to abstract
functionality desired by camera application developers and process the
configuration of hardware and image control algorithms required to obtain
desirable results from the camera.
.. _Video for Linux 2: https://www.linuxtv.org/downloads/v4l-dvb-apis-new/userspace-api/v4l/v4l2.html
.. _Media Controller: https://www.linuxtv.org/downloads/v4l-dvb-apis-new/userspace-api/mediactl/media-controller.html
In this developers guide, we will explore the `Camera Stack`_ and how it
can be visualised at a high level, and explore the internal `Architecture`_ of
the libcamera library with its components. The current `Platform Support`_ is
detailed, as well as an overview of the `Licensing`_ requirements of the
project.
This introduction is followed by a walkthrough tutorial to newcomers wishing to
support a new platform with the `Pipeline Handler Writers Guide`_ and for those
looking to make use of the libcamera native API an `Application Writers Guide`_
provides a tutorial of the key APIs exposed by libcamera.
.. _Pipeline Handler Writers Guide: pipeline-handler.html
.. _Application Writers Guide: application-developer.html
.. TODO: Correctly link to the other articles of the guide
Camera Stack
------------
The libcamera library is implemented in userspace, and makes use of underlying
kernel drivers that directly interact with hardware.
Applications can make use of libcamera through the native `libcamera API`_'s or
through an adaptation layer integrating libcamera into a larger framework.
.. _libcamera API: https://www.libcamera.org/api-html/index.html
::
Application Layer
/ +--------------+ +--------------+ +--------------+ +--------------+
| | Native | | Framework | | Native | | Android |
| | V4L2 | | Application | | libcamera | | Camera |
| | Application | | (gstreamer) | | Application | | Framework |
\ +--------------+ +--------------+ +--------------+ +--------------+
^ ^ ^ ^
| | | |
| | | |
v v | v
Adaptation Layer |
/ +--------------+ +--------------+ | +--------------+
| | V4L2 | | gstreamer | | | Android |
| | Compatibility| | element | | | Camera |
| | (preload) | |(libcamerasrc)| | | HAL |
\ +--------------+ +--------------+ | +--------------+
|
^ ^ | ^
| | | |
| | | |
v v v v
libcamera Framework
/ +--------------------------------------------------------------------+
| | |
| | libcamera |
| | |
\ +--------------------------------------------------------------------+
^ ^ ^
Userspace | | |
--------------------- | ---------------- | ---------------- | ---------------
Kernel | | |
v v v
+-----------+ +-----------+ +-----------+
| Media | <--> | Video | <--> | V4L2 |
| Device | | Device | | Subdev |
+-----------+ +-----------+ +-----------+
The camera stack comprises four software layers. From bottom to top:
* The kernel drivers control the camera hardware and expose a low-level
interface to userspace through the Linux kernel V4L2 family of APIs
(Media Controller API, V4L2 Video Device API and V4L2 Subdev API).
* The libcamera framework is the core part of the stack. It handles all control
of the camera devices in its core component, libcamera, and exposes a native
C++ API to upper layers.
* The libcamera adaptation layer is an umbrella term designating the components
that interface to libcamera in other frameworks. Notable examples are the V4L2
compatibility layer, the gstreamer libcamera element, and the Android camera
HAL implementation based on libcamera which are provided as a part of the
libcamera project.
* The applications and upper level frameworks are based on the libcamera
framework or libcamera adaptation, and are outside of the scope of the
libcamera project, however example native applications (cam, qcam) are
provided for testing.
V4L2 Compatibility Layer
V4L2 compatibility is achieved through a shared library that traps all
accesses to camera devices and routes them to libcamera to emulate high-level
V4L2 camera devices. It is injected in a process address space through
``LD_PRELOAD`` and is completely transparent for applications.
The compatibility layer exposes camera device features on a best-effort basis,
and aims for the level of features traditionally available from a UVC camera
designed for video conferencing.
Android Camera HAL
Camera support for Android is achieved through a generic Android camera HAL
implementation on top of libcamera. The HAL implements features required by
Android and out of scope from libcamera, such as JPEG encoding support.
This component is used to provide support for ChromeOS platforms.
GStreamer element (gstlibcamerasrc)
A `GStreamer element`_ is provided to allow capture from libcamera supported
devices through GStreamer pipelines, and connect to other elements for further
processing.
Development of this element is ongoing and is limited to a single stream.
Native libcamera API
Applications can make use of the libcamera API directly using the C++
API. An example application and walkthrough using the libcamera API can be
followed in the `Application Writers Guide`_
.. _GStreamer element: https://gstreamer.freedesktop.org/documentation/application-development/basics/elements.html
Architecture
------------
While offering a unified API towards upper layers, and presenting itself as a
single library, libcamera isn't monolithic. It exposes multiple components
through its public API and is built around a set of separate helpers internally.
Hardware abstractions are handled through the use of device-specific components
where required and dynamically loadable plugins are used to separate image
processing algorithms from the core libcamera codebase.
::
--------------------------< libcamera Public API >---------------------------
^ ^
| |
v v
+-------------+ +---------------------------------------------------+
| Camera | | Camera Device |
| Manager | | +-----------------------------------------------+ |
+-------------+ | | Device-Agnostic | |
^ | | | |
| | | +--------------------------+ |
| | | | ~~~~~~~~~~~~~~~~~~~~~~~ |
| | | | { +-----------------+ } |
| | | | } | //// Image //// | { |
| | | | <-> | / Processing // | } |
| | | | } | / Algorithms // | { |
| | | | { +-----------------+ } |
| | | | ~~~~~~~~~~~~~~~~~~~~~~~ |
| | | | ========================== |
| | | | +-----------------+ |
| | | | | // Pipeline /// | |
| | | | <-> | /// Handler /// | |
| | | | | /////////////// | |
| | +--------------------+ +-----------------+ |
| | Device-Specific |
| +---------------------------------------------------+
| ^ ^
| | |
v v v
+--------------------------------------------------------------------+
| Helpers and Support Classes |
| +-------------+ +-------------+ +-------------+ +-------------+ |
| | MC & V4L2 | | Buffers | | Sandboxing | | Plugins | |
| | Support | | Allocator | | IPC | | Manager | |
| +-------------+ +-------------+ +-------------+ +-------------+ |
| +-------------+ +-------------+ |
| | Pipeline | | ... | |
| | Runner | | | |
| +-------------+ +-------------+ |
+--------------------------------------------------------------------+
/// Device-Specific Components
~~~ Sandboxing
Camera Manager
The Camera Manager enumerates cameras and instantiates Pipeline Handlers to
manage each Camera that libcamera supports. The Camera Manager supports
hotplug detection and notification events when supported by the underlying
kernel devices.
There is only ever one instance of the Camera Manager running per application.
Each application's instance of the Camera Manager ensures that only a single
application can take control of a camera device at once.
Read the `Camera Manager API`_ documentation for more details.
.. _Camera Manager API: https://libcamera.org/api-html/classlibcamera_1_1CameraManager.html
Camera Device
The Camera class represents a single item of camera hardware that is capable
of producing one or more image streams, and provides the API to interact with
the underlying device.
If a system has multiple instances of the same hardware attached, each has its
own instance of the camera class.
The API exposes full control of the device to upper layers of libcamera through
the public API, making it the highest level object libcamera exposes, and the
object that all other API operations interact with from configuration to
capture.
Read the `Camera API`_ documentation for more details.
.. _Camera API: https://libcamera.org/api-html/classlibcamera_1_1Camera.html
Pipeline Handler
The Pipeline Handler manages the complex pipelines exposed by the kernel
drivers through the Media Controller and V4L2 APIs. It abstracts pipeline
handling to hide device-specific details from the rest of the library, and
implements both pipeline configuration based on stream configuration, and
pipeline runtime execution and scheduling when needed by the device.
The Pipeline Handler lives in the same process as the rest of the library, and
has access to all helpers and kernel camera-related devices.
Hardware abstraction is handled by device specific Pipeline Handlers which are
derived from the Pipeline Handler base class allowing commonality to be shared
among the implementations.
Derived pipeline handlers create Camera device instances based on the devices
they detect and support on the running system, and are responsible for
managing the interactions with a camera device.
More details can be found in the `PipelineHandler API`_ documentation, and the
`Pipeline Handler Writers Guide`_.
.. _PipelineHandler API: https://libcamera.org/api-html/classlibcamera_1_1PipelineHandler.html
Image Processing Algorithms
An image processing algorithm (IPA) component is a loadable plugin that
implements 3A (Auto-Exposure, Auto-White Balance, and Auto-Focus) and other
algorithms.
The algorithms run on the CPU and interact with the camera devices through the
Pipeline Handler to control hardware image processing based on the parameters
supplied by upper layers, maintaining state and closing the control loop
of the ISP.
The component is sandboxed and can only interact with libcamera through the
API provided by the Pipeline Handler and an IPA has no direct access to kernel
camera devices.
Open source IPA modules built with libcamera can be run in the same process
space as libcamera, however external IPA modules are run in a separate process
from the main libcamera process. IPA modules have a restricted view of the
system, including no access to networking APIs and limited access to file
systems.
IPA modules are only required for platforms and devices with an ISP controlled
by the host CPU. Camera sensors which have an integrated ISP are not
controlled through the IPA module.
Platform Support
----------------
The library currently supports the following hardware platforms specifically
with dedicated pipeline handlers:
- Intel IPU3 (ipu3)
- Rockchip RK3399 (rkisp1)
- RaspberryPi 3 and 4 (rpi/vc4)
Furthermore, generic platform support is provided for the following:
- USB video device class cameras (uvcvideo)
- iMX7, Allwinner Sun6i (simple)
- Virtual media controller driver for test use cases (vimc)
Licensing
---------
The libcamera core is covered by the `LGPL-2.1-or-later`_ license. Pipeline
Handlers are a part of the libcamera code base and need to be contributed
upstream by device vendors. IPA modules included in libcamera are covered by a
free software license, however third-parties may develop IPA modules outside of
libcamera and distribute them under a closed-source license, provided they do
not include source code from the libcamera project.
The libcamera project itself contains multiple libraries, applications and
utilities. Licenses are expressed through SPDX tags in text-based files that
support comments, and through the .reuse/dep5 file otherwise. A copy of all
licenses are stored in the LICENSES directory, and a full summary of the
licensing used throughout the project can be found in the COPYING.rst document.
Applications which link dynamically against libcamera and use only the public
API are an independent work of the authors and have no license restrictions
imposed upon them from libcamera.
.. _LGPL-2.1-or-later: https://spdx.org/licenses/LGPL-2.1-or-later.html

View file

@ -1,7 +1,5 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0 .. SPDX-License-Identifier: CC-BY-SA-4.0
.. include:: ../documentation-contents.rst
IPA Writer's Guide IPA Writer's Guide
================== ==================

View file

@ -1,7 +1,5 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0 .. SPDX-License-Identifier: CC-BY-SA-4.0
.. include:: ../documentation-contents.rst
Pipeline Handler Writers Guide Pipeline Handler Writers Guide
============================== ==============================
@ -153,14 +151,13 @@ integrates with the libcamera build system, and a *vivid.cpp* file that matches
the name of the pipeline. the name of the pipeline.
In the *meson.build* file, add the *vivid.cpp* file as a build source for In the *meson.build* file, add the *vivid.cpp* file as a build source for
libcamera by adding it to the global meson ``libcamera_internal_sources`` libcamera by adding it to the global meson ``libcamera_sources`` variable:
variable:
.. code-block:: none .. code-block:: none
# SPDX-License-Identifier: CC0-1.0 # SPDX-License-Identifier: CC0-1.0
libcamera_internal_sources += files([ libcamera_sources += files([
'vivid.cpp', 'vivid.cpp',
]) ])
@ -186,7 +183,7 @@ to the libcamera build options in the top level ``meson_options.txt``.
option('pipelines', option('pipelines',
type : 'array', type : 'array',
choices : ['ipu3', 'rkisp1', 'rpi/pisp', 'rpi/vc4', 'simple', 'uvcvideo', 'vimc', 'vivid'], choices : ['ipu3', 'rkisp1', 'rpi/vc4', 'simple', 'uvcvideo', 'vimc', 'vivid'],
description : 'Select which pipeline handlers to include') description : 'Select which pipeline handlers to include')
@ -213,7 +210,7 @@ implementations for the overridden class members.
std::vector<std::unique_ptr<FrameBuffer>> *buffers) override; std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
int start(Camera *camera, const ControlList *controls) override; int start(Camera *camera, const ControlList *controls) override;
void stopDevice(Camera *camera) override; void stop(Camera *camera) override;
int queueRequestDevice(Camera *camera, Request *request) override; int queueRequestDevice(Camera *camera, Request *request) override;
@ -247,7 +244,7 @@ implementations for the overridden class members.
return -1; return -1;
} }
void PipelineHandlerVivid::stopDevice(Camera *camera) void PipelineHandlerVivid::stop(Camera *camera)
{ {
} }
@ -521,14 +518,14 @@ handler and camera manager using `registerCamera`_.
Finally with a successful construction, we return 'true' indicating that the Finally with a successful construction, we return 'true' indicating that the
PipelineHandler successfully matched and constructed a device. PipelineHandler successfully matched and constructed a device.
.. _Camera::create: https://libcamera.org/internal-api-html/classlibcamera_1_1Camera.html#adf5e6c22411f953bfaa1ae21155d6c31 .. _Camera::create: https://libcamera.org/api-html/classlibcamera_1_1Camera.html#a453740e0d2a2f495048ae307a85a2574
.. _registerCamera: https://libcamera.org/api-html/classlibcamera_1_1PipelineHandler.html#adf02a7f1bbd87aca73c0e8d8e0e6c98b .. _registerCamera: https://libcamera.org/api-html/classlibcamera_1_1PipelineHandler.html#adf02a7f1bbd87aca73c0e8d8e0e6c98b
.. code-block:: cpp .. code-block:: cpp
std::set<Stream *> streams{ &data->stream_ }; std::set<Stream *> streams{ &data->stream_ };
std::shared_ptr<Camera> camera = Camera::create(std::move(data), data->video_->deviceName(), streams); std::shared_ptr<Camera> camera = Camera::create(this, data->video_->deviceName(), streams);
registerCamera(std::move(camera)); registerCamera(std::move(camera), std::move(data));
return true; return true;
@ -554,7 +551,8 @@ Our match function should now look like the following:
/* Create and register the camera. */ /* Create and register the camera. */
std::set<Stream *> streams{ &data->stream_ }; std::set<Stream *> streams{ &data->stream_ };
std::shared_ptr<Camera> camera = Camera::create(std::move(data), data->video_->deviceName(), streams); const std::string &id = data->video_->deviceName();
std::shared_ptr<Camera> camera = Camera::create(data.release(), id, streams);
registerCamera(std::move(camera)); registerCamera(std::move(camera));
return true; return true;
@ -592,11 +590,11 @@ immutable properties of the ``Camera`` device.
The libcamera controls and properties are defined in YAML form which is The libcamera controls and properties are defined in YAML form which is
processed to automatically generate documentation and interfaces. Controls are processed to automatically generate documentation and interfaces. Controls are
defined by the src/libcamera/`control_ids_core.yaml`_ file and camera properties defined by the src/libcamera/`control_ids_core.yaml`_ file and camera properties
are defined by src/libcamera/`property_ids_core.yaml`_. are defined by src/libcamera/`properties_ids_core.yaml`_.
.. _controls framework: https://libcamera.org/api-html/controls_8h.html .. _controls framework: https://libcamera.org/api-html/controls_8h.html
.. _control_ids_core.yaml: https://libcamera.org/api-html/control__ids_8h.html .. _control_ids_core.yaml: https://libcamera.org/api-html/control__ids_8h.html
.. _property_ids_core.yaml: https://libcamera.org/api-html/property__ids_8h.html .. _properties_ids_core.yaml: https://libcamera.org/api-html/property__ids_8h.html
Pipeline handlers can optionally register the list of controls an application Pipeline handlers can optionally register the list of controls an application
can set as well as a list of immutable camera properties. Being both can set as well as a list of immutable camera properties. Being both
@ -799,7 +797,8 @@ derived class, and assign it to a base class pointer.
.. code-block:: cpp .. code-block:: cpp
auto config = std::make_unique<VividCameraConfiguration>(); VividCameraData *data = cameraData(camera);
CameraConfiguration *config = new VividCameraConfiguration();
A ``CameraConfiguration`` is specific to each pipeline, so you can only create A ``CameraConfiguration`` is specific to each pipeline, so you can only create
it from the pipeline handler code path. Applications can also generate an empty it from the pipeline handler code path. Applications can also generate an empty
@ -827,7 +826,9 @@ To generate a ``StreamConfiguration``, you need a list of pixel formats and
frame sizes which are supported as outputs of the stream. You can fetch a map of frame sizes which are supported as outputs of the stream. You can fetch a map of
the ``V4LPixelFormat`` and ``SizeRange`` supported by the underlying output the ``V4LPixelFormat`` and ``SizeRange`` supported by the underlying output
device, but the pipeline handler needs to convert this to a device, but the pipeline handler needs to convert this to a
``libcamera::PixelFormat`` type to pass to applications. ``libcamera::PixelFormat`` type to pass to applications. We do this here using
``std::transform`` to convert the formats and populate a new ``PixelFormat`` map
as shown below.
Continue adding the following code example to our ``generateConfiguration`` Continue adding the following code example to our ``generateConfiguration``
implementation. implementation.
@ -837,12 +838,14 @@ implementation.
std::map<V4L2PixelFormat, std::vector<SizeRange>> v4l2Formats = std::map<V4L2PixelFormat, std::vector<SizeRange>> v4l2Formats =
data->video_->formats(); data->video_->formats();
std::map<PixelFormat, std::vector<SizeRange>> deviceFormats; std::map<PixelFormat, std::vector<SizeRange>> deviceFormats;
std::transform(v4l2Formats.begin(), v4l2Formats.end(),
for (auto &[v4l2PixelFormat, sizes] : v4l2Formats) { std::inserter(deviceFormats, deviceFormats.begin()),
PixelFormat pixelFormat = v4l2PixelFormat.toPixelFormat(); [&](const decltype(v4l2Formats)::value_type &format) {
if (pixelFormat.isValid()) return decltype(deviceFormats)::value_type{
deviceFormats.try_emplace(pixelFormat, std::move(sizes)); format.first.toPixelFormat(),
} format.second
};
});
The `StreamFormats`_ class holds information about the pixel formats and frame The `StreamFormats`_ class holds information about the pixel formats and frame
sizes that a stream can support. The class groups size information by the pixel sizes that a stream can support. The class groups size information by the pixel
@ -932,9 +935,9 @@ Add the following function implementation to your file:
StreamConfiguration &cfg = config_[0]; StreamConfiguration &cfg = config_[0];
const std::vector<libcamera::PixelFormat> &formats = cfg.formats().pixelformats(); const std::vector<libcamera::PixelFormat> formats = cfg.formats().pixelformats();
if (std::find(formats.begin(), formats.end(), cfg.pixelFormat) == formats.end()) { if (std::find(formats.begin(), formats.end(), cfg.pixelFormat) == formats.end()) {
cfg.pixelFormat = formats[0]; cfg.pixelFormat = cfg.formats().pixelformats()[0];
LOG(VIVID, Debug) << "Adjusting format to " << cfg.pixelFormat.toString(); LOG(VIVID, Debug) << "Adjusting format to " << cfg.pixelFormat.toString();
status = Adjusted; status = Adjusted;
} }
@ -1152,7 +1155,7 @@ available to the devices which have to be started and ready to produce
images. At the end of a capture session the ``Camera`` device needs to be images. At the end of a capture session the ``Camera`` device needs to be
stopped, to gracefully clean up any allocated memory and stop the hardware stopped, to gracefully clean up any allocated memory and stop the hardware
devices. Pipeline handlers implement two functions for these purposes, the devices. Pipeline handlers implement two functions for these purposes, the
``start()`` and ``stopDevice()`` functions. ``start()`` and ``stop()`` functions.
The memory initialization phase that happens at ``start()`` time serves to The memory initialization phase that happens at ``start()`` time serves to
configure video devices to be able to use memory buffers exported as dma-buf configure video devices to be able to use memory buffers exported as dma-buf
@ -1255,8 +1258,8 @@ algorithms, or other devices you should also stop them.
.. _releaseBuffers: https://libcamera.org/api-html/classlibcamera_1_1V4L2VideoDevice.html#a191619c152f764e03bc461611f3fcd35 .. _releaseBuffers: https://libcamera.org/api-html/classlibcamera_1_1V4L2VideoDevice.html#a191619c152f764e03bc461611f3fcd35
Of course we also need to handle the corresponding actions to stop streaming on Of course we also need to handle the corresponding actions to stop streaming on
a device, Add the following to the ``stopDevice()`` function, to stop the a device, Add the following to the ``stop`` function, to stop the stream with
stream with the `streamOff`_ function and release all buffers. the `streamOff`_ function and release all buffers.
.. _streamOff: https://libcamera.org/api-html/classlibcamera_1_1V4L2VideoDevice.html#a61998710615bdf7aa25a046c8565ed66 .. _streamOff: https://libcamera.org/api-html/classlibcamera_1_1V4L2VideoDevice.html#a61998710615bdf7aa25a046c8565ed66
@ -1344,7 +1347,7 @@ before being set.
continue; continue;
} }
int32_t value = std::lround(it.second.get<float>() * 128 + offset); int32_t value = lroundf(it.second.get<float>() * 128 + offset);
controls.set(cid, std::clamp(value, 0, 255)); controls.set(cid, std::clamp(value, 0, 255));
} }
@ -1408,7 +1411,7 @@ value translation operations:
.. code-block:: cpp .. code-block:: cpp
#include <cmath> #include <math.h>
Frame completion and event handling Frame completion and event handling
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

View file

@ -1,7 +1,5 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0 .. SPDX-License-Identifier: CC-BY-SA-4.0
.. include:: ../documentation-contents.rst
Tracing Guide Tracing Guide
============= =============

View file

@ -1,31 +1,27 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0 .. SPDX-License-Identifier: CC-BY-SA-4.0
.. include:: introduction.rst .. Front page matter is defined in the project README file.
.. include:: ../README.rst
:start-after: .. section-begin-libcamera
:end-before: .. section-end-libcamera
.. toctree:: .. toctree::
:maxdepth: 1 :maxdepth: 1
:caption: Contents: :caption: Contents:
Home <self> Home <self>
Docs <docs>
Contribute <contributing> Contribute <contributing>
Getting Started <getting-started> Getting Started <getting-started>
Developer Guide <guides/introduction>
Application Writer's Guide <guides/application-developer> Application Writer's Guide <guides/application-developer>
Camera Sensor Model <camera-sensor-model>
Environment variables <environment_variables>
Feature Requirements <feature_requirements>
IPA Writer's guide <guides/ipa>
Lens driver requirements <lens_driver_requirements>
libcamera Architecture <libcamera_architecture>
Pipeline Handler Writer's Guide <guides/pipeline-handler> Pipeline Handler Writer's Guide <guides/pipeline-handler>
Python Bindings <python-bindings> IPA Writer's guide <guides/ipa>
Sensor driver requirements <sensor_driver_requirements>
SoftwareISP Benchmarking <software-isp-benchmarking>
Tracing guide <guides/tracing> Tracing guide <guides/tracing>
Environment variables <environment_variables>
Design document: AE <design/ae> Sensor driver requirements <sensor_driver_requirements>
Lens driver requirements <lens_driver_requirements>
.. toctree:: Python Bindings <python-bindings>
:hidden: Camera Sensor Model <camera-sensor-model>
SoftwareISP Benchmarking <software-isp-benchmarking>
introduction

View file

@ -1,8 +0,0 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. _internal-api:
Internal API Reference
======================
:: Placeholder for Doxygen documentation

View file

@ -1,224 +0,0 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. include:: documentation-contents.rst
************
Introduction
************
.. toctree::
:hidden:
API <api-html/index>
Internal API <internal-api-html/index>
What is libcamera?
==================
libcamera is an open source complex camera support library for Linux, Android
and ChromeOS. The library interfaces with Linux kernel device drivers and
provides an intuitive API to developers in order to simplify the complexity
involved in capturing images from complex cameras on Linux systems.
What is a "complex camera"?
===========================
A modern "camera" is in fact often several different pieces of hardware which
must all be controlled together in order to produce and capture images of
appropriate quality. A hardware pipeline typically consists of a camera sensor
that captures raw frames and transmits them on a bus, a receiver that decodes
the bus signals, and an image signal processor that processes raw frames to
produce usable images in a standard format. The Linux kernel handles these
multimedia devices through the 'Linux media' subsystem and provides a set of
application programming interfaces known collectively as the
V4L2 (`Video for Linux 2`_) and the `Media Controller`_ APIs, which provide an
interface to interact and control media devices.
.. _Video for Linux 2: https://www.linuxtv.org/downloads/v4l-dvb-apis-new/userspace-api/v4l/v4l2.html
.. _Media Controller: https://www.linuxtv.org/downloads/v4l-dvb-apis-new/userspace-api/mediactl/media-controller.html
Included in this subsystem are drivers for camera sensors, CSI2 (Camera
Serial Interface) receivers, and ISPs (Image Signal Processors).
The usage of these drivers to provide a functioning camera stack is a
responsibility that lies in userspace, and is commonly implemented separately
by vendors without a common architecture or API for application developers. This
adds a lot of complexity to the task, particularly when considering that the
differences in hardware pipelines and their representation in the kernel's APIs
often necessitate bespoke handling.
What is libcamera for?
======================
libcamera provides a complete camera stack for Linux-based systems to abstract
the configuration of hardware and image control algorithms required to obtain
desirable results from the camera through the kernel's APIs, reducing those
operations to a simple and consistent method for developers. In short, instead of
having to deal with this:
.. graphviz:: mali-c55.dot
you can instead simply deal with:
.. code-block:: python
>>> import libcamera as lc
>>> camera_manager = lc.CameraManager.singleton()
[0:15:59.582029920] [504] INFO Camera camera_manager.cpp:313 libcamera v0.3.0+182-01e57380
>>> for camera in camera_manager.cameras:
... print(f' - {camera.id}')
...
- mali-c55 tpg
- imx415 1-001a
The library handles the rest for you. These documentation pages give more
information on the internal workings of libcamera (and the kernel camera stack
that lies behind it) as well as guidance on using libcamera in an application or
extending the library with support for your hardware (through the pipeline
handler and IPA module writer's guides).
How should I use it?
====================
There are a few ways you might want to use libcamera, depending on your
application. It's always possible to use the library directly, and you can find
detailed information on how to do so in the
:doc:`application writer's guide <guides/application-developer>`.
It is often more appropriate to use one of the frameworks with libcamera
support. For example an application powering an embedded media device
incorporating capture, encoding and streaming of both video and audio would
benefit from using `GStreamer`_, for which libcamera provides a plugin.
Similarly an application for user-facing devices like a laptop would likely
benefit accessing cameras through the XDG camera portal and `pipewire`_, which
brings the advantages of resource sharing (multiple applications accessing the
stream at the same time) and access control.
.. _GStreamer: https://gstreamer.freedesktop.org/
.. _pipewire: https://pipewire.org/
Camera Stack
============
::
a c / +-------------+ +-------------+ +-------------+ +-------------+
p a | | Native | | Framework | | Native | | Android |
p t | | V4L2 | | Application | | libcamera | | Camera |
l i | | Application | | (gstreamer) | | Application | | Framework |
i o \ +-------------+ +-------------+ +-------------+ +-------------+
n ^ ^ ^ ^
| | | |
l a | | | |
i d v v | v
b a / +-------------+ +-------------+ | +-------------+
c p | | V4L2 | | Camera | | | Android |
a t | | Compat. | | Framework | | | Camera |
m a | | | | (gstreamer) | | | HAL |
e t \ +-------------+ +-------------+ | +-------------+
r i ^ ^ | ^
a o | | | |
n | | | |
/ | ,................................................
| | ! : Language : !
l f | | ! : Bindings : !
i r | | ! : (optional) : !
b a | | \...............................................'
c m | | | | |
a e | | | | |
m w | v v v v
e o | +----------------------------------------------------------------+
r r | | |
a k | | libcamera |
| | |
\ +----------------------------------------------------------------+
^ ^ ^
Userspace | | |
------------------------ | ---------------- | ---------------- | ---------------
Kernel | | |
v v v
+-----------+ +-----------+ +-----------+
| Media | <--> | Video | <--> | V4L2 |
| Device | | Device | | Subdev |
+-----------+ +-----------+ +-----------+
The camera stack comprises four software layers. From bottom to top:
* The kernel drivers control the camera hardware and expose a
low-level interface to userspace through the Linux kernel V4L2
family of APIs (Media Controller API, V4L2 Video Device API and
V4L2 Subdev API).
* The libcamera framework is the core part of the stack. It
handles all control of the camera devices in its core component,
libcamera, and exposes a native C++ API to upper layers. Optional
language bindings allow interfacing to libcamera from other
programming languages.
Those components live in the same source code repository and
all together constitute the libcamera framework.
* The libcamera adaptation is an umbrella term designating the
components that interface to libcamera in other frameworks.
Notable examples are a V4L2 compatibility layer, a gstreamer
libcamera element, and an Android camera HAL implementation based
on libcamera.
Those components can live in the libcamera project source code
in separate repositories, or move to their respective project's
repository (for instance the gstreamer libcamera element).
* The applications and upper level frameworks are based on the
libcamera framework or libcamera adaptation, and are outside of
the scope of the libcamera project.
V4L2 Compatibility Layer
V4L2 compatibility is achieved through a shared library that traps all
accesses to camera devices and routes them to libcamera to emulate high-level
V4L2 camera devices. It is injected in a process address space through
``LD_PRELOAD`` and is completely transparent for applications.
The compatibility layer exposes camera device features on a best-effort basis,
and aims for the level of features traditionally available from a UVC camera
designed for video conferencing.
Android Camera HAL
Camera support for Android is achieved through a generic Android camera HAL
implementation on top of libcamera. The HAL implements features required by
Android and out of scope from libcamera, such as JPEG encoding support.
This component is used to provide support for ChromeOS platforms.
GStreamer element (gstlibcamerasrc)
A `GStreamer element`_ is provided to allow capture from libcamera supported
devices through GStreamer pipelines, and connect to other elements for further
processing.
Native libcamera API
Applications can make use of the libcamera API directly using the C++
API. An example application and walkthrough using the libcamera API can be
followed in the :doc:`Application writer's guide </guides/application-developer>`
.. _GStreamer element: https://gstreamer.freedesktop.org/documentation/application-development/basics/elements.html
Licensing
=========
The libcamera core is covered by the `LGPL-2.1-or-later`_ license. Pipeline
Handlers are a part of the libcamera code base and need to be contributed
upstream by device vendors. IPA modules included in libcamera are covered by a
free software license, however third-parties may develop IPA modules outside of
libcamera and distribute them under a closed-source license, provided they do
not include source code from the libcamera project.
The libcamera project itself contains multiple libraries, applications and
utilities. Licenses are expressed through SPDX tags in text-based files that
support comments, and through the .reuse/dep5 file otherwise. A copy of all
licenses are stored in the LICENSES directory, and a full summary of the
licensing used throughout the project can be found in the COPYING.rst document.
Applications which link dynamically against libcamera and use only the public
API are an independent work of the authors and have no license restrictions
imposed upon them from libcamera.
.. _LGPL-2.1-or-later: https://spdx.org/licenses/LGPL-2.1-or-later.html

View file

@ -1,7 +1,5 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0 .. SPDX-License-Identifier: CC-BY-SA-4.0
.. include:: documentation-contents.rst
.. _lens-driver-requirements: .. _lens-driver-requirements:
Lens Driver Requirements Lens Driver Requirements

View file

@ -1,168 +0,0 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0
.. include:: documentation-contents.rst
libcamera Architecture
======================
While offering a unified API towards upper layers, and presenting itself as a
single library, libcamera isn't monolithic. It exposes multiple components
through its public API and is built around a set of separate helpers internally.
Hardware abstractions are handled through the use of device-specific components
where required and dynamically loadable plugins are used to separate image
processing algorithms from the core libcamera codebase.
::
--------------------------< libcamera Public API >---------------------------
^ ^
| |
v v
+-------------+ +---------------------------------------------------+
| Camera | | Camera Device |
| Manager | | +-----------------------------------------------+ |
+-------------+ | | Device-Agnostic | |
^ | | | |
| | | +--------------------------+ |
| | | | ~~~~~~~~~~~~~~~~~~~~~~~ |
| | | | { +-----------------+ } |
| | | | } | //// Image //// | { |
| | | | <-> | / Processing // | } |
| | | | } | / Algorithms // | { |
| | | | { +-----------------+ } |
| | | | ~~~~~~~~~~~~~~~~~~~~~~~ |
| | | | ========================== |
| | | | +-----------------+ |
| | | | | // Pipeline /// | |
| | | | <-> | /// Handler /// | |
| | | | | /////////////// | |
| | +--------------------+ +-----------------+ |
| | Device-Specific |
| +---------------------------------------------------+
| ^ ^
| | |
v v v
+--------------------------------------------------------------------+
| Helpers and Support Classes |
| +-------------+ +-------------+ +-------------+ +-------------+ |
| | MC & V4L2 | | Buffers | | Sandboxing | | Plugins | |
| | Support | | Allocator | | IPC | | Manager | |
| +-------------+ +-------------+ +-------------+ +-------------+ |
| +-------------+ +-------------+ |
| | Pipeline | | ... | |
| | Runner | | | |
| +-------------+ +-------------+ |
+--------------------------------------------------------------------+
/// Device-Specific Components
~~~ Sandboxing
Camera Manager
The Camera Manager enumerates cameras and instantiates Pipeline Handlers to
manage each Camera that libcamera supports. The Camera Manager supports
hotplug detection and notification events when supported by the underlying
kernel devices.
There is only ever one instance of the Camera Manager running per application.
Each application's instance of the Camera Manager ensures that only a single
application can take control of a camera device at once.
Read the `Camera Manager API`_ documentation for more details.
.. _Camera Manager API: https://libcamera.org/api-html/classlibcamera_1_1CameraManager.html
Camera Device
The Camera class represents a single item of camera hardware that is capable
of producing one or more image streams, and provides the API to interact with
the underlying device.
If a system has multiple instances of the same hardware attached, each has its
own instance of the camera class.
The API exposes full control of the device to upper layers of libcamera through
the public API, making it the highest level object libcamera exposes, and the
object that all other API operations interact with from configuration to
capture.
Read the `Camera API`_ documentation for more details.
.. _Camera API: https://libcamera.org/api-html/classlibcamera_1_1Camera.html
Pipeline Handler
The Pipeline Handler manages the complex pipelines exposed by the kernel
drivers through the Media Controller and V4L2 APIs. It abstracts pipeline
handling to hide device-specific details from the rest of the library, and
implements both pipeline configuration based on stream configuration, and
pipeline runtime execution and scheduling when needed by the device.
The Pipeline Handler lives in the same process as the rest of the library, and
has access to all helpers and kernel camera-related devices.
Hardware abstraction is handled by device specific Pipeline Handlers which are
derived from the Pipeline Handler base class allowing commonality to be shared
among the implementations.
Derived pipeline handlers create Camera device instances based on the devices
they detect and support on the running system, and are responsible for
managing the interactions with a camera device.
More details can be found in the `PipelineHandler API`_ documentation, and the
:doc:`Pipeline Handler Writers Guide <guides/pipeline-handler>`.
.. _PipelineHandler API: https://libcamera.org/api-html/classlibcamera_1_1PipelineHandler.html
Image Processing Algorithms
Together with the hardware image processing and hardware statistics
collection, the Image Processing Algorithms (IPA) implement 3A (Auto-Exposure,
Auto-White Balance and Auto-Focus) and other algorithms. They run on the CPU
and control hardware image processing based on the parameters supplied by
upper layers, closing the control loop of the ISP.
IPAs are loaded as external plugins named IPA Modules. IPA Modules can be part
of the libcamera code base or provided externally by camera vendors as
open-source or closed-source components.
Open source IPA Modules built with libcamera are run in the same process space
as libcamera. External IPA Modules are run in a separate sandboxed process. In
either case, they can only interact with libcamera through the API provided by
the Pipeline Handler. They have a restricted view of the system, with no direct
access to kernel camera devices, no access to networking APIs, and limited
access to file systems. All their accesses to image and metadata are mediated
by dmabuf instances explicitly passed by the Pipeline Handler to the IPA
Module.
IPA Modules are only required for platforms and devices with an ISP controlled
by the host CPU. Camera sensors which have an integrated ISP are not
controlled through the IPA Module.
Helpers and Support Classes
While Pipeline Handlers are device-specific, implementations are expected to
share code due to usage of identical APIs towards the kernel camera drivers
and the Image Processing Algorithms. This includes without limitation handling
of the MC and V4L2 APIs, buffer management through dmabuf, and pipeline
discovery, configuration and scheduling. Such code will be factored out to
helpers when applicable.
Other parts of libcamera will also benefit from factoring code out to
self-contained support classes, even if such code is present only once in the
code base, in order to keep the source code clean and easy to read. This
should be the case for instance for plugin management.
Platform Support
----------------
The library currently supports the following hardware platforms specifically
with dedicated pipeline handlers:
- Arm Mali-C55
- Intel IPU3 (ipu3)
- NXP i.MX8MP (imx8-isi and rkisp1)
- RaspberryPi 3, 4 and zero (rpi/vc4)
- Rockchip RK3399 (rkisp1)
Furthermore, generic platform support is provided for the following:
- USB video device class cameras (uvcvideo)
- iMX7, IPU6, Allwinner Sun6i (simple)
- Virtual media controller driver for test use cases (vimc)

View file

@ -1,33 +0,0 @@
/**
\mainpage libcamera API reference
Welcome to the API reference for <a href="https://libcamera.org/">libcamera</a>,
a complex camera support library for Linux, Android and ChromeOS. These pages
are automatically generated from the libcamera source code and describe the API
in detail - if this is your first interaction with libcamera then you may find
it useful to visit the [documentation](../introduction.html) in
the first instance, which can provide a more generic introduction to the
library's concepts.
\if internal
As a follow-on to the developer's guide, to assist you in adding support for
your platform the [pipeline handler writer's guide](../guides/pipeline-handler.html)
and the [ipa module writer's guide](../guides/ipa.html) should be helpful.
The full libcamera API is documented here. If you wish to see only the public
part of the API you can use [these pages](../api-html/index.html) instead.
\else
As a follow-on to the developer's guide, to assist you in using libcamera within
your project the [application developer's guide](../guides/application-developer.html)
gives an overview on how to achieve that.
Only the public part of the libcamera API is documented here; if you are a
developer seeking to add support for your hardware to the library or make other
improvements, you should switch to the internal API
[reference pages](../internal-api-html/index.html) instead.
\endif
*/

View file

@ -1,25 +0,0 @@
/* SPDX-License-Identifier: CC-BY-SA-4.0 */
digraph board {
rankdir=TB
n00000001 [label="{{} | mali-c55 tpg\n/dev/v4l-subdev0 | {<port0> 0}}", shape=Mrecord, style=filled, fillcolor=green]
n00000001:port0 -> n00000003:port0 [style=dashed]
n00000003 [label="{{<port0> 0 | <port4> 4} | mali-c55 isp\n/dev/v4l-subdev1 | {<port1> 1 | <port2> 2 | <port3> 3}}", shape=Mrecord, style=filled, fillcolor=green]
n00000003:port1 -> n00000009:port0 [style=bold]
n00000003:port2 -> n00000009:port2 [style=bold]
n00000003:port1 -> n0000000d:port0 [style=bold]
n00000003:port3 -> n0000001c
n00000009 [label="{{<port0> 0 | <port2> 2} | mali-c55 resizer fr\n/dev/v4l-subdev2 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
n00000009:port1 -> n00000010
n0000000d [label="{{<port0> 0} | mali-c55 resizer ds\n/dev/v4l-subdev3 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
n0000000d:port1 -> n00000014
n00000010 [label="mali-c55 fr\n/dev/video0", shape=box, style=filled, fillcolor=yellow]
n00000014 [label="mali-c55 ds\n/dev/video1", shape=box, style=filled, fillcolor=yellow]
n00000018 [label="mali-c55 3a params\n/dev/video2", shape=box, style=filled, fillcolor=yellow]
n00000018 -> n00000003:port4
n0000001c [label="mali-c55 3a stats\n/dev/video3", shape=box, style=filled, fillcolor=yellow]
n00000030 [label="{{<port0> 0} | lte-csi2-rx\n/dev/v4l-subdev4 | {<port1> 1}}", shape=Mrecord, style=filled, fillcolor=green]
n00000030:port1 -> n00000003:port0
n00000035 [label="{{} | imx415 1-001a\n/dev/v4l-subdev5 | {<port0> 0}}", shape=Mrecord, style=filled, fillcolor=green]
n00000035:port0 -> n00000030:port0 [style=bold]
}

View file

@ -24,100 +24,44 @@ if doxygen.found() and dot.found()
cdata.set('PREDEFINED', ' \\\n\t\t\t '.join(doxygen_predefined)) cdata.set('PREDEFINED', ' \\\n\t\t\t '.join(doxygen_predefined))
doxyfile_common = configure_file(input : 'Doxyfile-common.in', doxyfile = configure_file(input : 'Doxyfile.in',
output : 'Doxyfile-common', output : 'Doxyfile',
configuration : cdata) configuration : cdata)
doxygen_public_input = [ doxygen_input = [
libcamera_base_public_headers, doxyfile,
libcamera_base_public_sources, libcamera_base_headers,
libcamera_public_headers, libcamera_base_sources,
libcamera_public_sources,
]
doxygen_internal_input = [
libcamera_base_private_headers,
libcamera_base_internal_sources,
libcamera_internal_headers, libcamera_internal_headers,
libcamera_internal_sources,
libcamera_ipa_headers, libcamera_ipa_headers,
libcamera_ipa_interfaces, libcamera_ipa_interfaces,
libcamera_public_headers,
libcamera_sources,
libipa_headers, libipa_headers,
libipa_sources, libipa_sources,
] ]
if is_variable('ipu3_ipa_sources') if is_variable('ipu3_ipa_sources')
doxygen_internal_input += [ipu3_ipa_sources] doxygen_input += [ipu3_ipa_sources]
endif endif
# We run doxygen twice - the first run excludes internal API objects as it custom_target('doxygen',
# is intended to document the public API only. A second run covers all of input : doxygen_input,
# the library's objects for libcamera developers. Common configuration is
# set in an initially generated Doxyfile, which is then included by the two
# final Doxyfiles.
# This is the "public" run of doxygen generating an abridged version of the
# API's documentation.
doxyfile_tmpl = configure_file(input : 'Doxyfile-public.in',
output : 'Doxyfile-public.tmpl',
configuration : cdata)
# The set of public input files stored in the doxygen_public_input array
# needs to be set in Doxyfile public. We can't pass them through cdata
# cdata, as some of the array members are custom_tgt instances, which
# configuration_data.set() doesn't support. Using a separate script invoked
# through custom_target(), which supports custom_tgt instances as inputs.
doxyfile = custom_target('doxyfile-public',
input : [
doxygen_public_input,
],
output : 'Doxyfile-public',
command : [
'gen-doxyfile.py',
'-o', '@OUTPUT@',
doxyfile_tmpl,
'@INPUT@',
])
custom_target('doxygen-public',
input : [
doxyfile,
doxyfile_common,
],
output : 'api-html', output : 'api-html',
command : [doxygen, doxyfile], command : [doxygen, doxyfile],
install : true, install : true,
install_dir : doc_install_dir, install_dir : doc_install_dir,
install_tag : 'doc') install_tag : 'doc')
# This is the internal documentation, which hard-codes a list of directories
# to parse in its doxyfile.
doxyfile = configure_file(input : 'Doxyfile-internal.in',
output : 'Doxyfile-internal',
configuration : cdata)
custom_target('doxygen-internal',
input : [
doxyfile,
doxyfile_common,
doxygen_internal_input,
],
output : 'internal-api-html',
command : [doxygen, doxyfile],
install : true,
install_dir : doc_install_dir,
install_tag : 'doc-internal')
endif endif
# #
# Sphinx # Sphinx
# #
sphinx = find_program('sphinx-build-3', 'sphinx-build', sphinx = find_program('sphinx-build-3', required : false)
required : get_option('documentation')) if not sphinx.found()
sphinx = find_program('sphinx-build', required : get_option('documentation'))
endif
if sphinx.found() if sphinx.found()
docs_sources = [ docs_sources = [
@ -126,19 +70,15 @@ if sphinx.found()
'coding-style.rst', 'coding-style.rst',
'conf.py', 'conf.py',
'contributing.rst', 'contributing.rst',
'design/ae.rst', 'docs.rst',
'documentation-contents.rst',
'environment_variables.rst', 'environment_variables.rst',
'feature_requirements.rst',
'guides/application-developer.rst', 'guides/application-developer.rst',
'guides/introduction.rst',
'guides/ipa.rst', 'guides/ipa.rst',
'guides/pipeline-handler.rst', 'guides/pipeline-handler.rst',
'guides/tracing.rst', 'guides/tracing.rst',
'index.rst', 'index.rst',
'introduction.rst',
'lens_driver_requirements.rst', 'lens_driver_requirements.rst',
'libcamera_architecture.rst',
'mali-c55.dot',
'python-bindings.rst', 'python-bindings.rst',
'sensor_driver_requirements.rst', 'sensor_driver_requirements.rst',
'software-isp-benchmarking.rst', 'software-isp-benchmarking.rst',

View file

@ -1,7 +1,5 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0 .. SPDX-License-Identifier: CC-BY-SA-4.0
.. include:: documentation-contents.rst
.. _python-bindings: .. _python-bindings:
Python Bindings for libcamera Python Bindings for libcamera

View file

@ -1,7 +1,5 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0 .. SPDX-License-Identifier: CC-BY-SA-4.0
.. include:: documentation-contents.rst
.. _sensor-driver-requirements: .. _sensor-driver-requirements:
Sensor Driver Requirements Sensor Driver Requirements

View file

@ -1,7 +1,5 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0 .. SPDX-License-Identifier: CC-BY-SA-4.0
.. include:: documentation-contents.rst
.. _software-isp-benchmarking: .. _software-isp-benchmarking:
Software ISP benchmarking Software ISP benchmarking

View file

@ -283,13 +283,9 @@ div#signature {
font-size: 12px; font-size: 12px;
} }
#licensing div.toctree-wrapper { #libcamera div.toctree-wrapper {
height: 0px; height: 0px;
margin: 0px; margin: 0px;
padding: 0px; padding: 0px;
visibility: hidden; visibility: hidden;
} }
.documentation-nav {
display: none;
}

View file

@ -1,44 +0,0 @@
/**
* \page thread-safety Reentrancy and Thread-Safety
*
* Through the documentation, several terms are used to define how classes and
* their member functions can be used from multiple threads.
*
* - A **reentrant** function may be called simultaneously from multiple
* threads if and only if each invocation uses a different instance of the
* class. This is the default for all member functions not explictly marked
* otherwise.
*
* - \anchor thread-safe A **thread-safe** function may be called
* simultaneously from multiple threads on the same instance of a class. A
* thread-safe function is thus reentrant. Thread-safe functions may also be
* called simultaneously with any other reentrant function of the same class
* on the same instance.
*
* \internal
* - \anchor thread-bound A **thread-bound** function may be called only from
* the thread that the class instances lives in (see section \ref
* thread-objects). For instances of classes that do not derive from the
* Object class, this is the thread in which the instance was created. A
* thread-bound function is not thread-safe, and may or may not be reentrant.
* \endinternal
*
* Neither reentrancy nor thread-safety, in this context, mean that a function
* may be called simultaneously from the same thread, for instance from a
* callback invoked by the function. This may deadlock and isn't allowed unless
* separately documented.
*
* \if internal
* A class is defined as reentrant, thread-safe or thread-bound if all its
* member functions are reentrant, thread-safe or thread-bound respectively.
* \else
* A class is defined as reentrant or thread-safe if all its member functions
* are reentrant or thread-safe respectively.
* \endif
* Some member functions may additionally be documented as having additional
* thread-related attributes.
*
* Most classes are reentrant but not thread-safe, as making them fully
* thread-safe would incur locking costs considered prohibitive for the
* expected use cases.
*/

View file

@ -1,5 +1,7 @@
.. SPDX-License-Identifier: CC-BY-SA-4.0 .. SPDX-License-Identifier: CC-BY-SA-4.0
.. section-begin-libcamera
=========== ===========
libcamera libcamera
=========== ===========
@ -20,6 +22,7 @@ open-source-friendly while still protecting vendor core IP. libcamera was born
out of that collaboration and will offer modern camera support to Linux-based out of that collaboration and will offer modern camera support to Linux-based
systems, including traditional Linux distributions, ChromeOS and Android. systems, including traditional Linux distributions, ChromeOS and Android.
.. section-end-libcamera
.. section-begin-getting-started .. section-begin-getting-started
Getting Started Getting Started
@ -44,7 +47,7 @@ A C++ toolchain: [required]
Either {g++, clang} Either {g++, clang}
Meson Build system: [required] Meson Build system: [required]
meson (>= 0.63) ninja-build pkg-config meson (>= 0.60) ninja-build pkg-config
for the libcamera core: [required] for the libcamera core: [required]
libyaml-dev python3-yaml python3-ply python3-jinja2 libyaml-dev python3-yaml python3-ply python3-jinja2
@ -83,10 +86,9 @@ for cam: [optional]
- libdrm-dev: Enables the KMS sink - libdrm-dev: Enables the KMS sink
- libjpeg-dev: Enables MJPEG on the SDL sink - libjpeg-dev: Enables MJPEG on the SDL sink
- libsdl2-dev: Enables the SDL sink - libsdl2-dev: Enables the SDL sink
- libtiff-dev: Enables writing DNG
for qcam: [optional] for qcam: [optional]
libtiff-dev qt6-base-dev libtiff-dev qtbase5-dev qttools5-dev-tools
for tracing with lttng: [optional] for tracing with lttng: [optional]
liblttng-ust-dev python3-jinja2 lttng-tools liblttng-ust-dev python3-jinja2 lttng-tools
@ -94,6 +96,9 @@ for tracing with lttng: [optional]
for android: [optional] for android: [optional]
libexif-dev libjpeg-dev libexif-dev libjpeg-dev
for Python bindings: [optional]
pybind11-dev
for lc-compliance: [optional] for lc-compliance: [optional]
libevent-dev libgtest-dev libevent-dev libgtest-dev
@ -173,22 +178,6 @@ Which can be received on another device over the network with:
gst-launch-1.0 tcpclientsrc host=$DEVICE_IP port=5000 ! \ gst-launch-1.0 tcpclientsrc host=$DEVICE_IP port=5000 ! \
multipartdemux ! jpegdec ! autovideosink multipartdemux ! jpegdec ! autovideosink
The GStreamer element also supports multiple streams. This is achieved by
requesting additional source pads. Downstream caps filters can be used
to choose specific parameters like resolution and pixel format. The pad
property ``stream-role`` can be used to select a role.
The following example displays a 640x480 view finder while streaming JPEG
encoded 800x600 video. You can use the receiver pipeline above to view the
remote stream from another device.
.. code::
gst-launch-1.0 libcamerasrc name=cs src::stream-role=view-finder src_0::stream-role=video-recording \
cs.src ! queue ! video/x-raw,width=640,height=480 ! videoconvert ! autovideosink \
cs.src_0 ! queue ! video/x-raw,width=800,height=600 ! videoconvert ! \
jpegenc ! multipartmux ! tcpserversink host=0.0.0.0 port=5000
.. section-end-getting-started .. section-end-getting-started
Troubleshooting Troubleshooting

View file

@ -98,15 +98,21 @@ public:
using PackType = BoundMethodPack<R, Args...>; using PackType = BoundMethodPack<R, Args...>;
private: private:
template<std::size_t... I> template<std::size_t... I, typename T = R>
void invokePack(BoundMethodPackBase *pack, std::index_sequence<I...>) std::enable_if_t<!std::is_void<T>::value, void>
invokePack(BoundMethodPackBase *pack, std::index_sequence<I...>)
{ {
[[maybe_unused]] auto *args = static_cast<PackType *>(pack); PackType *args = static_cast<PackType *>(pack);
args->ret_ = invoke(std::get<I>(args->args_)...);
}
if constexpr (!std::is_void_v<R>) template<std::size_t... I, typename T = R>
args->ret_ = invoke(std::get<I>(args->args_)...); std::enable_if_t<std::is_void<T>::value, void>
else invokePack(BoundMethodPackBase *pack, std::index_sequence<I...>)
invoke(std::get<I>(args->args_)...); {
/* args is effectively unused when the sequence I is empty. */
PackType *args [[gnu::unused]] = static_cast<PackType *>(pack);
invoke(std::get<I>(args->args_)...);
} }
public: public:

View file

@ -0,0 +1,14 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2021, Google Inc.
*
* Compiler support
*/
#pragma once
#if __cplusplus >= 201703L
#define __nodiscard [[nodiscard]]
#else
#define __nodiscard
#endif

View file

@ -7,6 +7,8 @@
#pragma once #pragma once
#include <vector>
#include <libcamera/base/private.h> #include <libcamera/base/private.h>
namespace libcamera { namespace libcamera {

View file

@ -7,11 +7,11 @@
#pragma once #pragma once
#include <map>
#include <stdint.h>
#include <string>
#include <sys/types.h> #include <sys/types.h>
#include <map>
#include <string>
#include <libcamera/base/private.h> #include <libcamera/base/private.h>
#include <libcamera/base/class.h> #include <libcamera/base/class.h>

View file

@ -7,9 +7,8 @@
#pragma once #pragma once
#include <atomic> #include <chrono>
#include <sstream> #include <sstream>
#include <string_view>
#include <libcamera/base/private.h> #include <libcamera/base/private.h>
@ -30,29 +29,25 @@ enum LogSeverity {
class LogCategory class LogCategory
{ {
public: public:
static LogCategory *create(std::string_view name); static LogCategory *create(const char *name);
const std::string &name() const { return name_; } const std::string &name() const { return name_; }
LogSeverity severity() const { return severity_.load(std::memory_order_relaxed); } LogSeverity severity() const { return severity_; }
void setSeverity(LogSeverity severity) { severity_.store(severity, std::memory_order_relaxed); } void setSeverity(LogSeverity severity);
static const LogCategory &defaultCategory(); static const LogCategory &defaultCategory();
private: private:
friend class Logger; explicit LogCategory(const char *name);
explicit LogCategory(std::string_view name);
const std::string name_; const std::string name_;
LogSeverity severity_;
std::atomic<LogSeverity> severity_;
static_assert(decltype(severity_)::is_always_lock_free);
}; };
#define LOG_DECLARE_CATEGORY(name) \ #define LOG_DECLARE_CATEGORY(name) \
extern const LogCategory &_LOG_CATEGORY(name)(); extern const LogCategory &_LOG_CATEGORY(name)();
#define LOG_DEFINE_CATEGORY(name) \ #define LOG_DEFINE_CATEGORY(name) \
LOG_DECLARE_CATEGORY(name) \
const LogCategory &_LOG_CATEGORY(name)() \ const LogCategory &_LOG_CATEGORY(name)() \
{ \ { \
/* The instance will be deleted by the Logger destructor. */ \ /* The instance will be deleted by the Logger destructor. */ \
@ -65,7 +60,9 @@ class LogMessage
public: public:
LogMessage(const char *fileName, unsigned int line, LogMessage(const char *fileName, unsigned int line,
const LogCategory &category, LogSeverity severity, const LogCategory &category, LogSeverity severity,
std::string prefix = {}); const std::string &prefix = std::string());
LogMessage(LogMessage &&);
~LogMessage(); ~LogMessage();
std::ostream &stream() { return msgStream_; } std::ostream &stream() { return msgStream_; }
@ -78,7 +75,9 @@ public:
const std::string msg() const { return msgStream_.str(); } const std::string msg() const { return msgStream_.str(); }
private: private:
LIBCAMERA_DISABLE_COPY_AND_MOVE(LogMessage) LIBCAMERA_DISABLE_COPY(LogMessage)
void init(const char *fileName, unsigned int line);
std::ostringstream msgStream_; std::ostringstream msgStream_;
const LogCategory &category_; const LogCategory &category_;

View file

@ -1,32 +0,0 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2024, Ideas on Board Oy
*
* Anonymous file creation
*/
#pragma once
#include <libcamera/base/flags.h>
#include <libcamera/base/unique_fd.h>
namespace libcamera {
class MemFd
{
public:
enum class Seal {
None = 0,
Shrink = (1 << 0),
Grow = (1 << 1),
};
using Seals = Flags<Seal>;
static UniqueFD create(const char *name, std::size_t size,
Seals seals = Seal::None);
};
LIBCAMERA_FLAGS_ENABLE_OPERATORS(MemFd::Seal)
} /* namespace libcamera */

View file

@ -5,6 +5,7 @@ libcamera_base_include_dir = libcamera_include_dir / 'base'
libcamera_base_public_headers = files([ libcamera_base_public_headers = files([
'bound_method.h', 'bound_method.h',
'class.h', 'class.h',
'compiler.h',
'flags.h', 'flags.h',
'object.h', 'object.h',
'shared_fd.h', 'shared_fd.h',
@ -20,7 +21,6 @@ libcamera_base_private_headers = files([
'event_notifier.h', 'event_notifier.h',
'file.h', 'file.h',
'log.h', 'log.h',
'memfd.h',
'message.h', 'message.h',
'mutex.h', 'mutex.h',
'private.h', 'private.h',

View file

@ -23,6 +23,10 @@ namespace libcamera {
class LIBCAMERA_TSA_CAPABILITY("mutex") Mutex final class LIBCAMERA_TSA_CAPABILITY("mutex") Mutex final
{ {
public: public:
constexpr Mutex()
{
}
void lock() LIBCAMERA_TSA_ACQUIRE() void lock() LIBCAMERA_TSA_ACQUIRE()
{ {
mutex_.lock(); mutex_.lock();
@ -80,6 +84,10 @@ private:
class ConditionVariable final class ConditionVariable final
{ {
public: public:
ConditionVariable()
{
}
void notify_one() noexcept void notify_one() noexcept
{ {
cv_.notify_one(); cv_.notify_one();

View file

@ -9,11 +9,9 @@
#include <list> #include <list>
#include <memory> #include <memory>
#include <utility>
#include <vector> #include <vector>
#include <libcamera/base/bound_method.h> #include <libcamera/base/bound_method.h>
#include <libcamera/base/class.h>
namespace libcamera { namespace libcamera {
@ -40,7 +38,7 @@ public:
{ {
T *obj = static_cast<T *>(this); T *obj = static_cast<T *>(this);
auto *method = new BoundMethodMember<T, R, FuncArgs...>(obj, this, func, type); auto *method = new BoundMethodMember<T, R, FuncArgs...>(obj, this, func, type);
return method->activate(std::forward<Args>(args)..., true); return method->activate(args..., true);
} }
Thread *thread() const { return thread_; } Thread *thread() const { return thread_; }
@ -54,8 +52,6 @@ protected:
bool assertThreadBound(const char *message); bool assertThreadBound(const char *message);
private: private:
LIBCAMERA_DISABLE_COPY_AND_MOVE(Object)
friend class SignalBase; friend class SignalBase;
friend class Thread; friend class Thread;

View file

@ -10,6 +10,7 @@
#include <functional> #include <functional>
#include <list> #include <list>
#include <type_traits> #include <type_traits>
#include <vector>
#include <libcamera/base/bound_method.h> #include <libcamera/base/bound_method.h>
@ -63,8 +64,11 @@ public:
#ifndef __DOXYGEN__ #ifndef __DOXYGEN__
template<typename T, typename Func, template<typename T, typename Func,
std::enable_if_t<std::is_base_of<Object, T>::value && std::enable_if_t<std::is_base_of<Object, T>::value
std::is_invocable_v<Func, Args...>> * = nullptr> #if __cplusplus >= 201703L
&& std::is_invocable_v<Func, Args...>
#endif
> * = nullptr>
void connect(T *obj, Func func, ConnectionType type = ConnectionTypeAuto) void connect(T *obj, Func func, ConnectionType type = ConnectionTypeAuto)
{ {
Object *object = static_cast<Object *>(obj); Object *object = static_cast<Object *>(obj);
@ -72,8 +76,11 @@ public:
} }
template<typename T, typename Func, template<typename T, typename Func,
std::enable_if_t<!std::is_base_of<Object, T>::value && std::enable_if_t<!std::is_base_of<Object, T>::value
std::is_invocable_v<Func, Args...>> * = nullptr> #if __cplusplus >= 201703L
&& std::is_invocable_v<Func, Args...>
#endif
> * = nullptr>
#else #else
template<typename T, typename Func> template<typename T, typename Func>
#endif #endif

View file

@ -10,6 +10,7 @@
#include <array> #include <array>
#include <iterator> #include <iterator>
#include <limits> #include <limits>
#include <stddef.h>
#include <type_traits> #include <type_traits>
namespace libcamera { namespace libcamera {
@ -346,7 +347,13 @@ public:
} }
constexpr Span(const Span &other) noexcept = default; constexpr Span(const Span &other) noexcept = default;
constexpr Span &operator=(const Span &other) noexcept = default;
constexpr Span &operator=(const Span &other) noexcept
{
data_ = other.data_;
size_ = other.size_;
return *this;
}
constexpr iterator begin() const { return data(); } constexpr iterator begin() const { return data(); }
constexpr const_iterator cbegin() const { return begin(); } constexpr const_iterator cbegin() const { return begin(); }

View file

@ -13,10 +13,8 @@
#include <libcamera/base/private.h> #include <libcamera/base/private.h>
#include <libcamera/base/class.h>
#include <libcamera/base/message.h> #include <libcamera/base/message.h>
#include <libcamera/base/signal.h> #include <libcamera/base/signal.h>
#include <libcamera/base/span.h>
#include <libcamera/base/utils.h> #include <libcamera/base/utils.h>
namespace libcamera { namespace libcamera {
@ -37,8 +35,6 @@ public:
void exit(int code = 0); void exit(int code = 0);
bool wait(utils::duration duration = utils::duration::max()); bool wait(utils::duration duration = utils::duration::max());
int setThreadAffinity(const Span<const unsigned int> &cpus);
bool isRunning(); bool isRunning();
Signal<> finished; Signal<> finished;
@ -48,21 +44,16 @@ public:
EventDispatcher *eventDispatcher(); EventDispatcher *eventDispatcher();
void dispatchMessages(Message::Type type = Message::Type::None, void dispatchMessages(Message::Type type = Message::Type::None);
Object *receiver = nullptr);
protected: protected:
int exec(); int exec();
virtual void run(); virtual void run();
private: private:
LIBCAMERA_DISABLE_COPY_AND_MOVE(Thread)
void startThread(); void startThread();
void finishThread(); void finishThread();
void setThreadAffinityInternal();
void postMessage(std::unique_ptr<Message> msg, Object *receiver); void postMessage(std::unique_ptr<Message> msg, Object *receiver);
void removeMessages(Object *receiver); void removeMessages(Object *receiver);

View file

@ -8,6 +8,7 @@
#pragma once #pragma once
#include <chrono> #include <chrono>
#include <stdint.h>
#include <libcamera/base/private.h> #include <libcamera/base/private.h>

View file

@ -10,6 +10,7 @@
#include <utility> #include <utility>
#include <libcamera/base/class.h> #include <libcamera/base/class.h>
#include <libcamera/base/compiler.h>
namespace libcamera { namespace libcamera {
@ -42,7 +43,7 @@ public:
return *this; return *this;
} }
[[nodiscard]] int release() __nodiscard int release()
{ {
int fd = fd_; int fd = fd_;
fd_ = -1; fd_ = -1;

View file

@ -9,13 +9,12 @@
#include <algorithm> #include <algorithm>
#include <chrono> #include <chrono>
#include <functional>
#include <iterator> #include <iterator>
#include <memory>
#include <ostream> #include <ostream>
#include <sstream> #include <sstream>
#include <stdint.h>
#include <string.h>
#include <string> #include <string>
#include <string.h>
#include <sys/time.h> #include <sys/time.h>
#include <type_traits> #include <type_traits>
#include <utility> #include <utility>
@ -91,30 +90,6 @@ template<typename T,
_hex hex(T value, unsigned int width = 0); _hex hex(T value, unsigned int width = 0);
#ifndef __DOXYGEN__ #ifndef __DOXYGEN__
template<>
inline _hex hex<int8_t>(int8_t value, unsigned int width)
{
return { static_cast<uint64_t>(value), width ? width : 2 };
}
template<>
inline _hex hex<uint8_t>(uint8_t value, unsigned int width)
{
return { static_cast<uint64_t>(value), width ? width : 2 };
}
template<>
inline _hex hex<int16_t>(int16_t value, unsigned int width)
{
return { static_cast<uint64_t>(value), width ? width : 4 };
}
template<>
inline _hex hex<uint16_t>(uint16_t value, unsigned int width)
{
return { static_cast<uint64_t>(value), width ? width : 4 };
}
template<> template<>
inline _hex hex<int32_t>(int32_t value, unsigned int width) inline _hex hex<int32_t>(int32_t value, unsigned int width)
{ {
@ -205,16 +180,7 @@ public:
iterator &operator++(); iterator &operator++();
std::string operator*() const; std::string operator*() const;
bool operator!=(const iterator &other) const;
bool operator==(const iterator &other) const
{
return pos_ == other.pos_;
}
bool operator!=(const iterator &other) const
{
return !(*this == other);
}
private: private:
const StringSplitter *ss_; const StringSplitter *ss_;
@ -222,15 +188,8 @@ public:
std::string::size_type next_; std::string::size_type next_;
}; };
iterator begin() const iterator begin() const;
{ iterator end() const;
return { this, 0 };
}
iterator end() const
{
return { this, std::string::npos };
}
private: private:
std::string str_; std::string str_;
@ -416,18 +375,6 @@ constexpr std::underlying_type_t<Enum> to_underlying(Enum e) noexcept
return static_cast<std::underlying_type_t<Enum>>(e); return static_cast<std::underlying_type_t<Enum>>(e);
} }
class ScopeExitActions
{
public:
~ScopeExitActions();
void operator+=(std::function<void()> &&action);
void release();
private:
std::vector<std::function<void()>> actions_;
};
} /* namespace utils */ } /* namespace utils */
#ifndef __DOXYGEN__ #ifndef __DOXYGEN__

View file

@ -9,7 +9,6 @@
#include <memory> #include <memory>
#include <string> #include <string>
#include <string_view>
#include <sys/types.h> #include <sys/types.h>
#include <vector> #include <vector>
@ -32,7 +31,7 @@ public:
void stop(); void stop();
std::vector<std::shared_ptr<Camera>> cameras() const; std::vector<std::shared_ptr<Camera>> cameras() const;
std::shared_ptr<Camera> get(std::string_view id); std::shared_ptr<Camera> get(const std::string &id);
static const std::string &version() { return version_; } static const std::string &version() { return version_; }

View file

@ -2,7 +2,7 @@
/* /*
* Copyright (C) 2019, Google Inc. * Copyright (C) 2019, Google Inc.
* *
* {{mode|capitalize}} ID list * Control ID list
* *
* This file is auto-generated. Do not edit. * This file is auto-generated. Do not edit.
*/ */
@ -18,44 +18,18 @@
namespace libcamera { namespace libcamera {
namespace {{mode}} { namespace controls {
extern const ControlIdMap {{mode}};
{%- for vendor, ctrls in controls -%}
{% if vendor != 'libcamera' %}
namespace {{vendor}} {
#define LIBCAMERA_HAS_{{vendor|upper}}_VENDOR_{{mode|upper}}
{%- endif %}
{% if ctrls %}
enum { enum {
{%- for ctrl in ctrls %} ${ids}
{{ctrl.name|snake_case|upper}} = {{ctrl.id}},
{%- endfor %}
}; };
{% endif %}
{% for ctrl in ctrls -%} ${controls}
{% if ctrl.is_enum -%}
enum {{ctrl.name}}Enum {
{%- for enum in ctrl.enum_values %}
{{enum.name}} = {{enum.value}},
{%- endfor %}
};
extern const std::array<const ControlValue, {{ctrl.enum_values_count}}> {{ctrl.name}}Values;
extern const std::map<std::string, {{ctrl.type}}> {{ctrl.name}}NameValueMap;
{% endif -%}
extern const Control<{{ctrl.type}}> {{ctrl.name}};
{% endfor -%}
{% if vendor != 'libcamera' %} extern const ControlIdMap controls;
} /* namespace {{vendor}} */
{% endif -%}
{% endfor %} ${vendor_controls}
} /* namespace {{mode}} */
} /* namespace controls */
} /* namespace libcamera */ } /* namespace libcamera */

View file

@ -8,7 +8,6 @@
#pragma once #pragma once
#include <assert.h> #include <assert.h>
#include <map>
#include <optional> #include <optional>
#include <set> #include <set>
#include <stdint.h> #include <stdint.h>
@ -17,7 +16,6 @@
#include <vector> #include <vector>
#include <libcamera/base/class.h> #include <libcamera/base/class.h>
#include <libcamera/base/flags.h>
#include <libcamera/base/span.h> #include <libcamera/base/span.h>
#include <libcamera/geometry.h> #include <libcamera/geometry.h>
@ -30,102 +28,67 @@ enum ControlType {
ControlTypeNone, ControlTypeNone,
ControlTypeBool, ControlTypeBool,
ControlTypeByte, ControlTypeByte,
ControlTypeUnsigned16,
ControlTypeUnsigned32,
ControlTypeInteger32, ControlTypeInteger32,
ControlTypeInteger64, ControlTypeInteger64,
ControlTypeFloat, ControlTypeFloat,
ControlTypeString, ControlTypeString,
ControlTypeRectangle, ControlTypeRectangle,
ControlTypeSize, ControlTypeSize,
ControlTypePoint,
}; };
namespace details { namespace details {
template<typename T, typename = std::void_t<>> template<typename T>
struct control_type { struct control_type {
}; };
template<> template<>
struct control_type<void> { struct control_type<void> {
static constexpr ControlType value = ControlTypeNone; static constexpr ControlType value = ControlTypeNone;
static constexpr std::size_t size = 0;
}; };
template<> template<>
struct control_type<bool> { struct control_type<bool> {
static constexpr ControlType value = ControlTypeBool; static constexpr ControlType value = ControlTypeBool;
static constexpr std::size_t size = 0;
}; };
template<> template<>
struct control_type<uint8_t> { struct control_type<uint8_t> {
static constexpr ControlType value = ControlTypeByte; static constexpr ControlType value = ControlTypeByte;
static constexpr std::size_t size = 0;
};
template<>
struct control_type<uint16_t> {
static constexpr ControlType value = ControlTypeUnsigned16;
static constexpr std::size_t size = 0;
};
template<>
struct control_type<uint32_t> {
static constexpr ControlType value = ControlTypeUnsigned32;
static constexpr std::size_t size = 0;
}; };
template<> template<>
struct control_type<int32_t> { struct control_type<int32_t> {
static constexpr ControlType value = ControlTypeInteger32; static constexpr ControlType value = ControlTypeInteger32;
static constexpr std::size_t size = 0;
}; };
template<> template<>
struct control_type<int64_t> { struct control_type<int64_t> {
static constexpr ControlType value = ControlTypeInteger64; static constexpr ControlType value = ControlTypeInteger64;
static constexpr std::size_t size = 0;
}; };
template<> template<>
struct control_type<float> { struct control_type<float> {
static constexpr ControlType value = ControlTypeFloat; static constexpr ControlType value = ControlTypeFloat;
static constexpr std::size_t size = 0;
}; };
template<> template<>
struct control_type<std::string> { struct control_type<std::string> {
static constexpr ControlType value = ControlTypeString; static constexpr ControlType value = ControlTypeString;
static constexpr std::size_t size = 0;
}; };
template<> template<>
struct control_type<Rectangle> { struct control_type<Rectangle> {
static constexpr ControlType value = ControlTypeRectangle; static constexpr ControlType value = ControlTypeRectangle;
static constexpr std::size_t size = 0;
}; };
template<> template<>
struct control_type<Size> { struct control_type<Size> {
static constexpr ControlType value = ControlTypeSize; static constexpr ControlType value = ControlTypeSize;
static constexpr std::size_t size = 0;
};
template<>
struct control_type<Point> {
static constexpr ControlType value = ControlTypePoint;
static constexpr std::size_t size = 0;
}; };
template<typename T, std::size_t N> template<typename T, std::size_t N>
struct control_type<Span<T, N>, std::enable_if_t<control_type<std::remove_cv_t<T>>::size == 0>> : public control_type<std::remove_cv_t<T>> { struct control_type<Span<T, N>> : public control_type<std::remove_cv_t<T>> {
static constexpr std::size_t size = N;
};
template<typename T>
struct control_type<T, std::enable_if_t<std::is_enum_v<T> && sizeof(T) == sizeof(int32_t)>> : public control_type<int32_t> {
}; };
} /* namespace details */ } /* namespace details */
@ -250,44 +213,23 @@ private:
class ControlId class ControlId
{ {
public: public:
enum class Direction { ControlId(unsigned int id, const std::string &name, ControlType type)
In = (1 << 0), : id_(id), name_(name), type_(type)
Out = (1 << 1), {
}; }
using DirectionFlags = Flags<Direction>;
ControlId(unsigned int id, const std::string &name, const std::string &vendor,
ControlType type, DirectionFlags direction,
std::size_t size = 0,
const std::map<std::string, int32_t> &enumStrMap = {});
unsigned int id() const { return id_; } unsigned int id() const { return id_; }
const std::string &name() const { return name_; } const std::string &name() const { return name_; }
const std::string &vendor() const { return vendor_; }
ControlType type() const { return type_; } ControlType type() const { return type_; }
DirectionFlags direction() const { return direction_; }
bool isInput() const { return !!(direction_ & Direction::In); }
bool isOutput() const { return !!(direction_ & Direction::Out); }
bool isArray() const { return size_ > 0; }
std::size_t size() const { return size_; }
const std::map<int32_t, std::string> &enumerators() const { return reverseMap_; }
private: private:
LIBCAMERA_DISABLE_COPY_AND_MOVE(ControlId) LIBCAMERA_DISABLE_COPY_AND_MOVE(ControlId)
unsigned int id_; unsigned int id_;
std::string name_; std::string name_;
std::string vendor_;
ControlType type_; ControlType type_;
DirectionFlags direction_;
std::size_t size_;
std::map<std::string, int32_t> enumStrMap_;
std::map<int32_t, std::string> reverseMap_;
}; };
LIBCAMERA_FLAGS_ENABLE_OPERATORS(ControlId::Direction)
static inline bool operator==(unsigned int lhs, const ControlId &rhs) static inline bool operator==(unsigned int lhs, const ControlId &rhs)
{ {
return lhs == rhs.id(); return lhs == rhs.id();
@ -314,11 +256,8 @@ class Control : public ControlId
public: public:
using type = T; using type = T;
Control(unsigned int id, const char *name, const char *vendor, Control(unsigned int id, const char *name)
ControlId::DirectionFlags direction, : ControlId(id, name, details::control_type<std::remove_cv_t<T>>::value)
const std::map<std::string, int32_t> &enumStrMap = {})
: ControlId(id, name, vendor, details::control_type<std::remove_cv_t<T>>::value,
direction, details::control_type<std::remove_cv_t<T>>::size, enumStrMap)
{ {
} }

View file

@ -7,6 +7,7 @@
#pragma once #pragma once
#include <assert.h>
#include <limits> #include <limits>
#include <memory> #include <memory>
#include <stdint.h> #include <stdint.h>
@ -26,7 +27,6 @@ struct FrameMetadata {
FrameSuccess, FrameSuccess,
FrameError, FrameError,
FrameCancelled, FrameCancelled,
FrameStartup,
}; };
struct Plane { struct Plane {

View file

@ -11,6 +11,8 @@
#include <ostream> #include <ostream>
#include <string> #include <string>
#include <libcamera/base/compiler.h>
namespace libcamera { namespace libcamera {
class Rectangle; class Rectangle;
@ -108,8 +110,8 @@ public:
return *this; return *this;
} }
[[nodiscard]] constexpr Size alignedDownTo(unsigned int hAlignment, __nodiscard constexpr Size alignedDownTo(unsigned int hAlignment,
unsigned int vAlignment) const unsigned int vAlignment) const
{ {
return { return {
width / hAlignment * hAlignment, width / hAlignment * hAlignment,
@ -117,8 +119,8 @@ public:
}; };
} }
[[nodiscard]] constexpr Size alignedUpTo(unsigned int hAlignment, __nodiscard constexpr Size alignedUpTo(unsigned int hAlignment,
unsigned int vAlignment) const unsigned int vAlignment) const
{ {
return { return {
(width + hAlignment - 1) / hAlignment * hAlignment, (width + hAlignment - 1) / hAlignment * hAlignment,
@ -126,7 +128,7 @@ public:
}; };
} }
[[nodiscard]] constexpr Size boundedTo(const Size &bound) const __nodiscard constexpr Size boundedTo(const Size &bound) const
{ {
return { return {
std::min(width, bound.width), std::min(width, bound.width),
@ -134,7 +136,7 @@ public:
}; };
} }
[[nodiscard]] constexpr Size expandedTo(const Size &expand) const __nodiscard constexpr Size expandedTo(const Size &expand) const
{ {
return { return {
std::max(width, expand.width), std::max(width, expand.width),
@ -142,7 +144,7 @@ public:
}; };
} }
[[nodiscard]] constexpr Size grownBy(const Size &margins) const __nodiscard constexpr Size grownBy(const Size &margins) const
{ {
return { return {
width + margins.width, width + margins.width,
@ -150,7 +152,7 @@ public:
}; };
} }
[[nodiscard]] constexpr Size shrunkBy(const Size &margins) const __nodiscard constexpr Size shrunkBy(const Size &margins) const
{ {
return { return {
width > margins.width ? width - margins.width : 0, width > margins.width ? width - margins.width : 0,
@ -158,10 +160,10 @@ public:
}; };
} }
[[nodiscard]] Size boundedToAspectRatio(const Size &ratio) const; __nodiscard Size boundedToAspectRatio(const Size &ratio) const;
[[nodiscard]] Size expandedToAspectRatio(const Size &ratio) const; __nodiscard Size expandedToAspectRatio(const Size &ratio) const;
[[nodiscard]] Rectangle centeredTo(const Point &center) const; __nodiscard Rectangle centeredTo(const Point &center) const;
Size operator*(float factor) const; Size operator*(float factor) const;
Size operator/(float factor) const; Size operator/(float factor) const;
@ -260,15 +262,6 @@ public:
{ {
} }
constexpr Rectangle(const Point &point1, const Point &point2)
: Rectangle(std::min(point1.x, point2.x), std::min(point1.y, point2.y),
static_cast<unsigned int>(std::max(point1.x, point2.x)) -
static_cast<unsigned int>(std::min(point1.x, point2.x)),
static_cast<unsigned int>(std::max(point1.y, point2.y)) -
static_cast<unsigned int>(std::min(point1.y, point2.y)))
{
}
int x; int x;
int y; int y;
unsigned int width; unsigned int width;
@ -292,14 +285,11 @@ public:
Rectangle &scaleBy(const Size &numerator, const Size &denominator); Rectangle &scaleBy(const Size &numerator, const Size &denominator);
Rectangle &translateBy(const Point &point); Rectangle &translateBy(const Point &point);
[[nodiscard]] Rectangle boundedTo(const Rectangle &bound) const; __nodiscard Rectangle boundedTo(const Rectangle &bound) const;
[[nodiscard]] Rectangle enclosedIn(const Rectangle &boundary) const; __nodiscard Rectangle enclosedIn(const Rectangle &boundary) const;
[[nodiscard]] Rectangle scaledBy(const Size &numerator, __nodiscard Rectangle scaledBy(const Size &numerator,
const Size &denominator) const; const Size &denominator) const;
[[nodiscard]] Rectangle translatedBy(const Point &point) const; __nodiscard Rectangle translatedBy(const Point &point) const;
Rectangle transformedBetween(const Rectangle &source,
const Rectangle &target) const;
}; };
bool operator==(const Rectangle &lhs, const Rectangle &rhs); bool operator==(const Rectangle &lhs, const Rectangle &rhs);

View file

@ -11,7 +11,6 @@
#include <list> #include <list>
#include <memory> #include <memory>
#include <set> #include <set>
#include <stdint.h>
#include <string> #include <string>
#include <libcamera/base/class.h> #include <libcamera/base/class.h>
@ -33,7 +32,6 @@ public:
~Private(); ~Private();
PipelineHandler *pipe() { return pipe_.get(); } PipelineHandler *pipe() { return pipe_.get(); }
const PipelineHandler *pipe() const { return pipe_.get(); }
std::list<Request *> queuedRequests_; std::list<Request *> queuedRequests_;
ControlInfoMap controlInfo_; ControlInfoMap controlInfo_;

View file

@ -7,7 +7,6 @@
#pragma once #pragma once
#include <memory> #include <memory>
#include <stdint.h>
#include <string> #include <string>
#include <libcamera/base/class.h> #include <libcamera/base/class.h>

View file

@ -9,6 +9,7 @@
#include <libcamera/camera_manager.h> #include <libcamera/camera_manager.h>
#include <map>
#include <memory> #include <memory>
#include <sys/types.h> #include <sys/types.h>
#include <vector> #include <vector>
@ -18,14 +19,13 @@
#include <libcamera/base/thread.h> #include <libcamera/base/thread.h>
#include <libcamera/base/thread_annotations.h> #include <libcamera/base/thread_annotations.h>
#include "libcamera/internal/ipa_manager.h"
#include "libcamera/internal/process.h" #include "libcamera/internal/process.h"
namespace libcamera { namespace libcamera {
class Camera; class Camera;
class DeviceEnumerator; class DeviceEnumerator;
class IPAManager;
class PipelineHandlerFactoryBase;
class CameraManager::Private : public Extensible::Private, public Thread class CameraManager::Private : public Extensible::Private, public Thread
{ {
@ -38,8 +38,6 @@ public:
void addCamera(std::shared_ptr<Camera> camera) LIBCAMERA_TSA_EXCLUDES(mutex_); void addCamera(std::shared_ptr<Camera> camera) LIBCAMERA_TSA_EXCLUDES(mutex_);
void removeCamera(std::shared_ptr<Camera> camera) LIBCAMERA_TSA_EXCLUDES(mutex_); void removeCamera(std::shared_ptr<Camera> camera) LIBCAMERA_TSA_EXCLUDES(mutex_);
IPAManager *ipaManager() const { return ipaManager_.get(); }
protected: protected:
void run() override; void run() override;
@ -64,7 +62,7 @@ private:
std::unique_ptr<DeviceEnumerator> enumerator_; std::unique_ptr<DeviceEnumerator> enumerator_;
std::unique_ptr<IPAManager> ipaManager_; IPAManager ipaManager_;
ProcessManager processManager_; ProcessManager processManager_;
}; };

View file

@ -8,12 +8,11 @@
#pragma once #pragma once
#include <memory> #include <memory>
#include <stdint.h>
#include <string> #include <string>
#include <variant>
#include <vector> #include <vector>
#include <libcamera/base/class.h> #include <libcamera/base/class.h>
#include <libcamera/base/log.h>
#include <libcamera/control_ids.h> #include <libcamera/control_ids.h>
#include <libcamera/controls.h> #include <libcamera/controls.h>
@ -21,8 +20,10 @@
#include <libcamera/orientation.h> #include <libcamera/orientation.h>
#include <libcamera/transform.h> #include <libcamera/transform.h>
#include <libcamera/ipa/core_ipa_interface.h>
#include "libcamera/internal/bayer_format.h" #include "libcamera/internal/bayer_format.h"
#include "libcamera/internal/camera_sensor_properties.h" #include "libcamera/internal/formats.h"
#include "libcamera/internal/v4l2_subdevice.h" #include "libcamera/internal/v4l2_subdevice.h"
namespace libcamera { namespace libcamera {
@ -31,101 +32,95 @@ class CameraLens;
class MediaEntity; class MediaEntity;
class SensorConfiguration; class SensorConfiguration;
struct CameraSensorProperties;
enum class Orientation; enum class Orientation;
struct IPACameraSensorInfo; class CameraSensor : protected Loggable
class CameraSensor
{ {
public: public:
virtual ~CameraSensor(); explicit CameraSensor(const MediaEntity *entity);
~CameraSensor();
virtual const std::string &model() const = 0; int init();
virtual const std::string &id() const = 0;
virtual const MediaEntity *entity() const = 0; const std::string &model() const { return model_; }
virtual V4L2Subdevice *device() = 0; const std::string &id() const { return id_; }
virtual CameraLens *focusLens() = 0; const MediaEntity *entity() const { return entity_; }
V4L2Subdevice *device() { return subdev_.get(); }
virtual const std::vector<unsigned int> &mbusCodes() const = 0; CameraLens *focusLens() { return focusLens_.get(); }
virtual std::vector<Size> sizes(unsigned int mbusCode) const = 0;
virtual Size resolution() const = 0;
virtual V4L2SubdeviceFormat const std::vector<unsigned int> &mbusCodes() const { return mbusCodes_; }
getFormat(const std::vector<unsigned int> &mbusCodes, std::vector<Size> sizes(unsigned int mbusCode) const;
const Size &size, const Size maxSize = Size()) const = 0; Size resolution() const;
virtual int setFormat(V4L2SubdeviceFormat *format,
Transform transform = Transform::Identity) = 0;
virtual int tryFormat(V4L2SubdeviceFormat *format) const = 0;
virtual int applyConfiguration(const SensorConfiguration &config, V4L2SubdeviceFormat getFormat(const std::vector<unsigned int> &mbusCodes,
Transform transform = Transform::Identity, const Size &size) const;
V4L2SubdeviceFormat *sensorFormat = nullptr) = 0; int setFormat(V4L2SubdeviceFormat *format,
Transform transform = Transform::Identity);
int tryFormat(V4L2SubdeviceFormat *format) const;
virtual V4L2Subdevice::Stream imageStream() const; int applyConfiguration(const SensorConfiguration &config,
virtual std::optional<V4L2Subdevice::Stream> embeddedDataStream() const; Transform transform = Transform::Identity,
virtual V4L2SubdeviceFormat embeddedDataFormat() const; V4L2SubdeviceFormat *sensorFormat = nullptr);
virtual int setEmbeddedDataEnabled(bool enable);
virtual const ControlList &properties() const = 0; const ControlList &properties() const { return properties_; }
virtual int sensorInfo(IPACameraSensorInfo *info) const = 0; int sensorInfo(IPACameraSensorInfo *info) const;
virtual Transform computeTransform(Orientation *orientation) const = 0; Transform computeTransform(Orientation *orientation) const;
virtual BayerFormat::Order bayerOrder(Transform t) const = 0; BayerFormat::Order bayerOrder(Transform t) const;
virtual const ControlInfoMap &controls() const = 0; const ControlInfoMap &controls() const;
virtual ControlList getControls(const std::vector<uint32_t> &ids) = 0; ControlList getControls(const std::vector<uint32_t> &ids);
virtual int setControls(ControlList *ctrls) = 0; int setControls(ControlList *ctrls);
virtual const std::vector<controls::draft::TestPatternModeEnum> & const std::vector<controls::draft::TestPatternModeEnum> &testPatternModes() const
testPatternModes() const = 0; {
virtual int setTestPatternMode(controls::draft::TestPatternModeEnum mode) = 0; return testPatternModes_;
virtual const CameraSensorProperties::SensorDelays &sensorDelays() = 0; }
}; int setTestPatternMode(controls::draft::TestPatternModeEnum mode);
class CameraSensorFactoryBase protected:
{ std::string logPrefix() const override;
public:
CameraSensorFactoryBase(const char *name, int priority);
virtual ~CameraSensorFactoryBase() = default;
static std::unique_ptr<CameraSensor> create(MediaEntity *entity);
const std::string &name() const { return name_; }
int priority() const { return priority_; }
private: private:
LIBCAMERA_DISABLE_COPY_AND_MOVE(CameraSensorFactoryBase) LIBCAMERA_DISABLE_COPY(CameraSensor)
static std::vector<CameraSensorFactoryBase *> &factories(); int generateId();
int validateSensorDriver();
void initVimcDefaultProperties();
void initStaticProperties();
void initTestPatternModes();
int initProperties();
int discoverAncillaryDevices();
int applyTestPatternMode(controls::draft::TestPatternModeEnum mode);
static void registerFactory(CameraSensorFactoryBase *factory); const MediaEntity *entity_;
std::unique_ptr<V4L2Subdevice> subdev_;
unsigned int pad_;
virtual std::variant<std::unique_ptr<CameraSensor>, int> const CameraSensorProperties *staticProps_;
match(MediaEntity *entity) const = 0;
std::string name_; std::string model_;
int priority_; std::string id_;
V4L2Subdevice::Formats formats_;
std::vector<unsigned int> mbusCodes_;
std::vector<Size> sizes_;
std::vector<controls::draft::TestPatternModeEnum> testPatternModes_;
controls::draft::TestPatternModeEnum testPatternMode_;
Size pixelArraySize_;
Rectangle activeArea_;
const BayerFormat *bayerFormat_;
bool supportFlips_;
bool flipsAlterBayerOrder_;
Orientation mountingOrientation_;
ControlList properties_;
std::unique_ptr<CameraLens> focusLens_;
}; };
template<typename _CameraSensor>
class CameraSensorFactory final : public CameraSensorFactoryBase
{
public:
CameraSensorFactory(const char *name, int priority)
: CameraSensorFactoryBase(name, priority)
{
}
private:
std::variant<std::unique_ptr<CameraSensor>, int>
match(MediaEntity *entity) const override
{
return _CameraSensor::match(entity);
}
};
#define REGISTER_CAMERA_SENSOR(sensor, priority) \
static CameraSensorFactory<sensor> global_##sensor##Factory{ #sensor, priority };
} /* namespace libcamera */ } /* namespace libcamera */

View file

@ -8,7 +8,6 @@
#pragma once #pragma once
#include <map> #include <map>
#include <stdint.h>
#include <string> #include <string>
#include <libcamera/control_ids.h> #include <libcamera/control_ids.h>
@ -17,18 +16,10 @@
namespace libcamera { namespace libcamera {
struct CameraSensorProperties { struct CameraSensorProperties {
struct SensorDelays {
uint8_t exposureDelay;
uint8_t gainDelay;
uint8_t vblankDelay;
uint8_t hblankDelay;
};
static const CameraSensorProperties *get(const std::string &sensor); static const CameraSensorProperties *get(const std::string &sensor);
Size unitCellSize; Size unitCellSize;
std::map<controls::draft::TestPatternModeEnum, int32_t> testPatternModes; std::map<controls::draft::TestPatternModeEnum, int32_t> testPatternModes;
SensorDelays sensorDelays;
}; };
} /* namespace libcamera */ } /* namespace libcamera */

View file

@ -1,68 +0,0 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2024, Raspberry Pi Ltd
*
* Camera recovery algorithm
*/
#pragma once
#include <stdint.h>
namespace libcamera {
class ClockRecovery
{
public:
ClockRecovery();
void configure(unsigned int numSamples = 100, unsigned int maxJitter = 2000,
unsigned int minSamples = 10, unsigned int errorThreshold = 50000);
void reset();
void addSample();
void addSample(uint64_t input, uint64_t output);
uint64_t getOutput(uint64_t input);
private:
/* Approximate number of samples over which the model state persists. */
unsigned int numSamples_;
/* Remove any output jitter larger than this immediately. */
unsigned int maxJitter_;
/* Number of samples required before we start to use model estimates. */
unsigned int minSamples_;
/* Threshold above which we assume the wallclock has been reset. */
unsigned int errorThreshold_;
/* How many samples seen (up to numSamples_). */
unsigned int count_;
/* This gets subtracted from all input values, just to make the numbers easier. */
uint64_t inputBase_;
/* As above, for the output. */
uint64_t outputBase_;
/* The previous input sample. */
uint64_t lastInput_;
/* The previous output sample. */
uint64_t lastOutput_;
/* Average x value seen so far. */
double xAve_;
/* Average y value seen so far */
double yAve_;
/* Average x^2 value seen so far. */
double x2Ave_;
/* Average x*y value seen so far. */
double xyAve_;
/*
* The latest estimate of linear parameters to derive the output clock
* from the input.
*/
double slope_;
double offset_;
/* Use this cumulative error to monitor for spontaneous clock updates. */
double error_;
};
} /* namespace libcamera */

View file

@ -14,11 +14,9 @@
#include <memory> #include <memory>
#include <string> #include <string>
#include <tuple> #include <tuple>
#include <utility>
#include <vector> #include <vector>
#include <libcamera/base/class.h> #include <libcamera/base/class.h>
#include <libcamera/base/flags.h>
#include <libcamera/base/signal.h> #include <libcamera/base/signal.h>
#include <libcamera/geometry.h> #include <libcamera/geometry.h>
@ -28,25 +26,12 @@ namespace libcamera {
class FrameBuffer; class FrameBuffer;
class MediaDevice; class MediaDevice;
class PixelFormat; class PixelFormat;
class Stream;
struct StreamConfiguration; struct StreamConfiguration;
class Converter class Converter
{ {
public: public:
enum class Feature { Converter(MediaDevice *media);
None = 0,
InputCrop = (1 << 0),
};
using Features = Flags<Feature>;
enum class Alignment {
Down = 0,
Up,
};
Converter(MediaDevice *media, Features features = Feature::None);
virtual ~Converter(); virtual ~Converter();
virtual int loadConfiguration(const std::string &filename) = 0; virtual int loadConfiguration(const std::string &filename) = 0;
@ -56,45 +41,25 @@ public:
virtual std::vector<PixelFormat> formats(PixelFormat input) = 0; virtual std::vector<PixelFormat> formats(PixelFormat input) = 0;
virtual SizeRange sizes(const Size &input) = 0; virtual SizeRange sizes(const Size &input) = 0;
virtual Size adjustInputSize(const PixelFormat &pixFmt,
const Size &size,
Alignment align = Alignment::Down) = 0;
virtual Size adjustOutputSize(const PixelFormat &pixFmt,
const Size &size,
Alignment align = Alignment::Down) = 0;
virtual std::tuple<unsigned int, unsigned int> virtual std::tuple<unsigned int, unsigned int>
strideAndFrameSize(const PixelFormat &pixelFormat, const Size &size) = 0; strideAndFrameSize(const PixelFormat &pixelFormat, const Size &size) = 0;
virtual int validateOutput(StreamConfiguration *cfg, bool *adjusted,
Alignment align = Alignment::Down) = 0;
virtual int configure(const StreamConfiguration &inputCfg, virtual int configure(const StreamConfiguration &inputCfg,
const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs) = 0; const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs) = 0;
virtual bool isConfigured(const Stream *stream) const = 0; virtual int exportBuffers(unsigned int output, unsigned int count,
virtual int exportBuffers(const Stream *stream, unsigned int count,
std::vector<std::unique_ptr<FrameBuffer>> *buffers) = 0; std::vector<std::unique_ptr<FrameBuffer>> *buffers) = 0;
virtual int start() = 0; virtual int start() = 0;
virtual void stop() = 0; virtual void stop() = 0;
virtual int queueBuffers(FrameBuffer *input, virtual int queueBuffers(FrameBuffer *input,
const std::map<const Stream *, FrameBuffer *> &outputs) = 0; const std::map<unsigned int, FrameBuffer *> &outputs) = 0;
virtual int setInputCrop(const Stream *stream, Rectangle *rect) = 0;
virtual std::pair<Rectangle, Rectangle> inputCropBounds() = 0;
virtual std::pair<Rectangle, Rectangle> inputCropBounds(const Stream *stream) = 0;
Signal<FrameBuffer *> inputBufferReady; Signal<FrameBuffer *> inputBufferReady;
Signal<FrameBuffer *> outputBufferReady; Signal<FrameBuffer *> outputBufferReady;
const std::string &deviceNode() const { return deviceNode_; } const std::string &deviceNode() const { return deviceNode_; }
Features features() const { return features_; }
protected:
Features features_;
private: private:
std::string deviceNode_; std::string deviceNode_;
}; };

View file

@ -28,9 +28,7 @@ class FrameBuffer;
class MediaDevice; class MediaDevice;
class Size; class Size;
class SizeRange; class SizeRange;
class Stream;
struct StreamConfiguration; struct StreamConfiguration;
class Rectangle;
class V4L2M2MDevice; class V4L2M2MDevice;
class V4L2M2MConverter : public Converter class V4L2M2MConverter : public Converter
@ -38,45 +36,31 @@ class V4L2M2MConverter : public Converter
public: public:
V4L2M2MConverter(MediaDevice *media); V4L2M2MConverter(MediaDevice *media);
int loadConfiguration([[maybe_unused]] const std::string &filename) override { return 0; } int loadConfiguration([[maybe_unused]] const std::string &filename) { return 0; }
bool isValid() const override { return m2m_ != nullptr; } bool isValid() const { return m2m_ != nullptr; }
std::vector<PixelFormat> formats(PixelFormat input) override; std::vector<PixelFormat> formats(PixelFormat input);
SizeRange sizes(const Size &input) override; SizeRange sizes(const Size &input);
std::tuple<unsigned int, unsigned int> std::tuple<unsigned int, unsigned int>
strideAndFrameSize(const PixelFormat &pixelFormat, const Size &size) override; strideAndFrameSize(const PixelFormat &pixelFormat, const Size &size);
Size adjustInputSize(const PixelFormat &pixFmt,
const Size &size, Alignment align = Alignment::Down) override;
Size adjustOutputSize(const PixelFormat &pixFmt,
const Size &size, Alignment align = Alignment::Down) override;
int configure(const StreamConfiguration &inputCfg, int configure(const StreamConfiguration &inputCfg,
const std::vector<std::reference_wrapper<StreamConfiguration>> const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfg);
&outputCfg) override; int exportBuffers(unsigned int output, unsigned int count,
bool isConfigured(const Stream *stream) const override; std::vector<std::unique_ptr<FrameBuffer>> *buffers);
int exportBuffers(const Stream *stream, unsigned int count,
std::vector<std::unique_ptr<FrameBuffer>> *buffers) override;
int start() override; int start();
void stop() override; void stop();
int validateOutput(StreamConfiguration *cfg, bool *adjusted,
Alignment align = Alignment::Down) override;
int queueBuffers(FrameBuffer *input, int queueBuffers(FrameBuffer *input,
const std::map<const Stream *, FrameBuffer *> &outputs) override; const std::map<unsigned int, FrameBuffer *> &outputs);
int setInputCrop(const Stream *stream, Rectangle *rect) override;
std::pair<Rectangle, Rectangle> inputCropBounds() override { return inputCropBounds_; }
std::pair<Rectangle, Rectangle> inputCropBounds(const Stream *stream) override;
private: private:
class V4L2M2MStream : protected Loggable class Stream : protected Loggable
{ {
public: public:
V4L2M2MStream(V4L2M2MConverter *converter, const Stream *stream); Stream(V4L2M2MConverter *converter, unsigned int index);
bool isValid() const { return m2m_ != nullptr; } bool isValid() const { return m2m_ != nullptr; }
@ -90,11 +74,6 @@ private:
int queueBuffers(FrameBuffer *input, FrameBuffer *output); int queueBuffers(FrameBuffer *input, FrameBuffer *output);
int setInputSelection(unsigned int target, Rectangle *rect);
int getInputSelection(unsigned int target, Rectangle *rect);
std::pair<Rectangle, Rectangle> inputCropBounds();
protected: protected:
std::string logPrefix() const override; std::string logPrefix() const override;
@ -103,23 +82,17 @@ private:
void outputBufferReady(FrameBuffer *buffer); void outputBufferReady(FrameBuffer *buffer);
V4L2M2MConverter *converter_; V4L2M2MConverter *converter_;
const Stream *stream_; unsigned int index_;
std::unique_ptr<V4L2M2MDevice> m2m_; std::unique_ptr<V4L2M2MDevice> m2m_;
unsigned int inputBufferCount_; unsigned int inputBufferCount_;
unsigned int outputBufferCount_; unsigned int outputBufferCount_;
std::pair<Rectangle, Rectangle> inputCropBounds_;
}; };
Size adjustSizes(const Size &size, const std::vector<SizeRange> &ranges,
Alignment align);
std::unique_ptr<V4L2M2MDevice> m2m_; std::unique_ptr<V4L2M2MDevice> m2m_;
std::map<const Stream *, std::unique_ptr<V4L2M2MStream>> streams_; std::vector<Stream> streams_;
std::map<FrameBuffer *, unsigned int> queue_; std::map<FrameBuffer *, unsigned int> queue_;
std::pair<Rectangle, Rectangle> inputCropBounds_;
}; };
} /* namespace libcamera */ } /* namespace libcamera */

View file

@ -1,46 +0,0 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2024, Google Inc.
*
* Debug metadata helpers
*/
#pragma once
#include <libcamera/control_ids.h>
namespace libcamera {
class DebugMetadata
{
public:
DebugMetadata() = default;
void enableByControl(const ControlList &controls);
void enable(bool enable = true);
void setParent(DebugMetadata *parent);
void moveEntries(ControlList &list);
template<typename T, typename V>
void set(const Control<T> &ctrl, const V &value)
{
if (parent_) {
parent_->set(ctrl, value);
return;
}
if (!enabled_)
return;
cache_.set(ctrl, value);
}
void set(unsigned int id, const ControlValue &value);
private:
bool enabled_ = false;
DebugMetadata *parent_ = nullptr;
ControlList cache_;
};
} /* namespace libcamera */

View file

@ -10,15 +10,13 @@
#include <stdint.h> #include <stdint.h>
#include <unordered_map> #include <unordered_map>
#include <libcamera/base/object.h>
#include <libcamera/controls.h> #include <libcamera/controls.h>
namespace libcamera { namespace libcamera {
class V4L2Device; class V4L2Device;
class DelayedControls : public Object class DelayedControls
{ {
public: public:
struct ControlParams { struct ControlParams {

View file

@ -7,6 +7,7 @@
#pragma once #pragma once
#include <memory>
#include <string> #include <string>
#include "libcamera/internal/device_enumerator.h" #include "libcamera/internal/device_enumerator.h"

View file

@ -1,80 +0,0 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2020, Raspberry Pi Ltd
*
* Helper class for dma-buf allocations.
*/
#pragma once
#include <memory>
#include <stdint.h>
#include <string>
#include <vector>
#include <libcamera/base/flags.h>
#include <libcamera/base/shared_fd.h>
#include <libcamera/base/unique_fd.h>
namespace libcamera {
class FrameBuffer;
class DmaBufAllocator
{
public:
enum class DmaBufAllocatorFlag {
CmaHeap = 1 << 0,
SystemHeap = 1 << 1,
UDmaBuf = 1 << 2,
};
using DmaBufAllocatorFlags = Flags<DmaBufAllocatorFlag>;
DmaBufAllocator(DmaBufAllocatorFlags flags = DmaBufAllocatorFlag::CmaHeap);
~DmaBufAllocator();
bool isValid() const { return providerHandle_.isValid(); }
UniqueFD alloc(const char *name, std::size_t size);
int exportBuffers(unsigned int count,
const std::vector<unsigned int> &planeSizes,
std::vector<std::unique_ptr<FrameBuffer>> *buffers);
private:
std::unique_ptr<FrameBuffer> createBuffer(
std::string name, const std::vector<unsigned int> &planeSizes);
UniqueFD allocFromHeap(const char *name, std::size_t size);
UniqueFD allocFromUDmaBuf(const char *name, std::size_t size);
UniqueFD providerHandle_;
DmaBufAllocatorFlag type_;
};
class DmaSyncer final
{
public:
enum class SyncType {
Read = 0,
Write,
ReadWrite,
};
explicit DmaSyncer(SharedFD fd, SyncType type = SyncType::ReadWrite);
DmaSyncer(DmaSyncer &&other) = default;
DmaSyncer &operator=(DmaSyncer &&other) = default;
~DmaSyncer();
private:
LIBCAMERA_DISABLE_COPY(DmaSyncer)
void sync(uint64_t step);
SharedFD fd_;
uint64_t flags_ = 0;
};
LIBCAMERA_FLAGS_ENABLE_OPERATORS(DmaBufAllocator::DmaBufAllocatorFlag)
} /* namespace libcamera */

View file

@ -0,0 +1,38 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2020, Raspberry Pi Ltd
*
* Helper class for dma-heap allocations.
*/
#pragma once
#include <stddef.h>
#include <libcamera/base/flags.h>
#include <libcamera/base/unique_fd.h>
namespace libcamera {
class DmaHeap
{
public:
enum class DmaHeapFlag {
Cma = 1 << 0,
System = 1 << 1,
};
using DmaHeapFlags = Flags<DmaHeapFlag>;
DmaHeap(DmaHeapFlags flags = DmaHeapFlag::Cma);
~DmaHeap();
bool isValid() const { return dmaHeapHandle_.isValid(); }
UniqueFD alloc(const char *name, std::size_t size);
private:
UniqueFD dmaHeapHandle_;
};
LIBCAMERA_FLAGS_ENABLE_OPERATORS(DmaHeap::DmaHeapFlag)
} /* namespace libcamera */

View file

@ -8,6 +8,7 @@
#pragma once #pragma once
#include <array> #include <array>
#include <map>
#include <vector> #include <vector>
#include <libcamera/geometry.h> #include <libcamera/geometry.h>

View file

@ -8,7 +8,6 @@
#pragma once #pragma once
#include <memory> #include <memory>
#include <stdint.h>
#include <utility> #include <utility>
#include <libcamera/base/class.h> #include <libcamera/base/class.h>

View file

@ -7,7 +7,8 @@
#pragma once #pragma once
#include <stdint.h> #include <deque>
#include <iostream>
#include <string.h> #include <string.h>
#include <tuple> #include <tuple>
#include <type_traits> #include <type_traits>
@ -19,9 +20,10 @@
#include <libcamera/control_ids.h> #include <libcamera/control_ids.h>
#include <libcamera/framebuffer.h> #include <libcamera/framebuffer.h>
#include <libcamera/geometry.h> #include <libcamera/geometry.h>
#include <libcamera/ipa/ipa_interface.h> #include <libcamera/ipa/ipa_interface.h>
#include "libcamera/internal/byte_stream_buffer.h"
#include "libcamera/internal/camera_sensor.h"
#include "libcamera/internal/control_serializer.h" #include "libcamera/internal/control_serializer.h"
namespace libcamera { namespace libcamera {
@ -163,7 +165,7 @@ public:
std::vector<SharedFD>::const_iterator fdIter = fdsBegin; std::vector<SharedFD>::const_iterator fdIter = fdsBegin;
for (uint32_t i = 0; i < vecLen; i++) { for (uint32_t i = 0; i < vecLen; i++) {
uint32_t sizeofData = readPOD<uint32_t>(dataIter, 0, dataEnd); uint32_t sizeofData = readPOD<uint32_t>(dataIter, 0, dataEnd);
uint32_t sizeofFds = readPOD<uint32_t>(dataIter, 4, dataEnd); uint32_t sizeofFds = readPOD<uint32_t>(dataIter, 4, dataEnd);
dataIter += 8; dataIter += 8;
ret[i] = IPADataSerializer<V>::deserialize(dataIter, ret[i] = IPADataSerializer<V>::deserialize(dataIter,
@ -270,7 +272,7 @@ public:
std::vector<SharedFD>::const_iterator fdIter = fdsBegin; std::vector<SharedFD>::const_iterator fdIter = fdsBegin;
for (uint32_t i = 0; i < mapLen; i++) { for (uint32_t i = 0; i < mapLen; i++) {
uint32_t sizeofData = readPOD<uint32_t>(dataIter, 0, dataEnd); uint32_t sizeofData = readPOD<uint32_t>(dataIter, 0, dataEnd);
uint32_t sizeofFds = readPOD<uint32_t>(dataIter, 4, dataEnd); uint32_t sizeofFds = readPOD<uint32_t>(dataIter, 4, dataEnd);
dataIter += 8; dataIter += 8;
K key = IPADataSerializer<K>::deserialize(dataIter, K key = IPADataSerializer<K>::deserialize(dataIter,
@ -282,7 +284,7 @@ public:
dataIter += sizeofData; dataIter += sizeofData;
fdIter += sizeofFds; fdIter += sizeofFds;
sizeofData = readPOD<uint32_t>(dataIter, 0, dataEnd); sizeofData = readPOD<uint32_t>(dataIter, 0, dataEnd);
sizeofFds = readPOD<uint32_t>(dataIter, 4, dataEnd); sizeofFds = readPOD<uint32_t>(dataIter, 4, dataEnd);
dataIter += 8; dataIter += 8;
const V value = IPADataSerializer<V>::deserialize(dataIter, const V value = IPADataSerializer<V>::deserialize(dataIter,
@ -309,6 +311,7 @@ public:
serialize(const Flags<E> &data, [[maybe_unused]] ControlSerializer *cs = nullptr) serialize(const Flags<E> &data, [[maybe_unused]] ControlSerializer *cs = nullptr)
{ {
std::vector<uint8_t> dataVec; std::vector<uint8_t> dataVec;
dataVec.reserve(sizeof(Flags<E>));
appendPOD<uint32_t>(dataVec, static_cast<typename Flags<E>::Type>(data)); appendPOD<uint32_t>(dataVec, static_cast<typename Flags<E>::Type>(data));
return { dataVec, {} }; return { dataVec, {} };

View file

@ -7,7 +7,6 @@
#pragma once #pragma once
#include <memory>
#include <stdint.h> #include <stdint.h>
#include <vector> #include <vector>
@ -16,7 +15,6 @@
#include <libcamera/ipa/ipa_interface.h> #include <libcamera/ipa/ipa_interface.h>
#include <libcamera/ipa/ipa_module_info.h> #include <libcamera/ipa/ipa_module_info.h>
#include "libcamera/internal/camera_manager.h"
#include "libcamera/internal/ipa_module.h" #include "libcamera/internal/ipa_module.h"
#include "libcamera/internal/pipeline_handler.h" #include "libcamera/internal/pipeline_handler.h"
#include "libcamera/internal/pub_key.h" #include "libcamera/internal/pub_key.h"
@ -36,13 +34,11 @@ public:
uint32_t minVersion, uint32_t minVersion,
uint32_t maxVersion) uint32_t maxVersion)
{ {
CameraManager *cm = pipe->cameraManager(); IPAModule *m = self_->module(pipe, minVersion, maxVersion);
IPAManager *self = cm->_d()->ipaManager();
IPAModule *m = self->module(pipe, minVersion, maxVersion);
if (!m) if (!m)
return nullptr; return nullptr;
std::unique_ptr<T> proxy = std::make_unique<T>(m, !self->isSignatureValid(m)); std::unique_ptr<T> proxy = std::make_unique<T>(m, !self_->isSignatureValid(m));
if (!proxy->isValid()) { if (!proxy->isValid()) {
LOG(IPAManager, Error) << "Failed to load proxy"; LOG(IPAManager, Error) << "Failed to load proxy";
return nullptr; return nullptr;
@ -59,6 +55,8 @@ public:
#endif #endif
private: private:
static IPAManager *self_;
void parseDir(const char *libDir, unsigned int maxDepth, void parseDir(const char *libDir, unsigned int maxDepth,
std::vector<std::string> &files); std::vector<std::string> &files);
unsigned int addDir(const char *libDir, unsigned int maxDepth = 0); unsigned int addDir(const char *libDir, unsigned int maxDepth = 0);
@ -68,7 +66,7 @@ private:
bool isSignatureValid(IPAModule *ipa) const; bool isSignatureValid(IPAModule *ipa) const;
std::vector<std::unique_ptr<IPAModule>> modules_; std::vector<IPAModule *> modules_;
#if HAVE_IPA_PUBKEY #if HAVE_IPA_PUBKEY
static const uint8_t publicKeyData_[]; static const uint8_t publicKeyData_[];

View file

@ -29,7 +29,7 @@ public:
bool isValid() const; bool isValid() const;
const struct IPAModuleInfo &info() const; const struct IPAModuleInfo &info() const;
const std::vector<uint8_t> &signature() const; const std::vector<uint8_t> signature() const;
const std::string &path() const; const std::string &path() const;
bool load(); bool load();

View file

@ -7,7 +7,9 @@
#pragma once #pragma once
#include <memory>
#include <string> #include <string>
#include <vector>
#include <libcamera/ipa/ipa_interface.h> #include <libcamera/ipa/ipa_interface.h>
@ -29,8 +31,7 @@ public:
bool isValid() const { return valid_; } bool isValid() const { return valid_; }
std::string configurationFile(const std::string &name, std::string configurationFile(const std::string &file) const;
const std::string &fallbackName = std::string()) const;
protected: protected:
std::string resolvePath(const std::string &file) const; std::string resolvePath(const std::string &file) const;

View file

@ -7,7 +7,6 @@
#pragma once #pragma once
#include <stdint.h>
#include <vector> #include <vector>
#include <libcamera/base/shared_fd.h> #include <libcamera/base/shared_fd.h>

View file

@ -9,7 +9,7 @@
#include <map> #include <map>
#include <memory> #include <memory>
#include <stdint.h> #include <vector>
#include "libcamera/internal/ipc_pipe.h" #include "libcamera/internal/ipc_pipe.h"
#include "libcamera/internal/ipc_unixsocket.h" #include "libcamera/internal/ipc_unixsocket.h"

View file

@ -1,226 +0,0 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
*
* Matrix and related operations
*/
#pragma once
#include <algorithm>
#include <sstream>
#include <type_traits>
#include <vector>
#include <libcamera/base/log.h>
#include <libcamera/base/span.h>
#include "libcamera/internal/yaml_parser.h"
namespace libcamera {
LOG_DECLARE_CATEGORY(Matrix)
#ifndef __DOXYGEN__
template<typename T>
bool matrixInvert(Span<const T> dataIn, Span<T> dataOut, unsigned int dim,
Span<T> scratchBuffer, Span<unsigned int> swapBuffer);
#endif /* __DOXYGEN__ */
template<typename T, unsigned int Rows, unsigned int Cols>
class Matrix
{
static_assert(std::is_arithmetic_v<T>, "Matrix type must be arithmetic");
public:
constexpr Matrix()
{
}
Matrix(const std::array<T, Rows * Cols> &data)
{
std::copy(data.begin(), data.end(), data_.begin());
}
Matrix(const Span<const T, Rows * Cols> data)
{
std::copy(data.begin(), data.end(), data_.begin());
}
static constexpr Matrix identity()
{
Matrix ret;
for (size_t i = 0; i < std::min(Rows, Cols); i++)
ret[i][i] = static_cast<T>(1);
return ret;
}
~Matrix() = default;
const std::string toString() const
{
std::stringstream out;
out << "Matrix { ";
for (unsigned int i = 0; i < Rows; i++) {
out << "[ ";
for (unsigned int j = 0; j < Cols; j++) {
out << (*this)[i][j];
out << ((j + 1 < Cols) ? ", " : " ");
}
out << ((i + 1 < Rows) ? "], " : "]");
}
out << " }";
return out.str();
}
constexpr Span<const T, Rows * Cols> data() const { return data_; }
constexpr Span<const T, Cols> operator[](size_t i) const
{
return Span<const T, Cols>{ &data_.data()[i * Cols], Cols };
}
constexpr Span<T, Cols> operator[](size_t i)
{
return Span<T, Cols>{ &data_.data()[i * Cols], Cols };
}
#ifndef __DOXYGEN__
template<typename U, std::enable_if_t<std::is_arithmetic_v<U>>>
#else
template<typename U>
#endif /* __DOXYGEN__ */
Matrix<T, Rows, Cols> &operator*=(U d)
{
for (unsigned int i = 0; i < Rows * Cols; i++)
data_[i] *= d;
return *this;
}
Matrix<T, Rows, Cols> inverse(bool *ok = nullptr) const
{
static_assert(Rows == Cols, "Matrix must be square");
Matrix<T, Rows, Cols> inverse;
std::array<T, Rows * Cols * 2> scratchBuffer;
std::array<unsigned int, Rows> swapBuffer;
bool res = matrixInvert(Span<const T>(data_),
Span<T>(inverse.data_),
Rows,
Span<T>(scratchBuffer),
Span<unsigned int>(swapBuffer));
if (ok)
*ok = res;
return inverse;
}
private:
/*
* \todo The initializer is only necessary for the constructor to be
* constexpr in C++17. Remove the initializer as soon as we are on
* C++20.
*/
std::array<T, Rows * Cols> data_ = {};
};
#ifndef __DOXYGEN__
template<typename T, typename U, unsigned int Rows, unsigned int Cols,
std::enable_if_t<std::is_arithmetic_v<T>> * = nullptr>
#else
template<typename T, typename U, unsigned int Rows, unsigned int Cols>
#endif /* __DOXYGEN__ */
Matrix<U, Rows, Cols> operator*(T d, const Matrix<U, Rows, Cols> &m)
{
Matrix<U, Rows, Cols> result;
for (unsigned int i = 0; i < Rows; i++) {
for (unsigned int j = 0; j < Cols; j++)
result[i][j] = d * m[i][j];
}
return result;
}
#ifndef __DOXYGEN__
template<typename T, typename U, unsigned int Rows, unsigned int Cols,
std::enable_if_t<std::is_arithmetic_v<T>> * = nullptr>
#else
template<typename T, typename U, unsigned int Rows, unsigned int Cols>
#endif /* __DOXYGEN__ */
Matrix<U, Rows, Cols> operator*(const Matrix<U, Rows, Cols> &m, T d)
{
return d * m;
}
template<typename T1, unsigned int R1, unsigned int C1, typename T2, unsigned int R2, unsigned int C2>
constexpr Matrix<std::common_type_t<T1, T2>, R1, C2> operator*(const Matrix<T1, R1, C1> &m1,
const Matrix<T2, R2, C2> &m2)
{
static_assert(C1 == R2, "Matrix dimensions must match for multiplication");
Matrix<std::common_type_t<T1, T2>, R1, C2> result;
for (unsigned int i = 0; i < R1; i++) {
for (unsigned int j = 0; j < C2; j++) {
std::common_type_t<T1, T2> sum = 0;
for (unsigned int k = 0; k < C1; k++)
sum += m1[i][k] * m2[k][j];
result[i][j] = sum;
}
}
return result;
}
template<typename T, unsigned int Rows, unsigned int Cols>
constexpr Matrix<T, Rows, Cols> operator+(const Matrix<T, Rows, Cols> &m1, const Matrix<T, Rows, Cols> &m2)
{
Matrix<T, Rows, Cols> result;
for (unsigned int i = 0; i < Rows; i++) {
for (unsigned int j = 0; j < Cols; j++)
result[i][j] = m1[i][j] + m2[i][j];
}
return result;
}
#ifndef __DOXYGEN__
bool matrixValidateYaml(const YamlObject &obj, unsigned int size);
#endif /* __DOXYGEN__ */
#ifndef __DOXYGEN__
template<typename T, unsigned int Rows, unsigned int Cols>
std::ostream &operator<<(std::ostream &out, const Matrix<T, Rows, Cols> &m)
{
out << m.toString();
return out;
}
template<typename T, unsigned int Rows, unsigned int Cols>
struct YamlObject::Getter<Matrix<T, Rows, Cols>> {
std::optional<Matrix<T, Rows, Cols>> get(const YamlObject &obj) const
{
if (!matrixValidateYaml(obj, Rows * Cols))
return std::nullopt;
Matrix<T, Rows, Cols> matrix;
T *data = &matrix[0][0];
unsigned int i = 0;
for (const YamlObject &entry : obj.asList()) {
const auto value = entry.get<T>();
if (!value)
return std::nullopt;
data[i++] = *value;
}
return matrix;
}
};
#endif /* __DOXYGEN__ */
} /* namespace libcamera */

View file

@ -8,6 +8,7 @@
#pragma once #pragma once
#include <map> #include <map>
#include <sstream>
#include <string> #include <string>
#include <vector> #include <vector>
@ -55,8 +56,6 @@ public:
Signal<> disconnected; Signal<> disconnected;
std::vector<MediaEntity *> locateEntities(unsigned int function);
protected: protected:
std::string logPrefix() const override; std::string logPrefix() const override;

View file

@ -48,8 +48,6 @@ public:
unsigned int flags() const { return flags_; } unsigned int flags() const { return flags_; }
int setEnabled(bool enable); int setEnabled(bool enable);
std::string toString() const;
private: private:
LIBCAMERA_DISABLE_COPY_AND_MOVE(MediaLink) LIBCAMERA_DISABLE_COPY_AND_MOVE(MediaLink)
@ -63,8 +61,6 @@ private:
unsigned int flags_; unsigned int flags_;
}; };
std::ostream &operator<<(std::ostream &out, const MediaLink &link);
class MediaPad : public MediaObject class MediaPad : public MediaObject
{ {
public: public:
@ -75,8 +71,6 @@ public:
void addLink(MediaLink *link); void addLink(MediaLink *link);
std::string toString() const;
private: private:
LIBCAMERA_DISABLE_COPY_AND_MOVE(MediaPad) LIBCAMERA_DISABLE_COPY_AND_MOVE(MediaPad)
@ -91,8 +85,6 @@ private:
std::vector<MediaLink *> links_; std::vector<MediaLink *> links_;
}; };
std::ostream &operator<<(std::ostream &out, const MediaPad &pad);
class MediaEntity : public MediaObject class MediaEntity : public MediaObject
{ {
public: public:
@ -112,7 +104,7 @@ public:
unsigned int deviceMinor() const { return minor_; } unsigned int deviceMinor() const { return minor_; }
const std::vector<MediaPad *> &pads() const { return pads_; } const std::vector<MediaPad *> &pads() const { return pads_; }
const std::vector<MediaEntity *> &ancillaryEntities() const { return ancillaryEntities_; } const std::vector<MediaEntity *> ancillaryEntities() const { return ancillaryEntities_; }
const MediaPad *getPadByIndex(unsigned int index) const; const MediaPad *getPadByIndex(unsigned int index) const;
const MediaPad *getPadById(unsigned int id) const; const MediaPad *getPadById(unsigned int id) const;

View file

@ -1,59 +0,0 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2024, Ideas on Board Oy
*
* Media pipeline support
*/
#pragma once
#include <list>
#include <string>
#include <libcamera/base/log.h>
namespace libcamera {
class CameraSensor;
class MediaEntity;
class MediaLink;
class MediaPad;
struct V4L2SubdeviceFormat;
class MediaPipeline
{
public:
int init(MediaEntity *source, std::string_view sink);
int initLinks();
int configure(CameraSensor *sensor, V4L2SubdeviceFormat *);
private:
struct Entity {
/* The media entity, always valid. */
MediaEntity *entity;
/*
* Whether or not the entity is a subdev that supports the
* routing API.
*/
bool supportsRouting;
/*
* The local sink pad connected to the upstream entity, null for
* the camera sensor at the beginning of the pipeline.
*/
const MediaPad *sink;
/*
* The local source pad connected to the downstream entity, null
* for the video node at the end of the pipeline.
*/
const MediaPad *source;
/*
* The link on the source pad, to the downstream entity, null
* for the video node at the end of the pipeline.
*/
MediaLink *sourceLink;
};
std::list<Entity> entities_;
};
} /* namespace libcamera */

View file

@ -2,6 +2,13 @@
subdir('tracepoints') subdir('tracepoints')
libcamera_tracepoint_header = custom_target(
'tp_header',
input : ['tracepoints.h.in', tracepoint_files],
output : 'tracepoints.h',
command : [gen_tracepoints_header, include_build_dir, '@OUTPUT@', '@INPUT@'],
)
libcamera_internal_headers = files([ libcamera_internal_headers = files([
'bayer_format.h', 'bayer_format.h',
'byte_stream_buffer.h', 'byte_stream_buffer.h',
@ -11,29 +18,23 @@ libcamera_internal_headers = files([
'camera_manager.h', 'camera_manager.h',
'camera_sensor.h', 'camera_sensor.h',
'camera_sensor_properties.h', 'camera_sensor_properties.h',
'clock_recovery.h',
'control_serializer.h', 'control_serializer.h',
'control_validator.h', 'control_validator.h',
'converter.h', 'converter.h',
'debug_controls.h',
'delayed_controls.h', 'delayed_controls.h',
'device_enumerator.h', 'device_enumerator.h',
'device_enumerator_sysfs.h', 'device_enumerator_sysfs.h',
'device_enumerator_udev.h', 'device_enumerator_udev.h',
'dma_buf_allocator.h', 'dma_heaps.h',
'formats.h', 'formats.h',
'framebuffer.h', 'framebuffer.h',
'ipa_data_serializer.h',
'ipa_manager.h', 'ipa_manager.h',
'ipa_module.h', 'ipa_module.h',
'ipa_proxy.h', 'ipa_proxy.h',
'ipc_pipe.h',
'ipc_unixsocket.h', 'ipc_unixsocket.h',
'mapped_framebuffer.h', 'mapped_framebuffer.h',
'matrix.h',
'media_device.h', 'media_device.h',
'media_object.h', 'media_object.h',
'media_pipeline.h',
'pipeline_handler.h', 'pipeline_handler.h',
'process.h', 'process.h',
'pub_key.h', 'pub_key.h',
@ -45,18 +46,8 @@ libcamera_internal_headers = files([
'v4l2_pixelformat.h', 'v4l2_pixelformat.h',
'v4l2_subdevice.h', 'v4l2_subdevice.h',
'v4l2_videodevice.h', 'v4l2_videodevice.h',
'vector.h',
'yaml_parser.h', 'yaml_parser.h',
]) ])
tracepoints_h = custom_target(
'tp_header',
input : ['tracepoints.h.in', tracepoint_files],
output : 'tracepoints.h',
command : [gen_tracepoints, include_build_dir, '@OUTPUT@', '@INPUT@'],
)
libcamera_internal_headers += tracepoints_h
subdir('converter') subdir('converter')
subdir('software_isp') subdir('software_isp')

View file

@ -9,15 +9,19 @@
#include <memory> #include <memory>
#include <queue> #include <queue>
#include <set>
#include <string> #include <string>
#include <sys/types.h> #include <sys/types.h>
#include <vector> #include <vector>
#include <libcamera/base/mutex.h>
#include <libcamera/base/object.h> #include <libcamera/base/object.h>
#include <libcamera/controls.h> #include <libcamera/controls.h>
#include <libcamera/stream.h> #include <libcamera/stream.h>
#include "libcamera/internal/ipa_proxy.h"
namespace libcamera { namespace libcamera {
class Camera; class Camera;
@ -41,7 +45,7 @@ public:
MediaDevice *acquireMediaDevice(DeviceEnumerator *enumerator, MediaDevice *acquireMediaDevice(DeviceEnumerator *enumerator,
const DeviceMatch &dm); const DeviceMatch &dm);
bool acquire(Camera *camera); bool acquire();
void release(Camera *camera); void release(Camera *camera);
virtual std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera, virtual std::unique_ptr<CameraConfiguration> generateConfiguration(Camera *camera,
@ -60,16 +64,12 @@ public:
bool completeBuffer(Request *request, FrameBuffer *buffer); bool completeBuffer(Request *request, FrameBuffer *buffer);
void completeRequest(Request *request); void completeRequest(Request *request);
void cancelRequest(Request *request);
std::string configurationFile(const std::string &subdir, std::string configurationFile(const std::string &subdir,
const std::string &name, const std::string &name) const;
bool silent = false) const;
const char *name() const { return name_; } const char *name() const { return name_; }
CameraManager *cameraManager() const { return manager_; }
protected: protected:
void registerCamera(std::shared_ptr<Camera> camera); void registerCamera(std::shared_ptr<Camera> camera);
void hotplugMediaDevice(MediaDevice *media); void hotplugMediaDevice(MediaDevice *media);
@ -77,7 +77,6 @@ protected:
virtual int queueRequestDevice(Camera *camera, Request *request) = 0; virtual int queueRequestDevice(Camera *camera, Request *request) = 0;
virtual void stopDevice(Camera *camera) = 0; virtual void stopDevice(Camera *camera) = 0;
virtual bool acquireDevice(Camera *camera);
virtual void releaseDevice(Camera *camera); virtual void releaseDevice(Camera *camera);
CameraManager *manager_; CameraManager *manager_;
@ -97,7 +96,9 @@ private:
std::queue<Request *> waitingRequests_; std::queue<Request *> waitingRequests_;
const char *name_; const char *name_;
unsigned int useCount_;
Mutex lock_;
unsigned int useCount_ LIBCAMERA_TSA_GUARDED_BY(lock_);
friend class PipelineHandlerFactoryBase; friend class PipelineHandlerFactoryBase;
}; };

View file

@ -11,7 +11,6 @@
#include <string> #include <string>
#include <vector> #include <vector>
#include <libcamera/base/class.h>
#include <libcamera/base/signal.h> #include <libcamera/base/signal.h>
#include <libcamera/base/unique_fd.h> #include <libcamera/base/unique_fd.h>
@ -43,8 +42,6 @@ public:
Signal<enum ExitStatus, int> finished; Signal<enum ExitStatus, int> finished;
private: private:
LIBCAMERA_DISABLE_COPY_AND_MOVE(Process)
void closeAllFdsExcept(const std::vector<int> &fds); void closeAllFdsExcept(const std::vector<int> &fds);
int isolate(); int isolate();
void died(int wstatus); void died(int wstatus);

View file

@ -10,8 +10,6 @@
#include <chrono> #include <chrono>
#include <map> #include <map>
#include <memory> #include <memory>
#include <stdint.h>
#include <unordered_set>
#include <libcamera/base/event_notifier.h> #include <libcamera/base/event_notifier.h>
#include <libcamera/base/timer.h> #include <libcamera/base/timer.h>

View file

@ -8,6 +8,7 @@
*/ */
#pragma once #pragma once
#include <stddef.h>
#include <stdint.h> #include <stdint.h>
#include <string> #include <string>
#include <sys/mman.h> #include <sys/mman.h>

View file

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */ /* SPDX-License-Identifier: LGPL-2.1-or-later */
/* /*
* Copyright (C) 2023-2025 Red Hat Inc. * Copyright (C) 2023, Red Hat Inc.
* *
* Authors: * Authors:
* Hans de Goede <hdegoede@redhat.com> * Hans de Goede <hdegoede@redhat.com>
@ -10,45 +10,20 @@
#pragma once #pragma once
#include <array>
#include <stdint.h>
namespace libcamera { namespace libcamera {
struct DebayerParams { struct DebayerParams {
static constexpr unsigned int kRGBLookupSize = 256; static constexpr unsigned int kGain10 = 256;
struct CcmColumn { unsigned int gainR;
int16_t r; unsigned int gainG;
int16_t g; unsigned int gainB;
int16_t b;
};
using LookupTable = std::array<uint8_t, kRGBLookupSize>; float gamma;
using CcmLookupTable = std::array<CcmColumn, kRGBLookupSize>; /**
* \brief Level of the black point, 0..255, 0 is no correction.
/*
* Color lookup tables when CCM is not used.
*
* Each color of a debayered pixel is amended by the corresponding
* value in the given table.
*/ */
LookupTable red; unsigned int blackLevel;
LookupTable green;
LookupTable blue;
/*
* Color and gamma lookup tables when CCM is used.
*
* Each of the CcmLookupTable's corresponds to a CCM column; together they
* make a complete 3x3 CCM lookup table. The CCM is applied on debayered
* pixels and then the gamma lookup table is used to set the resulting
* values of all the three colors.
*/
CcmLookupTable redCcm;
CcmLookupTable greenCcm;
CcmLookupTable blueCcm;
LookupTable gammaLut;
}; };
} /* namespace libcamera */ } /* namespace libcamera */

View file

@ -7,19 +7,16 @@
#pragma once #pragma once
#include <deque>
#include <functional> #include <functional>
#include <initializer_list> #include <initializer_list>
#include <map> #include <map>
#include <memory> #include <memory>
#include <stdint.h>
#include <string> #include <string>
#include <tuple> #include <tuple>
#include <vector> #include <vector>
#include <libcamera/base/class.h> #include <libcamera/base/class.h>
#include <libcamera/base/log.h> #include <libcamera/base/log.h>
#include <libcamera/base/object.h>
#include <libcamera/base/signal.h> #include <libcamera/base/signal.h>
#include <libcamera/base/thread.h> #include <libcamera/base/thread.h>
@ -30,7 +27,7 @@
#include <libcamera/ipa/soft_ipa_proxy.h> #include <libcamera/ipa/soft_ipa_proxy.h>
#include "libcamera/internal/camera_sensor.h" #include "libcamera/internal/camera_sensor.h"
#include "libcamera/internal/dma_buf_allocator.h" #include "libcamera/internal/dma_heaps.h"
#include "libcamera/internal/pipeline_handler.h" #include "libcamera/internal/pipeline_handler.h"
#include "libcamera/internal/shared_mem_object.h" #include "libcamera/internal/shared_mem_object.h"
#include "libcamera/internal/software_isp/debayer_params.h" #include "libcamera/internal/software_isp/debayer_params.h"
@ -40,16 +37,14 @@ namespace libcamera {
class DebayerCpu; class DebayerCpu;
class FrameBuffer; class FrameBuffer;
class PixelFormat; class PixelFormat;
class Stream;
struct StreamConfiguration; struct StreamConfiguration;
LOG_DECLARE_CATEGORY(SoftwareIsp) LOG_DECLARE_CATEGORY(SoftwareIsp)
class SoftwareIsp : public Object class SoftwareIsp
{ {
public: public:
SoftwareIsp(PipelineHandler *pipe, const CameraSensor *sensor, SoftwareIsp(PipelineHandler *pipe, const CameraSensor *sensor);
ControlInfoMap *ipaControls);
~SoftwareIsp(); ~SoftwareIsp();
int loadConfiguration([[maybe_unused]] const std::string &filename) { return 0; } int loadConfiguration([[maybe_unused]] const std::string &filename) { return 0; }
@ -65,33 +60,30 @@ public:
int configure(const StreamConfiguration &inputCfg, int configure(const StreamConfiguration &inputCfg,
const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs, const std::vector<std::reference_wrapper<StreamConfiguration>> &outputCfgs,
const ipa::soft::IPAConfigInfo &configInfo); const ControlInfoMap &sensorControls);
int exportBuffers(const Stream *stream, unsigned int count, int exportBuffers(unsigned int output, unsigned int count,
std::vector<std::unique_ptr<FrameBuffer>> *buffers); std::vector<std::unique_ptr<FrameBuffer>> *buffers);
void processStats(const uint32_t frame, const uint32_t bufferId, void processStats(const ControlList &sensorControls);
const ControlList &sensorControls);
int start(); int start();
void stop(); void stop();
void queueRequest(const uint32_t frame, const ControlList &controls); int queueBuffers(FrameBuffer *input,
int queueBuffers(uint32_t frame, FrameBuffer *input, const std::map<unsigned int, FrameBuffer *> &outputs);
const std::map<const Stream *, FrameBuffer *> &outputs);
void process(uint32_t frame, FrameBuffer *input, FrameBuffer *output); void process(FrameBuffer *input, FrameBuffer *output);
Signal<FrameBuffer *> inputBufferReady; Signal<FrameBuffer *> inputBufferReady;
Signal<FrameBuffer *> outputBufferReady; Signal<FrameBuffer *> outputBufferReady;
Signal<uint32_t, uint32_t> ispStatsReady; Signal<> ispStatsReady;
Signal<uint32_t, const ControlList &> metadataReady;
Signal<const ControlList &> setSensorControls; Signal<const ControlList &> setSensorControls;
private: private:
void saveIspParams(); void saveIspParams();
void setSensorCtrls(const ControlList &sensorControls); void setSensorCtrls(const ControlList &sensorControls);
void statsReady(uint32_t frame, uint32_t bufferId); void statsReady();
void inputReady(FrameBuffer *input); void inputReady(FrameBuffer *input);
void outputReady(FrameBuffer *output); void outputReady(FrameBuffer *output);
@ -99,12 +91,9 @@ private:
Thread ispWorkerThread_; Thread ispWorkerThread_;
SharedMemObject<DebayerParams> sharedParams_; SharedMemObject<DebayerParams> sharedParams_;
DebayerParams debayerParams_; DebayerParams debayerParams_;
DmaBufAllocator dmaHeap_; DmaHeap dmaHeap_;
bool ccmEnabled_;
std::unique_ptr<ipa::soft::IPAProxySoft> ipa_; std::unique_ptr<ipa::soft::IPAProxySoft> ipa_;
std::deque<FrameBuffer *> queuedInputBuffers_;
std::deque<FrameBuffer *> queuedOutputBuffers_;
}; };
} /* namespace libcamera */ } /* namespace libcamera */

View file

@ -1,6 +1,6 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */ /* SPDX-License-Identifier: LGPL-2.1-or-later */
/* /*
* Copyright (C) 2020, Google Inc. * Copyright (C) {{year}}, Google Inc.
* *
* Tracepoints with lttng * Tracepoints with lttng
* *

View file

@ -5,8 +5,6 @@
* request.tp - Tracepoints for the request object * request.tp - Tracepoints for the request object
*/ */
#include <stdint.h>
#include <libcamera/framebuffer.h> #include <libcamera/framebuffer.h>
#include "libcamera/internal/request.h" #include "libcamera/internal/request.h"

View file

@ -10,7 +10,6 @@
#include <map> #include <map>
#include <memory> #include <memory>
#include <optional> #include <optional>
#include <stdint.h>
#include <vector> #include <vector>
#include <linux/videodev2.h> #include <linux/videodev2.h>
@ -45,7 +44,6 @@ public:
const std::string &deviceNode() const { return deviceNode_; } const std::string &deviceNode() const { return deviceNode_; }
std::string devicePath() const; std::string devicePath() const;
bool supportsFrameStartEvent();
int setFrameStartEnabled(bool enable); int setFrameStartEnabled(bool enable);
Signal<uint32_t> frameStart; Signal<uint32_t> frameStart;

View file

@ -49,8 +49,6 @@ public:
static const std::vector<V4L2PixelFormat> & static const std::vector<V4L2PixelFormat> &
fromPixelFormat(const PixelFormat &pixelFormat); fromPixelFormat(const PixelFormat &pixelFormat);
bool isGenericLineBasedMetadata() const;
private: private:
uint32_t fourcc_; uint32_t fourcc_;
}; };

View file

@ -10,7 +10,6 @@
#include <memory> #include <memory>
#include <optional> #include <optional>
#include <ostream> #include <ostream>
#include <stdint.h>
#include <string> #include <string>
#include <vector> #include <vector>
@ -177,9 +176,6 @@ private:
std::vector<SizeRange> enumPadSizes(const Stream &stream, std::vector<SizeRange> enumPadSizes(const Stream &stream,
unsigned int code); unsigned int code);
int getRoutingLegacy(Routing *routing, Whence whence);
int setRoutingLegacy(Routing *routing, Whence whence);
const MediaEntity *entity_; const MediaEntity *entity_;
std::string model_; std::string model_;

View file

@ -8,6 +8,7 @@
#pragma once #pragma once
#include <array> #include <array>
#include <atomic>
#include <memory> #include <memory>
#include <optional> #include <optional>
#include <ostream> #include <ostream>
@ -157,7 +158,7 @@ private:
std::vector<Plane> planes_; std::vector<Plane> planes_;
}; };
uint64_t lastUsedCounter_; std::atomic<uint64_t> lastUsedCounter_;
std::vector<Entry> cache_; std::vector<Entry> cache_;
/* \todo Expose the miss counter through an instrumentation API. */ /* \todo Expose the miss counter through an instrumentation API. */
unsigned int missCounter_; unsigned int missCounter_;
@ -207,7 +208,6 @@ public:
int setFormat(V4L2DeviceFormat *format); int setFormat(V4L2DeviceFormat *format);
Formats formats(uint32_t code = 0); Formats formats(uint32_t code = 0);
int getSelection(unsigned int target, Rectangle *rect);
int setSelection(unsigned int target, Rectangle *rect); int setSelection(unsigned int target, Rectangle *rect);
int allocateBuffers(unsigned int count, int allocateBuffers(unsigned int count,

View file

@ -1,371 +0,0 @@
/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
* Copyright (C) 2024, Paul Elder <paul.elder@ideasonboard.com>
*
* Vector and related operations
*/
#pragma once
#include <algorithm>
#include <array>
#include <cmath>
#include <functional>
#include <numeric>
#include <optional>
#include <ostream>
#include <type_traits>
#include <libcamera/base/log.h>
#include <libcamera/base/span.h>
#include "libcamera/internal/matrix.h"
#include "libcamera/internal/yaml_parser.h"
namespace libcamera {
LOG_DECLARE_CATEGORY(Vector)
#ifndef __DOXYGEN__
template<typename T, unsigned int Rows,
std::enable_if_t<std::is_arithmetic_v<T>> * = nullptr>
#else
template<typename T, unsigned int Rows>
#endif /* __DOXYGEN__ */
class Vector
{
public:
constexpr Vector() = default;
constexpr explicit Vector(T scalar)
{
data_.fill(scalar);
}
constexpr Vector(const std::array<T, Rows> &data)
{
std::copy(data.begin(), data.end(), data_.begin());
}
constexpr Vector(const Span<const T, Rows> data)
{
std::copy(data.begin(), data.end(), data_.begin());
}
const T &operator[](size_t i) const
{
ASSERT(i < data_.size());
return data_[i];
}
T &operator[](size_t i)
{
ASSERT(i < data_.size());
return data_[i];
}
constexpr Vector<T, Rows> operator-() const
{
Vector<T, Rows> ret;
for (unsigned int i = 0; i < Rows; i++)
ret[i] = -data_[i];
return ret;
}
constexpr Vector operator+(const Vector &other) const
{
return apply(*this, other, std::plus<>{});
}
constexpr Vector operator+(T scalar) const
{
return apply(*this, scalar, std::plus<>{});
}
constexpr Vector operator-(const Vector &other) const
{
return apply(*this, other, std::minus<>{});
}
constexpr Vector operator-(T scalar) const
{
return apply(*this, scalar, std::minus<>{});
}
constexpr Vector operator*(const Vector &other) const
{
return apply(*this, other, std::multiplies<>{});
}
constexpr Vector operator*(T scalar) const
{
return apply(*this, scalar, std::multiplies<>{});
}
constexpr Vector operator/(const Vector &other) const
{
return apply(*this, other, std::divides<>{});
}
constexpr Vector operator/(T scalar) const
{
return apply(*this, scalar, std::divides<>{});
}
Vector &operator+=(const Vector &other)
{
return apply(other, [](T a, T b) { return a + b; });
}
Vector &operator+=(T scalar)
{
return apply(scalar, [](T a, T b) { return a + b; });
}
Vector &operator-=(const Vector &other)
{
return apply(other, [](T a, T b) { return a - b; });
}
Vector &operator-=(T scalar)
{
return apply(scalar, [](T a, T b) { return a - b; });
}
Vector &operator*=(const Vector &other)
{
return apply(other, [](T a, T b) { return a * b; });
}
Vector &operator*=(T scalar)
{
return apply(scalar, [](T a, T b) { return a * b; });
}
Vector &operator/=(const Vector &other)
{
return apply(other, [](T a, T b) { return a / b; });
}
Vector &operator/=(T scalar)
{
return apply(scalar, [](T a, T b) { return a / b; });
}
constexpr Vector min(const Vector &other) const
{
return apply(*this, other, [](T a, T b) { return std::min(a, b); });
}
constexpr Vector min(T scalar) const
{
return apply(*this, scalar, [](T a, T b) { return std::min(a, b); });
}
constexpr Vector max(const Vector &other) const
{
return apply(*this, other, [](T a, T b) { return std::max(a, b); });
}
constexpr Vector max(T scalar) const
{
return apply(*this, scalar, [](T a, T b) -> T { return std::max(a, b); });
}
constexpr T dot(const Vector<T, Rows> &other) const
{
T ret = 0;
for (unsigned int i = 0; i < Rows; i++)
ret += data_[i] * other[i];
return ret;
}
/*
 * Named accessors for the first three elements. The geometry-style names
 * x(), y() and z() and the colour-style names r(), g() and b() map to
 * indices 0, 1 and 2 respectively. Each accessor is guarded by a
 * dependent enable_if so that it only participates in overload
 * resolution when the vector has enough rows; the guard is hidden from
 * Doxygen as an implementation detail.
 */
/* Const geometry accessors: x = data_[0], y = data_[1], z = data_[2]. */
#ifndef __DOXYGEN__
template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 1>>
#endif /* __DOXYGEN__ */
constexpr const T &x() const { return data_[0]; }
#ifndef __DOXYGEN__
template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 2>>
#endif /* __DOXYGEN__ */
constexpr const T &y() const { return data_[1]; }
#ifndef __DOXYGEN__
template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 3>>
#endif /* __DOXYGEN__ */
constexpr const T &z() const { return data_[2]; }
/* Mutable geometry accessors. */
#ifndef __DOXYGEN__
template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 1>>
#endif /* __DOXYGEN__ */
constexpr T &x() { return data_[0]; }
#ifndef __DOXYGEN__
template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 2>>
#endif /* __DOXYGEN__ */
constexpr T &y() { return data_[1]; }
#ifndef __DOXYGEN__
template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 3>>
#endif /* __DOXYGEN__ */
constexpr T &z() { return data_[2]; }
/* Const colour accessors: r = data_[0], g = data_[1], b = data_[2]. */
#ifndef __DOXYGEN__
template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 1>>
#endif /* __DOXYGEN__ */
constexpr const T &r() const { return data_[0]; }
#ifndef __DOXYGEN__
template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 2>>
#endif /* __DOXYGEN__ */
constexpr const T &g() const { return data_[1]; }
#ifndef __DOXYGEN__
template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 3>>
#endif /* __DOXYGEN__ */
constexpr const T &b() const { return data_[2]; }
/* Mutable colour accessors. */
#ifndef __DOXYGEN__
template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 1>>
#endif /* __DOXYGEN__ */
constexpr T &r() { return data_[0]; }
#ifndef __DOXYGEN__
template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 2>>
#endif /* __DOXYGEN__ */
constexpr T &g() { return data_[1]; }
#ifndef __DOXYGEN__
template<bool Dependent = false, typename = std::enable_if_t<Dependent || Rows >= 3>>
#endif /* __DOXYGEN__ */
constexpr T &b() { return data_[2]; }
constexpr double length2() const
{
double ret = 0;
for (unsigned int i = 0; i < Rows; i++)
ret += data_[i] * data_[i];
return ret;
}
/* Euclidean (L2) norm of the vector. */
constexpr double length() const
{
	const double squared = length2();

	return std::sqrt(squared);
}
template<typename R = T>
constexpr R sum() const
{
return std::accumulate(data_.begin(), data_.end(), R{});
}
private:
	/*
	 * Build a new vector whose i-th element is op(lhs[i], rhs[i]),
	 * applied in index order.
	 */
	template<class BinaryOp>
	static constexpr Vector apply(const Vector &lhs, const Vector &rhs, BinaryOp op)
	{
		Vector result;

		for (unsigned int i = 0; i < Rows; i++)
			result.data_[i] = op(lhs.data_[i], rhs.data_[i]);

		return result;
	}
/* Build a new vector whose i-th element is op(lhs[i], rhs). */
template<class BinaryOp>
static constexpr Vector apply(const Vector &lhs, T rhs, BinaryOp op)
{
	Vector result;

	for (unsigned int i = 0; i < Rows; i++)
		result.data_[i] = op(lhs.data_[i], rhs);

	return result;
}
/*
 * In-place update: replace each element v by op(v, o), where o is the
 * corresponding element of \a other. Returns *this for chaining.
 */
template<class BinaryOp>
Vector &apply(const Vector &other, BinaryOp op)
{
	for (unsigned int i = 0; i < Rows; i++)
		data_[i] = op(data_[i], other.data_[i]);

	return *this;
}
/*
 * In-place update: replace each element v by op(v, scalar). Returns
 * *this for chaining.
 */
template<class BinaryOp>
Vector &apply(T scalar, BinaryOp op)
{
	for (T &element : data_)
		element = op(element, scalar);

	return *this;
}
std::array<T, Rows> data_;
};
/*
 * Convenience alias: a colour triplet stored as a 3-element vector. The
 * accessors r(), g() and b() map to indices 0, 1 and 2.
 */
template<typename T>
using RGB = Vector<T, 3>;
/*
 * Matrix-vector product: result[i] is the sum over j of m[i][j] * v[j].
 * The result's element type is the common type of the two operands'
 * element types.
 */
template<typename T, typename U, unsigned int Rows, unsigned int Cols>
Vector<std::common_type_t<T, U>, Rows> operator*(const Matrix<T, Rows, Cols> &m, const Vector<U, Cols> &v)
{
	using R = std::common_type_t<T, U>;
	Vector<R, Rows> product;

	for (unsigned int row = 0; row < Rows; row++) {
		R acc = 0;

		for (unsigned int col = 0; col < Cols; col++)
			acc += m[row][col] * v[col];

		product[row] = acc;
	}

	return product;
}
/*
 * Element-wise equality of two vectors. The scan stops at the first
 * mismatching pair of elements.
 */
template<typename T, unsigned int Rows>
bool operator==(const Vector<T, Rows> &lhs, const Vector<T, Rows> &rhs)
{
	unsigned int i = 0;

	while (i < Rows && !(lhs[i] != rhs[i]))
		i++;

	return i == Rows;
}
/* Inequality: true if any pair of corresponding elements differs. */
template<typename T, unsigned int Rows>
bool operator!=(const Vector<T, Rows> &lhs, const Vector<T, Rows> &rhs)
{
	for (unsigned int i = 0; i < Rows; i++) {
		if (lhs[i] != rhs[i])
			return true;
	}

	return false;
}
#ifndef __DOXYGEN__
bool vectorValidateYaml(const YamlObject &obj, unsigned int size);
#endif /* __DOXYGEN__ */
#ifndef __DOXYGEN__
/*
 * Insert a textual representation of \a v into \a out, in the form
 * "Vector { e0, e1, ... }".
 */
template<typename T, unsigned int Rows>
std::ostream &operator<<(std::ostream &out, const Vector<T, Rows> &v)
{
	out << "Vector { ";
	for (unsigned int i = 0; i < Rows; i++) {
		out << v[i];
		out << ((i + 1 < Rows) ? ", " : " ");
	}

	/*
	 * Every element already prints a trailing separator (", " or a
	 * single closing " "), so emit a bare brace here: the previous
	 * " }" produced a double space before the brace.
	 */
	out << "}";
	return out;
}
/*
 * YamlObject getter specialization for Vector<T, Rows>: converts a YAML
 * list into a fixed-size vector.
 *
 * Returns std::nullopt when vectorValidateYaml(obj, Rows) rejects the
 * object (presumably a list/size check — confirm against its
 * definition) or when any list entry fails to convert to T. Elements
 * are stored in list order starting at index 0.
 */
template<typename T, unsigned int Rows>
struct YamlObject::Getter<Vector<T, Rows>> {
	std::optional<Vector<T, Rows>> get(const YamlObject &obj) const
	{
		/* Reject objects that do not validate for Rows elements. */
		if (!vectorValidateYaml(obj, Rows))
			return std::nullopt;
		Vector<T, Rows> vector;
		unsigned int i = 0;
		for (const YamlObject &entry : obj.asList()) {
			/* Any unconvertible entry aborts the whole parse. */
			const auto value = entry.get<T>();
			if (!value)
				return std::nullopt;
			vector[i++] = *value;
		}
		return vector;
	}
};
#endif /* __DOXYGEN__ */
} /* namespace libcamera */

View file

@ -10,9 +10,7 @@
#include <iterator> #include <iterator>
#include <map> #include <map>
#include <optional> #include <optional>
#include <stdint.h>
#include <string> #include <string>
#include <string_view>
#include <vector> #include <vector>
#include <libcamera/base/class.h> #include <libcamera/base/class.h>
@ -160,34 +158,37 @@ public:
{ {
return type_ == Type::Dictionary; return type_ == Type::Dictionary;
} }
bool isEmpty() const
{
return type_ == Type::Empty;
}
explicit operator bool() const
{
return type_ != Type::Empty;
}
std::size_t size() const; std::size_t size() const;
#ifndef __DOXYGEN__
template<typename T,
std::enable_if_t<
std::is_same_v<bool, T> ||
std::is_same_v<double, T> ||
std::is_same_v<int8_t, T> ||
std::is_same_v<uint8_t, T> ||
std::is_same_v<int16_t, T> ||
std::is_same_v<uint16_t, T> ||
std::is_same_v<int32_t, T> ||
std::is_same_v<uint32_t, T> ||
std::is_same_v<std::string, T> ||
std::is_same_v<Size, T>> * = nullptr>
#else
template<typename T> template<typename T>
std::optional<T> get() const #endif
{ std::optional<T> get() const;
return Getter<T>{}.get(*this);
}
template<typename T, typename U> template<typename T>
T get(U &&defaultValue) const T get(const T &defaultValue) const
{ {
return get<T>().value_or(std::forward<U>(defaultValue)); return get<T>().value_or(defaultValue);
} }
#ifndef __DOXYGEN__ #ifndef __DOXYGEN__
template<typename T, template<typename T,
std::enable_if_t< std::enable_if_t<
std::is_same_v<bool, T> || std::is_same_v<bool, T> ||
std::is_same_v<float, T> ||
std::is_same_v<double, T> || std::is_same_v<double, T> ||
std::is_same_v<int8_t, T> || std::is_same_v<int8_t, T> ||
std::is_same_v<uint8_t, T> || std::is_same_v<uint8_t, T> ||
@ -207,33 +208,25 @@ public:
const YamlObject &operator[](std::size_t index) const; const YamlObject &operator[](std::size_t index) const;
bool contains(std::string_view key) const; bool contains(const std::string &key) const;
const YamlObject &operator[](std::string_view key) const; const YamlObject &operator[](const std::string &key) const;
private: private:
LIBCAMERA_DISABLE_COPY_AND_MOVE(YamlObject) LIBCAMERA_DISABLE_COPY_AND_MOVE(YamlObject)
template<typename T>
friend struct Getter;
friend class YamlParserContext; friend class YamlParserContext;
enum class Type { enum class Type {
Dictionary, Dictionary,
List, List,
Value, Value,
Empty,
};
template<typename T, typename Enable = void>
struct Getter {
std::optional<T> get(const YamlObject &obj) const;
}; };
Type type_; Type type_;
std::string value_; std::string value_;
Container list_; Container list_;
std::map<std::string, YamlObject *, std::less<>> dictionary_; std::map<std::string, YamlObject *> dictionary_;
}; };
class YamlParser final class YamlParser final

View file

@ -46,8 +46,7 @@ struct ipa_control_info_entry {
uint32_t id; uint32_t id;
uint32_t type; uint32_t type;
uint32_t offset; uint32_t offset;
uint8_t direction; uint32_t padding[1];
uint8_t padding[3];
}; };
#ifdef __cplusplus #ifdef __cplusplus

View file

@ -7,6 +7,19 @@
#pragma once #pragma once
#include <stddef.h>
#include <stdint.h>
#include <map>
#include <vector>
#include <libcamera/base/flags.h>
#include <libcamera/base/signal.h>
#include <libcamera/controls.h>
#include <libcamera/framebuffer.h>
#include <libcamera/geometry.h>
namespace libcamera { namespace libcamera {
/* /*
@ -20,8 +33,8 @@ public:
virtual ~IPAInterface() = default; virtual ~IPAInterface() = default;
}; };
} /* namespace libcamera */
extern "C" { extern "C" {
libcamera::IPAInterface *ipaCreate(); libcamera::IPAInterface *ipaCreate();
} }
} /* namespace libcamera */

View file

@ -31,14 +31,14 @@ interface IPAIPU3Interface {
unmapBuffers(array<uint32> ids); unmapBuffers(array<uint32> ids);
[async] queueRequest(uint32 frame, libcamera.ControlList controls); [async] queueRequest(uint32 frame, libcamera.ControlList controls);
[async] computeParams(uint32 frame, uint32 bufferId); [async] fillParamsBuffer(uint32 frame, uint32 bufferId);
[async] processStats(uint32 frame, int64 frameTimestamp, [async] processStatsBuffer(uint32 frame, int64 frameTimestamp,
uint32 bufferId, libcamera.ControlList sensorControls); uint32 bufferId, libcamera.ControlList sensorControls);
}; };
interface IPAIPU3EventInterface { interface IPAIPU3EventInterface {
setSensorControls(uint32 frame, libcamera.ControlList sensorControls, setSensorControls(uint32 frame, libcamera.ControlList sensorControls,
libcamera.ControlList lensControls); libcamera.ControlList lensControls);
paramsComputed(uint32 frame); paramsBufferReady(uint32 frame);
metadataReady(uint32 frame, libcamera.ControlList metadata); metadataReady(uint32 frame, libcamera.ControlList metadata);
}; };

Some files were not shown because too many files have changed in this diff Show more